Usage examples for java.util.Queue#isEmpty()
boolean isEmpty();
From source file:com.connection.factory.SftpConnectionApacheLib.java
@Override public List<RemoteFileObject> readAllFilesWalkinPath(String remotePath) { List<RemoteFileObject> willReturnObject = new ArrayList<>(); Queue<RemoteFileObject> directorylist = new LinkedBlockingQueue<>(); RemoteFileObject object = null;// w ww . j ava 2 s. c o m object = new FtpApacheFileObject(FileInfoEnum.DIRECTORY); object.setDirectPath(remotePath); directorylist.add(object); try { while (!directorylist.isEmpty()) { object = directorylist.poll(); List<ChannelSftp.LsEntry> list = command.ls(object.getPath()); for (ChannelSftp.LsEntry each : list) { if (each.getFilename().equals(".") || each.getFilename().equals("..")) { continue; } RemoteFileObject objectTemp = null; SftpATTRS attributes = each.getAttrs(); if (attributes.isDir()) { objectTemp = new SftpApacheFileObject(FileInfoEnum.DIRECTORY); objectTemp.setFileName(each.getFilename()); objectTemp.setAbsolutePath(object.getPath()); directorylist.add(objectTemp); } else if (attributes.isReg()) { objectTemp = new SftpApacheFileObject(FileInfoEnum.FILE); objectTemp.setFileName(each.getFilename()); objectTemp.setAbsolutePath(object.getPath()); objectTemp.setFileSize(attributes.getSize()); objectTemp.setDate(attributes.getMtimeString()); objectTemp.setFileType(); willReturnObject.add(objectTemp); } } object = null; list = null; } } catch (ConnectionException | SftpException ex) { ex.printStackTrace(); } return willReturnObject; }
From source file:org.grouplens.grapht.graph.DAGNode.java
/** * Do a breadth-first search for an edge. * * @param pred The predicate for matching nodes. * @return The first node matching {@code pred} in a breadth-first search, or {@code null} if no * such node is found.//from w w w .j ava 2 s .c om */ public DAGEdge<V, E> findEdgeBFS(@Nonnull Predicate<? super DAGEdge<V, E>> pred) { Queue<DAGNode<V, E>> work = Lists.newLinkedList(); Set<DAGNode<V, E>> seen = Sets.newHashSet(); work.add(this); seen.add(this); while (!work.isEmpty()) { DAGNode<V, E> node = work.remove(); for (DAGEdge<V, E> e : node.getOutgoingEdges()) { // is this the edge we are looking for? if (pred.apply(e)) { return e; } else if (!seen.contains(e.getTail())) { seen.add(e.getTail()); work.add(e.getTail()); } } } // no node found return null; }
From source file:org.grouplens.grapht.graph.DAGNode.java
/** * Do a breadth-first search for a node. * * @param pred The predicate for matching nodes. * @return The first node matching {@code pred} in a breadth-first search, or {@code null} if no * such node is found./* w w w .j av a 2 s . c om*/ */ public DAGNode<V, E> findNodeBFS(@Nonnull Predicate<? super DAGNode<V, E>> pred) { if (pred.apply(this)) { return this; } Queue<DAGNode<V, E>> work = Lists.newLinkedList(); Set<DAGNode<V, E>> seen = Sets.newHashSet(); work.add(this); seen.add(this); while (!work.isEmpty()) { DAGNode<V, E> node = work.remove(); for (DAGEdge<V, E> e : node.getOutgoingEdges()) { // is this the node we are looking for? DAGNode<V, E> nbr = e.getTail(); if (!seen.contains(nbr)) { if (pred.apply(nbr)) { return nbr; } else { seen.add(nbr); work.add(nbr); } } } } // no node found return null; }
From source file:org.unitime.timetable.solver.curricula.CurriculaCourseDemands.java
/**
 * Computes, for every unordered pair of courses, the target number of
 * students expected to take both, and stores it in the curriculum model.
 *
 * For each course c1, a BFS over the course-group graph partitions the
 * other courses into group[0] and group[1]; judging by the final loop,
 * membership in group[0] forces a target share of 0.0 ("opt") and
 * membership in group[1] forces min(x1, x2) ("req"); unrelated pairs get
 * the independent estimate share = p1 * p2 * nrStudents.
 */
protected void computeTargetShare(int nrStudents, Collection<CurriculumCourse> courses,
        CurriculumCourseGroupsProvider course2groups, CurModel model) {
    for (CurriculumCourse c1 : courses) {
        // Expected enrollment of c1: its percentage share of the student body.
        float x1 = c1.getPercShare() * nrStudents;
        // group[0] / group[1]: courses transitively linked to c1 through
        // curriculum course groups, bucketed by group type — presumably
        // 0 = "optional" bucket, 1 = "required" bucket; TODO confirm the
        // semantics of CurriculumCourseGroup.getType().
        Set<CurriculumCourse>[] group = new HashSet[] { new HashSet<CurriculumCourse>(),
                new HashSet<CurriculumCourse>() };
        Queue<CurriculumCourse> queue = new LinkedList<CurriculumCourse>();
        queue.add(c1);
        // Groups already expanded, so each group is processed at most once.
        Set<CurriculumCourseGroup> done = new HashSet<CurriculumCourseGroup>();
        while (!queue.isEmpty()) {
            CurriculumCourse c = queue.poll();
            for (CurriculumCourseGroup g : course2groups.getGroups(c))
                if (done.add(g))
                    for (CurriculumCourse x : courses)
                        // A course x joins bucket 0 if the course it was reached
                        // from is already in bucket 0, otherwise the bucket given
                        // by the group's type; add(x) doubles as the visited
                        // check that keeps the BFS from re-queuing x.
                        if (!x.equals(c) && !x.equals(c1) && course2groups.getGroups(x).contains(g)
                                && group[group[0].contains(c) ? 0 : g.getType()].add(x))
                            queue.add(x);
        }
        for (CurriculumCourse c2 : courses) {
            float x2 = c2.getPercShare() * nrStudents;
            // Process each unordered pair exactly once (c1.id < c2.id).
            if (c1.getUniqueId() >= c2.getUniqueId())
                continue;
            // Independence assumption: expected joint enrollment.
            float share = c1.getPercShare() * c2.getPercShare() * nrStudents;
            boolean opt = group[0].contains(c2);
            boolean req = !opt && group[1].contains(c2);
            model.setTargetShare(c1.getUniqueId(), c2.getUniqueId(),
                    opt ? 0.0 : req ? Math.min(x1, x2) : share, true);
        }
    }
}
From source file:net.dv8tion.jda.core.audio.AudioConnection.java
private synchronized void setupCombinedExecutor() { if (combinedAudioExecutor == null) { combinedAudioExecutor = Executors.newSingleThreadScheduledExecutor( r -> new Thread(AudioManagerImpl.AUDIO_THREADS, r, threadIdentifier + " Combined Thread")); combinedAudioExecutor.scheduleAtFixedRate(() -> { try { List<User> users = new LinkedList<>(); List<short[]> audioParts = new LinkedList<>(); if (receiveHandler != null && receiveHandler.canReceiveCombined()) { long currentTime = System.currentTimeMillis(); for (Map.Entry<User, Queue<Pair<Long, short[]>>> entry : combinedQueue.entrySet()) { User user = entry.getKey(); Queue<Pair<Long, short[]>> queue = entry.getValue(); if (queue.isEmpty()) continue; Pair<Long, short[]> audioData = queue.poll(); //Make sure the audio packet is younger than 100ms while (audioData != null && currentTime - audioData.getLeft() > queueTimeout) { audioData = queue.poll(); }/*w w w. j ava 2s . co m*/ //If none of the audio packets were younger than 100ms, then there is nothing to add. if (audioData == null) { continue; } users.add(user); audioParts.add(audioData.getRight()); } if (!audioParts.isEmpty()) { int audioLength = audioParts.get(0).length; short[] mix = new short[1920]; //960 PCM samples for each channel int sample; for (int i = 0; i < audioLength; i++) { sample = 0; for (short[] audio : audioParts) { sample += audio[i]; } if (sample > Short.MAX_VALUE) mix[i] = Short.MAX_VALUE; else if (sample < Short.MIN_VALUE) mix[i] = Short.MIN_VALUE; else mix[i] = (short) sample; } receiveHandler.handleCombinedAudio(new CombinedAudio(users, mix)); } else { //No audio to mix, provide 20 MS of silence. (960 PCM samples for each channel) receiveHandler.handleCombinedAudio( new CombinedAudio(Collections.emptyList(), new short[1920])); } } } catch (Exception e) { LOG.log(e); } }, 0, 20, TimeUnit.MILLISECONDS); } }
From source file:org.paxle.indexer.impl.IndexerWorker.java
@Override protected void execute(ICommand command) { final long start = System.currentTimeMillis(); IIndexerDocument indexerDoc = null;/*ww w .ja v a 2 s. c o m*/ ArrayList<IIndexerDocument> indexerSubDocs = null; try { /* ================================================================ * Input Parameter Check * ================================================================ */ String errorMsg = null; if (command.getResult() != ICommand.Result.Passed) { errorMsg = String.format("Won't index resource '%s'. Command status is: '%s' (%s)", command.getLocation(), command.getResult(), command.getResultText()); } else if (command.getCrawlerDocument() == null) { errorMsg = String.format("Won't index resource '%s'. Crawler-document is null", command.getLocation()); } else if (command.getCrawlerDocument().getStatus() != ICrawlerDocument.Status.OK) { errorMsg = String.format("Won't index resource '%s'. Crawler-document status is: '%s' (%s)", command.getLocation(), command.getCrawlerDocument().getStatus(), command.getCrawlerDocument().getStatusText()); } else if (command.getParserDocument() == null) { errorMsg = String.format("Won't index resource '%s'. Parser-document is null", command.getLocation()); } else if (command.getParserDocument().getStatus() != IParserDocument.Status.OK) { errorMsg = String.format("Won't index resource '%s'. 
Parser-document status is: '%s' (%s)", command.getLocation(), command.getCrawlerDocument().getStatus(), command.getCrawlerDocument().getStatusText()); } if (errorMsg != null) { this.logger.warn(errorMsg); return; } /* ================================================================ * Generate Indexer Document * ================================================================ */ // generate the "main" indexer document from the "main" parser document including the // data from the command object if ((command.getParserDocument().getFlags() & IParserDocument.FLAG_NOINDEX) == 0) { this.logger.debug(String.format("Indexing of URL '%s' (%s) ...", command.getLocation(), command.getCrawlerDocument().getMimeType())); indexerDoc = this.generateIIndexerDoc(command.getLocation(), command.getCrawlerDocument().getCrawlerDate(), null, command.getParserDocument()); } else { this.logger.info(String.format("Indexing of URL '%s' (%s) ommitted due to 'noindex'-flag", command.getLocation(), command.getCrawlerDocument().getMimeType())); // don't exit here already, we still have to process the sub-parser-docs } // generate indexer docs from all parser-sub-documents and add them to the command indexerSubDocs = new ArrayList<IIndexerDocument>(); final class Entry { public String key; public IParserDocument pdoc; public Entry(final String key, final IParserDocument pdoc) { this.key = key; this.pdoc = pdoc; } } // traverse the tree of sub-documents final Queue<Entry> queue = new LinkedList<Entry>(); for (Map.Entry<String, IParserDocument> pdoce : command.getParserDocument().getSubDocs().entrySet()) queue.add(new Entry(pdoce.getKey(), pdoce.getValue())); while (!queue.isEmpty()) { Entry e = queue.remove(); if ((e.pdoc.getFlags() & IParserDocument.FLAG_NOINDEX) == 0) { IIndexerDocument indexerSubDoc = this.generateIIndexerDoc(command.getLocation(), command.getCrawlerDocument().getCrawlerDate(), e.key, e.pdoc); indexerSubDocs.add(indexerSubDoc); } for (final Map.Entry<String, IParserDocument> 
pdoce : e.pdoc.getSubDocs().entrySet()) queue.add(new Entry(e.key + "/" + pdoce.getKey(), pdoce.getValue())); } /* ================================================================ * Process indexer response * ================================================================ */ /* There may be the case, that - i.e. by a document's and it's parser's restriction - the main * document, from which the sub-docs are retrieved, may not be indexed, but links, and therefore * sub-docs, may be followed. * In this case we simply omit the main document. If the document has no children, then this is the * only thing we need to check for correctness. */ if (indexerSubDocs.size() == 0) { if (indexerDoc == null) { command.setResult(ICommand.Result.Failure, String.format("Indexer returned no indexer-document.")); return; } else if (indexerDoc.getStatus() == null || indexerDoc.getStatus() != IIndexerDocument.Status.OK) { command.setResult(ICommand.Result.Failure, String.format("Indexer-document status is '%s'.", indexerDoc.getStatus())); return; } } // XXX: what to take if both (pdoc and cdoc) contain a different value for last mod? if (command.getCrawlerDocument().getLastModDate() != null) { indexerDoc.set(IIndexerDocument.LAST_MODIFIED, command.getCrawlerDocument().getLastModDate()); } indexerDoc.set(IIndexerDocument.SIZE, Long.valueOf(command.getCrawlerDocument().getSize())); // setting command status to passed command.setResult(ICommand.Result.Passed); } catch (Throwable e) { // setting command status command.setResult(ICommand.Result.Failure, String.format("Unexpected '%s' while indexing resource. %s", e.getClass().getName(), e.getMessage())); // log error this.logger.warn(String.format("Unexpected '%s' while indexing resource '%s'.", e.getClass().getName(), command.getLocation()), e); } finally { /* Add indexer-docs to command-object. * * This must be done even in error situations to * - allow filters to correct the error (if possible) * - to report the error back properly (e.g. 
to store it into db * or send it back to a remote peer). */ if (indexerDoc != null) { command.addIndexerDocument(indexerDoc); } if (indexerSubDocs != null) { // get all indexer-sub-docs and add them to the command for (IIndexerDocument indexerSubDoc : indexerSubDocs) { // XXX: do sub-docs need a size-field, too? command.addIndexerDocument(indexerSubDoc); } } ICrawlerDocument crawlerDoc = command.getCrawlerDocument(); IParserDocument parserDoc = command.getParserDocument(); if (logger.isDebugEnabled()) { this.logger.info(String.format( "Finished indexing of resource '%s' in %d ms.\r\n" + "\tCrawler-Status: '%s' %s\r\n" + "\tParser-Status: '%s' %s\r\n" + "\tIndexer-Status: '%s' %s", command.getLocation(), Long.valueOf(System.currentTimeMillis() - start), (crawlerDoc == null) ? "unknown" : crawlerDoc.getStatus().toString(), (crawlerDoc == null) ? "" : (crawlerDoc.getStatusText() == null) ? "" : crawlerDoc.getStatusText(), (parserDoc == null) ? "unknown" : parserDoc.getStatus().toString(), (parserDoc == null) ? "" : (parserDoc.getStatusText() == null) ? "" : parserDoc.getStatusText(), (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(), (indexerDoc == null) ? "" : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText())); } else if (logger.isInfoEnabled()) { this.logger.info(String.format( "Finished indexing of resource '%s' in %d ms.\r\n" + "\tIndexer-Status: '%s' %s", command.getLocation(), Long.valueOf(System.currentTimeMillis() - start), (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(), (indexerDoc == null) ? "" : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText())); } } }
From source file:it.scoppelletti.mobilepower.app.FragmentLayoutController.java
/**
 * Rebuilds the succession of fragments in the single panel, pushing one
 * back-stack transaction per queued fragment.
 *
 * @param fragmentMgr Fragment manager.
 * @param fragmentQueue Fragments, consumed in order.
 * @return Identifier of the last element added to the back stack, or -1 if
 *         no transaction was committed.
 */
private int arrangePanel(FragmentManager fragmentMgr,
        Queue<FragmentLayoutController.FragmentEntry> fragmentQueue) {
    int lastCommitId = -1;

    while (!fragmentQueue.isEmpty()) {
        FragmentLayoutController.FragmentEntry next = fragmentQueue.remove();
        int commitId = -1;
        FragmentTransaction tn = null;
        try {
            tn = fragmentMgr.beginTransaction();
            tn.replace(myFrameIds[0], next.getFragment().asFragment(), next.getTag());
            tn.addToBackStack(null);
        } finally {
            // Commit even when an exception unwinds, as long as the
            // transaction was successfully started.
            if (tn != null) {
                commitId = tn.commit();
                tn = null;
            }
        }
        if (commitId >= 0) {
            lastCommitId = commitId;
        }
    }
    return lastCommitId;
}
From source file:specminers.smartic.MergingBlock.java
public List<State<String>> getNodesByBreadthFirstSearch(Automaton<String> automaton) { LinkedList<State<String>> V = new LinkedList<>(); Queue<State<String>> Q = new LinkedBlockingDeque<>(); V.add(automaton.getInitialState());//from ww w .j av a 2s .c om Q.add(automaton.getInitialState()); while (!Q.isEmpty()) { State<String> t = Q.poll(); for (Step<String> delta : automaton.getDelta().get(t)) { State<String> u = delta.getDestination(); if (!V.contains(u)) { V.add(u); Q.add(u); } } } return V; }
From source file:it.geosolutions.geobatch.nrl.ndvi.NDVIIngestAction.java
/** * //from ww w .j ava2 s .c o m */ public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException { listenerForwarder.setTask("Check config"); listenerForwarder.started(); NDVIIngestConfiguration configuration = getConfiguration(); if (configuration == null) { throw new IllegalStateException("ActionConfig is null."); } // List<File> ndviFiles = new ArrayList<File>(); Map<File, Calendar[]> inputFiles = new TreeMap<File, Calendar[]>(); while (!events.isEmpty()) { EventObject event = events.poll(); if (event instanceof FileSystemEvent) { FileSystemEvent fse = (FileSystemEvent) event; File source = fse.getSource(); if (!source.exists()) { LOGGER.error("File does not exist: " + source); continue; } Calendar interval[]; try { interval = parseDekDate(source.getName()); } catch (ActionException e) { LOGGER.error("Error parsing source name: " + e.getMessage()); continue; } inputFiles.put(source, interval); } else { throw new ActionException(this, "EventObject not handled " + event); } } ImageMosaicCommand imc = processFiles(inputFiles); LinkedList<EventObject> ret = new LinkedList<EventObject>(); ret.add(new EventObject(imc)); return ret; }