Usage examples for java.util.concurrent.BlockingQueue.poll(), collected from open-source projects.
E poll(); // retrieves and removes the head of the queue, or returns null if the queue is empty
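Before the project examples below, a minimal, self-contained sketch of the two poll variants (the queue contents are illustrative only):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class PollDemo {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(10);
        queue.offer("first");

        // Non-blocking variant: returns the head immediately, or null if the queue is empty.
        String head = queue.poll();   // "first"
        String none = queue.poll();   // null -- the queue is now empty

        // Timed variant: waits up to the timeout for an element before giving up.
        String timedOut = queue.poll(100, TimeUnit.MILLISECONDS); // null after ~100 ms

        System.out.println(head + " / " + none + " / " + timedOut);
    }
}

The null return value is the signal to check in every example below; unlike take(), poll() never blocks indefinitely.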
From source file:ubic.gemma.core.loader.genome.gene.ncbi.NcbiGeneConverter.java
public void convert(final BlockingQueue<NcbiGeneData> geneInfoQueue, final BlockingQueue<Gene> geneQueue) {
    // start up thread to convert a member of geneInfoQueue to a gene/geneproduct/databaseentry,
    // then push the gene onto the geneQueue for loading
    Thread convertThread = new Thread(new Runnable() {
        @Override
        @SuppressWarnings("synthetic-access")
        public void run() {
            while (!(sourceDone.get() && geneInfoQueue.isEmpty())) {
                try {
                    NcbiGeneData data = geneInfoQueue.poll();
                    if (data == null) {
                        continue;
                    }
                    Gene converted = NcbiGeneConverter.this.convert(data);
                    if (converted.getProducts().isEmpty()) {
                        log.info("Gene with no products skipped: " + converted);
                        continue;
                    }
                    geneQueue.put(converted);
                } catch (InterruptedException e) {
                    NcbiGeneConverter.log.warn("Interrupted");
                    break;
                } catch (Exception e) {
                    NcbiGeneConverter.log.error(e, e);
                    break;
                }
            }
            producerDone.set(true);
        }
    }, "Converter");
    convertThread.start();
}
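Note that the bare poll() in this loop returns immediately, so the thread busy-spins whenever geneInfoQueue is momentarily empty. A hedged sketch of an alternative (the timeout value is illustrative, not from the Gemma source) uses the timed overload to yield the CPU between checks; the enclosing try block already catches the InterruptedException it can throw:

// Sketch only: a timed poll avoids spinning while the producer is still filling the queue.
NcbiGeneData data = geneInfoQueue.poll(100, TimeUnit.MILLISECONDS);
if (data == null) {
    continue; // nothing arrived within the timeout; re-check the exit condition
}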
From source file:ubic.gemma.loader.genome.gene.ncbi.NcbiGeneConverter.java
public void convert(final BlockingQueue<NcbiGeneData> geneInfoQueue, final BlockingQueue<Gene> geneQueue) {
    // start up thread to convert a member of geneInfoQueue to a gene/geneproduct/databaseentry,
    // then push the gene onto the geneQueue for loading
    if (!retainProteinInformation) {
        log.info("Note that protein information will be ignored; set " + RETAIN_PROTEIN_INFO_PARAM
                + " to true to change");
    }
    Thread convertThread = new Thread(new Runnable() {
        @Override
        @SuppressWarnings("synthetic-access")
        public void run() {
            while (!(sourceDone.get() && geneInfoQueue.isEmpty())) {
                try {
                    NcbiGeneData data = geneInfoQueue.poll();
                    if (data == null) {
                        continue;
                    }
                    Gene converted = convert(data);
                    geneQueue.put(converted);
                } catch (InterruptedException e) {
                    log.warn("Interrupted");
                    break;
                } catch (Exception e) {
                    log.error(e, e);
                    break;
                }
            }
            producerDone.set(true);
        }
    }, "Converter");
    convertThread.start();
}
From source file:gobblin.couchbase.writer.CouchbaseWriterTest.java
private void drainQueue(BlockingQueue<Pair<AbstractDocument, Future>> queue, int threshold, long sleepTime,
        TimeUnit sleepUnit, List<Pair<AbstractDocument, Future>> failedFutures) {
    while (queue.remainingCapacity() < threshold) {
        if (sleepTime > 0) {
            Pair<AbstractDocument, Future> topElement = queue.peek();
            if (topElement != null) {
                try {
                    topElement.getSecond().get(sleepTime, sleepUnit);
                } catch (Exception te) {
                    failedFutures.add(topElement);
                }
                queue.poll();
            }
        }
    }
}
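The peek()/poll() pairing above is safe only because a single thread drains the queue; with concurrent drainers, another thread could remove the element between the two calls. A hedged sketch of a variant that is robust to that race (reusing the method's own parameter names):

// Sketch: poll first and work with the returned element, so no other
// consumer can remove it between inspection and removal.
Pair<AbstractDocument, Future> top = queue.poll();
if (top != null) {
    try {
        top.getSecond().get(sleepTime, sleepUnit);
    } catch (Exception e) {
        failedFutures.add(top);
    }
}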
From source file:org.apache.nifi.processors.standard.FetchFileTransfer.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int port = context.getProperty(UNDEFAULTED_PORT).evaluateAttributeExpressions(flowFile).asInteger();
    final String filename = context.getProperty(REMOTE_FILENAME).evaluateAttributeExpressions(flowFile)
            .getValue();

    // Try to get a FileTransfer object from our cache.
    BlockingQueue<FileTransferIdleWrapper> transferQueue;
    synchronized (fileTransferMap) {
        final Tuple<String, Integer> tuple = new Tuple<>(host, port);
        transferQueue = fileTransferMap.get(tuple);
        if (transferQueue == null) {
            transferQueue = new LinkedBlockingQueue<>();
            fileTransferMap.put(tuple, transferQueue);
        }

        // periodically close idle connections
        if (System.currentTimeMillis() - lastClearTime > IDLE_CONNECTION_MILLIS) {
            closeConnections(false);
            lastClearTime = System.currentTimeMillis();
        }
    }

    // we have a queue of FileTransfer Objects. Get one from the queue or create a new one.
    FileTransfer transfer;
    FileTransferIdleWrapper transferWrapper = transferQueue.poll();
    if (transferWrapper == null) {
        transfer = createFileTransfer(context);
    } else {
        transfer = transferWrapper.getFileTransfer();
    }

    // Pull data from remote system.
    final InputStream in;
    try {
        in = transfer.getInputStream(filename, flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(in, out);
                transfer.flush();
            }
        });
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
    } catch (final FileNotFoundException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} because the file could not be found on the remote system; routing to {}",
                new Object[] { flowFile, filename, host, REL_NOT_FOUND.getName() });
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        return;
    } catch (final PermissionDeniedException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} due to insufficient permissions; routing to {}",
                new Object[] { flowFile, filename, host, REL_PERMISSION_DENIED.getName() });
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        return;
    } catch (final ProcessException | IOException e) {
        try {
            transfer.close();
        } catch (final IOException e1) {
            getLogger().warn("Failed to close connection to {}:{} due to {}",
                    new Object[] { host, port, e.toString() }, e);
        }

        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {}:{} due to {}; routing to comms.failure",
                new Object[] { flowFile, filename, host, port, e.toString() }, e);
        session.transfer(session.penalize(flowFile), REL_COMMS_FAILURE);
        return;
    }

    // Add FlowFile attributes
    final String protocolName = transfer.getProtocolName();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(protocolName + ".remote.host", host);
    attributes.put(protocolName + ".remote.port", String.valueOf(port));
    attributes.put(protocolName + ".remote.filename", filename);

    if (filename.contains("/")) {
        final String path = StringUtils.substringBeforeLast(filename, "/");
        final String filenameOnly = StringUtils.substringAfterLast(filename, "/");
        attributes.put(CoreAttributes.PATH.key(), path);
        attributes.put(CoreAttributes.FILENAME.key(), filenameOnly);
    } else {
        attributes.put(CoreAttributes.FILENAME.key(), filename);
    }
    flowFile = session.putAllAttributes(flowFile, attributes);

    // emit provenance event and transfer FlowFile
    session.getProvenanceReporter().fetch(flowFile,
            protocolName + "://" + host + ":" + port + "/" + filename,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where
    // we ingest the data, delete/move the remote file, and then NiFi dies/is shut down before the session is committed. This would
    // result in data loss! If we commit the session first, we are safe.
    session.commit();

    final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
    if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
        try {
            transfer.deleteFile(null, filename);
        } catch (final FileNotFoundException e) {
            // file doesn't exist -- effectively the same as removing it. Move on.
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to remove the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
        String targetDir = context.getProperty(MOVE_DESTINATION_DIR).evaluateAttributeExpressions(flowFile)
                .getValue();
        if (!targetDir.endsWith("/")) {
            targetDir = targetDir + "/";
        }
        final String simpleFilename = StringUtils.substringAfterLast(filename, "/");
        final String target = targetDir + simpleFilename;

        try {
            transfer.rename(filename, target);
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to rename the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    }
}
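The poll-or-create pattern above (take an idle connection if one is available, otherwise build a fresh one, then offer() it back after use) is a common lock-free way to pool resources. A stripped-down, hedged sketch of the same idea, with placeholder types:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Supplier;

// Illustrative sketch only; the element type and factory are placeholders.
class SimplePool<C> {
    private final BlockingQueue<C> idle = new LinkedBlockingQueue<>();
    private final Supplier<C> factory;

    SimplePool(Supplier<C> factory) {
        this.factory = factory;
    }

    C borrow() {
        C pooled = idle.poll();   // non-blocking: null when nothing is idle
        return (pooled != null) ? pooled : factory.get();
    }

    void release(C resource) {
        idle.offer(resource);     // the queue is unbounded, so offer always succeeds
    }
}

Because poll() never blocks, a burst of callers simply creates extra connections instead of queueing up, which is exactly the behavior the processor wants.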
From source file:com.clickha.nifi.processors.FetchFileTransferV2.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int port = context.getProperty(UNDEFAULTED_PORT).evaluateAttributeExpressions(flowFile).asInteger();
    final String filename = context.getProperty(REMOTE_FILENAME).evaluateAttributeExpressions(flowFile)
            .getValue();

    // Try to get a FileTransfer object from our cache.
    BlockingQueue<FileTransferIdleWrapper> transferQueue;
    synchronized (fileTransferMap) {
        final Tuple<String, Integer> tuple = new Tuple<>(host, port);
        transferQueue = fileTransferMap.get(tuple);
        if (transferQueue == null) {
            transferQueue = new LinkedBlockingQueue<>();
            fileTransferMap.put(tuple, transferQueue);
        }

        // periodically close idle connections
        if (System.currentTimeMillis() - lastClearTime > IDLE_CONNECTION_MILLIS) {
            closeConnections(false);
            lastClearTime = System.currentTimeMillis();
        }
    }

    // we have a queue of FileTransfer Objects. Get one from the queue or create a new one.
    FileTransferV2 transfer;
    FileTransferIdleWrapper transferWrapper = transferQueue.poll();
    if (transferWrapper == null) {
        transfer = createFileTransfer(context);
    } else {
        transfer = transferWrapper.getFileTransfer();
    }

    // Pull data from remote system.
    final InputStream in;
    try {
        in = transfer.getInputStream(filename, flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(in, out);
                transfer.flush();
            }
        });
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
    } catch (final FileNotFoundException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} because the file could not be found on the remote system; routing to {}",
                new Object[] { flowFile, filename, host, REL_NOT_FOUND.getName() });
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        return;
    } catch (final PermissionDeniedException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} due to insufficient permissions; routing to {}",
                new Object[] { flowFile, filename, host, REL_PERMISSION_DENIED.getName() });
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        return;
    } catch (final ProcessException | IOException e) {
        try {
            transfer.close();
        } catch (final IOException e1) {
            getLogger().warn("Failed to close connection to {}:{} due to {}",
                    new Object[] { host, port, e.toString() }, e);
        }

        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {}:{} due to {}; routing to comms.failure",
                new Object[] { flowFile, filename, host, port, e.toString() }, e);
        session.transfer(session.penalize(flowFile), REL_COMMS_FAILURE);
        return;
    }

    // Add FlowFile attributes
    final String protocolName = transfer.getProtocolName();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(protocolName + ".remote.host", host);
    attributes.put(protocolName + ".remote.port", String.valueOf(port));
    attributes.put(protocolName + ".remote.filename", filename);

    if (filename.contains("/")) {
        final String path = StringUtils.substringBeforeLast(filename, "/");
        final String filenameOnly = StringUtils.substringAfterLast(filename, "/");
        attributes.put(CoreAttributes.PATH.key(), path);
        attributes.put(CoreAttributes.FILENAME.key(), filenameOnly);
    } else {
        attributes.put(CoreAttributes.FILENAME.key(), filename);
    }
    flowFile = session.putAllAttributes(flowFile, attributes);

    // emit provenance event and transfer FlowFile
    session.getProvenanceReporter().fetch(flowFile,
            protocolName + "://" + host + ":" + port + "/" + filename,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where
    // we ingest the data, delete/move the remote file, and then NiFi dies/is shut down before the session is committed. This would
    // result in data loss! If we commit the session first, we are safe.
    session.commit();

    final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
    if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
        try {
            transfer.deleteFile(null, filename);
        } catch (final FileNotFoundException e) {
            // file doesn't exist -- effectively the same as removing it. Move on.
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to remove the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
        String targetDir = context.getProperty(MOVE_DESTINATION_DIR).evaluateAttributeExpressions(flowFile)
                .getValue();
        if (!targetDir.endsWith("/")) {
            targetDir = targetDir + "/";
        }
        final String simpleFilename = StringUtils.substringAfterLast(filename, "/");
        final String target = targetDir + simpleFilename;

        try {
            transfer.rename(filename, target);
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to rename the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    }
}
From source file:com.fluidops.iwb.api.CommunicationServiceImpl.java
protected static void handlePendingRequestsInternal() {
    BlockingQueue<UpdateRequest> requests = CommunicationServiceImpl.requests.instance();

    Long start = System.currentTimeMillis();
    handleRequestWarning(requests.size());

    // NOTE: we implement a while on top of the boolean
    // variable (rather than the requests variable itself)
    // in order to be able to push the synchronized block inside;
    // this allows for interleaving of pushing and polling
    // and thus improved eCM core performance, as it imposes minimal
    // blocking time to the process filling the requests array
    boolean requestsEmpty = false;
    Set<URI> updatedURIs = new HashSet<URI>();
    Set<URI> deletedURIs = new HashSet<URI>();
    Set<URI> contextsWithRemoveOp = new HashSet<URI>();
    Set<URI> contextsWithAddOp = new HashSet<URI>();
    int ctr = 0;

    ReadWriteDataManager dm = null;
    try {
        while (!requestsEmpty) {
            UpdateRequest request = null;
            if (requests.isEmpty())
                requestsEmpty = true; // abort
            else
                request = requests.poll();

            // we process the request outside of the synchronized
            // block, in order to release the lock as early as possible
            if (request != null) {
                try {
                    // we only open the data manager if required, and only once
                    if (dm == null)
                        dm = ReadWriteDataManagerImpl.openDataManager(Global.repository);

                    request.handleRequest(dm, updatedURIs, deletedURIs, contextsWithRemoveOp,
                            contextsWithAddOp);
                    ctr++;

                    if ((ctr % 1000) == 0)
                        logger.info("Synching requests into INT DB - count=" + ctr + " queue="
                                + requests.size());
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            }
        }

        if (ctr > 0) // something has been changed
        {
            String cleanupMsg = dm.cleanupMetaGarbage(contextsWithRemoveOp);

            KeywordIndexAPI.updateUrisInIndex(updatedURIs);
            KeywordIndexAPI.updateUrisInIndex(deletedURIs);

            logger.debug("Synchronized " + ctr + " objects to INT database in "
                    + (System.currentTimeMillis() - start) + "ms (" + cleanupMsg + ")");
        }
        // otherwise: no action has been performed, nothing to do
    } finally {
        ReadWriteDataManagerImpl.closeQuietly(dm);
    }
}
From source file:com.splout.db.qnode.QNodeHandlerContext.java
/**
 * This method can be called to initialize a pool of connections to a dnode. This method may be called from multiple
 * threads so it should be safe to call it concurrently.
 */
public void initializeThriftClientCacheFor(String dnode) throws TTransportException, InterruptedException {
    // this lock is on the whole cache but we would actually be interested in a per-DNode lock...
    // there's only one lock for simplicity.
    thriftClientCacheLock.lock();
    try {
        // initialize queue for this DNode
        BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
        if (dnodeQueue == null) {
            // this assures that the per-DNode queue is only created once and then reused.
            dnodeQueue = new LinkedBlockingDeque<DNodeService.Client>(thriftClientPoolSize);
        }
        if (dnodeQueue.isEmpty()) {
            try {
                for (int i = dnodeQueue.size(); i < thriftClientPoolSize; i++) {
                    dnodeQueue.put(DNodeClient.get(dnode));
                }
                // we only put the queue if all connections have been populated
                thriftClientCache.put(dnode, dnodeQueue);
            } catch (TTransportException e) {
                log.error("Error while trying to populate queue for " + dnode
                        + ", will discard created connections.", e);
                while (!dnodeQueue.isEmpty()) {
                    dnodeQueue.poll().getOutputProtocol().getTransport().close();
                }
                throw e;
            }
        } else {
            // it should be safe to call this method from different places concurrently
            // so we contemplate the case where another Thread already populated the queue
            // and only populate it if it's really empty.
            log.warn(Thread.currentThread().getName() + " : queue for [" + dnode
                    + "] is not empty - it was populated before.");
        }
    } finally {
        thriftClientCacheLock.unlock();
    }
}
From source file:annis.gui.resultview.ResultViewPanel.java
/**
 * Set a new query's results in the result panel.
 *
 * @param queue holds the salt graph
 * @param q holds the ordinary query
 * @param numberOfResults the total number of matches.
 */
public void setQueryResultQueue(BlockingQueue<SaltProject> queue, PagedResultQuery q, int numberOfResults) {
    this.projectQueue = queue;
    this.currentQuery = q;
    this.numberOfResults = numberOfResults;

    paging.setPageSize(q.getLimit(), false);
    paging.setInfo(q.getQuery());

    resultLayout.removeAllComponents();
    resultPanelList.clear();

    Set<String> corpora = q.getCorpora();
    if (corpora.size() == 1) {
        // fetch corpus config
        CorpusConfig corpusConfig = Helper.getCorpusConfig(corpora.iterator().next());
        if (corpusConfig != null && corpusConfig.getConfig() != null
                && corpusConfig.getConfig().containsKey(KEY_DEFAULT_BASE_TEXT_SEGMENTATION)) {
            if (selectedSegmentationLayer == null) {
                selectedSegmentationLayer = corpusConfig.getConfig(KEY_DEFAULT_BASE_TEXT_SEGMENTATION);
            }
        }
    }

    // get the first query result
    SaltProject first = queue.poll();
    Preconditions.checkState(first != null, "There must be already an element in the queue");

    addQueryResult(q, Arrays.asList(first));
}
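The bare poll() here relies on the producer having already delivered the first result; if it has not, the Preconditions check fails. A hedged sketch of a more forgiving variant (the timeout value is illustrative, and the timed overload throws InterruptedException, which the caller would need to handle):

// Sketch: a timed poll tolerates a slow producer instead of tripping the precondition.
SaltProject first = queue.poll(30, TimeUnit.SECONDS);
Preconditions.checkState(first != null, "No query result arrived within the timeout");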
From source file:com.datatorrent.stram.engine.StreamingContainer.java
public void heartbeatLoop() throws Exception {
    umbilical.log(containerId, "[" + containerId + "] Entering heartbeat loop..");
    logger.debug("Entering heartbeat loop (interval is {} ms)", this.heartbeatIntervalMillis);
    final YarnConfiguration conf = new YarnConfiguration();
    long tokenLifeTime = (long) (containerContext.getValue(LogicalPlan.TOKEN_REFRESH_ANTICIPATORY_FACTOR)
            * containerContext.getValue(LogicalPlan.HDFS_TOKEN_LIFE_TIME));
    long expiryTime = System.currentTimeMillis();
    final Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        logger.debug("token: {}", token);
    }
    String hdfsKeyTabFile = containerContext.getValue(LogicalPlan.KEY_TAB_FILE);
    while (!exitHeartbeatLoop) {

        if (UserGroupInformation.isSecurityEnabled() && System.currentTimeMillis() >= expiryTime
                && hdfsKeyTabFile != null) {
            expiryTime = StramUserLogin.refreshTokens(tokenLifeTime, FileUtils.getTempDirectoryPath(),
                    containerId, conf, hdfsKeyTabFile, credentials, null, false);
        }
        synchronized (this.heartbeatTrigger) {
            try {
                this.heartbeatTrigger.wait(heartbeatIntervalMillis);
            } catch (InterruptedException e1) {
                logger.warn("Interrupted in heartbeat loop, exiting..");
                break;
            }
        }

        long currentTime = System.currentTimeMillis();
        ContainerHeartbeat msg = new ContainerHeartbeat();
        msg.jvmName = jvmName;
        if (this.bufferServerAddress != null) {
            msg.bufferServerHost = this.bufferServerAddress.getHostName();
            msg.bufferServerPort = this.bufferServerAddress.getPort();
            if (bufferServer != null && !eventloop.isActive()) {
                logger.warn("Requesting restart due to terminated event loop");
                msg.restartRequested = true;
            }
        }
        msg.memoryMBFree = ((int) (Runtime.getRuntime().freeMemory() / (1024 * 1024)));
        garbageCollectorMXBeans = ManagementFactory.getGarbageCollectorMXBeans();
        for (GarbageCollectorMXBean bean : garbageCollectorMXBeans) {
            msg.gcCollectionTime += bean.getCollectionTime();
            msg.gcCollectionCount += bean.getCollectionCount();
        }

        ContainerHeartbeatResponse rsp;
        do {
            ContainerStats stats = new ContainerStats(containerId);
            // gather heartbeat info for all operators
            for (Map.Entry<Integer, Node<?>> e : nodes.entrySet()) {
                OperatorHeartbeat hb = new OperatorHeartbeat();
                hb.setNodeId(e.getKey());
                hb.setGeneratedTms(currentTime);
                hb.setIntervalMs(heartbeatIntervalMillis);
                if (e.getValue().commandResponse.size() > 0) {
                    BlockingQueue<StatsListener.OperatorResponse> commandResponse = e.getValue().commandResponse;
                    ArrayList<StatsListener.OperatorResponse> response = new ArrayList<StatsListener.OperatorResponse>();
                    for (int i = 0; i < commandResponse.size(); i++) {
                        response.add(commandResponse.poll());
                    }
                    hb.requestResponse = response;
                }
                OperatorContext context = e.getValue().context;
                context.drainStats(hb.getOperatorStatsContainer());

                if (context.getThread() == null || context.getThread().getState() != Thread.State.TERMINATED) {
                    hb.setState(DeployState.ACTIVE);
                } else if (failedNodes.contains(hb.nodeId)) {
                    hb.setState(DeployState.FAILED);
                } else {
                    logger.debug("Reporting SHUTDOWN state because thread is {} and failedNodes is {}",
                            context.getThread(), failedNodes);
                    hb.setState(DeployState.SHUTDOWN);
                }

                stats.addNodeStats(hb);
            }

            /**
             * Container stats published for whoever is interested in listening.
             * Currently interested candidates are TupleRecorderCollection and BufferServerStatsSubscriber
             */
            eventBus.publish(new ContainerStatsEvent(stats));

            msg.setContainerStats(stats);

            // heartbeat call and follow-up processing
            //logger.debug("Sending heartbeat for {} operators.", msg.getContainerStats().size());
            msg.sentTms = System.currentTimeMillis();

            rsp = umbilical.processHeartbeat(msg);
            processHeartbeatResponse(rsp);

            if (rsp.hasPendingRequests) {
                logger.info("Waiting for pending request.");
                synchronized (this.heartbeatTrigger) {
                    try {
                        this.heartbeatTrigger.wait(500);
                    } catch (InterruptedException ie) {
                        logger.warn("Interrupted in heartbeat loop", ie);
                        break;
                    }
                }
            }
        } while (rsp.hasPendingRequests);
    }
    logger.debug("Exiting heartbeat loop");
    umbilical.log(containerId, "[" + containerId + "] Exiting heartbeat loop..");
}
From source file:com.xorcode.andtweet.AndTweetService.java
private int saveQueue(BlockingQueue<CommandData> q, String prefsFileName) {
    Context context = MyPreferences.getContext();
    int count = 0;
    // Delete any existing saved queue
    SharedPreferencesUtil.delete(context, prefsFileName);

    if (q.size() > 0) {
        SharedPreferences sp = MyPreferences.getSharedPreferences(prefsFileName, MODE_PRIVATE);
        while (q.size() > 0) {
            CommandData cd = q.poll();
            cd.save(sp, count);
            MyLog.v(TAG, "Command saved: " + cd.toString());
            count += 1;
        }
        MyLog.d(TAG, "Queue saved to " + prefsFileName + ", " + count + " msgs");
    }
    return count;
}
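Finally, note that the size()-then-poll() loop above can observe a stale size if another thread drains the queue concurrently, in which case poll() returns null and cd.save(...) would throw a NullPointerException. A hedged sketch of the idiomatic null-checking drain, reusing the method's own variables:

// Sketch: polling until null drains the queue safely even with concurrent consumers.
CommandData cd;
while ((cd = q.poll()) != null) {
    cd.save(sp, count);
    MyLog.v(TAG, "Command saved: " + cd.toString());
    count += 1;
}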