List of usage examples for java.util.concurrent.LinkedBlockingQueue
public LinkedBlockingQueue()
From source file:com.jkoolcloud.tnt4j.streams.inputs.TNTInputStream.java
/**
 * Creates a default thread pool executor service for the given number of threads. The task
 * queue backing this executor service is unbounded, so memory use may grow high while it
 * stores every task created by producer threads.
 *
 * @param threadsQty
 *            the number of threads in the pool
 *
 * @return the newly created thread pool executor
 *
 * @see ThreadPoolExecutor#ThreadPoolExecutor(int, int, long, TimeUnit, BlockingQueue, ThreadFactory)
 */
private ExecutorService getDefaultExecutorService(int threadsQty) {
    StreamsThreadFactory stf = new StreamsThreadFactory("StreamDefaultExecutorThread-"); // NON-NLS
    stf.addThreadFactoryListener(new StreamsThreadFactoryListener());

    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadsQty, threadsQty, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), stf);

    return tpe;
}
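If the unbounded queue noted in the Javadoc is a concern, the JDK lets you cap it by passing a capacity to the LinkedBlockingQueue constructor and picking a saturation policy. A minimal sketch, not from the original source; the pool size, capacity of 10,000, and default thread factory are illustrative:

import java.util.concurrent.*;

public class BoundedExecutorSketch {
    public static void main(String[] args) {
        // When the queue holds 10_000 tasks, CallerRunsPolicy makes the submitting
        // thread run the task itself, throttling producers instead of letting the
        // task backlog exhaust memory.
        ExecutorService bounded = new ThreadPoolExecutor(4, 4, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(10_000),
                Executors.defaultThreadFactory(),
                new ThreadPoolExecutor.CallerRunsPolicy());
        bounded.submit(() -> System.out.println("task ran"));
        bounded.shutdown();
    }
}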
From source file:com.commontime.plugin.LocationManager.java
private void initEventQueue() {
    // queue is limited to one thread at a time
    queue = new LinkedBlockingQueue<Runnable>();
    threadPoolExecutor = new PausableThreadPoolExecutor(queue);

    // Add a timeout check
    new Handler().postDelayed(new Runnable() {
        @Override
        public void run() {
            checkIfDomSignaldDelegateReady();
        }
    }, CDV_LOCATION_MANAGER_DOM_DELEGATE_TIMEOUT * 1000);
}
From source file:edu.cornell.med.icb.goby.modes.SortMode.java
/**
 * Sort the alignment.
 *
 * @throws java.io.IOException error reading / writing
 */
@Override
public void execute() throws IOException {
    final String threadId = String.format("%02d", Thread.currentThread().getId());
    if (splitSize <= 0) {
        final long allocatedHeapSize = Runtime.getRuntime().totalMemory();
        final long freeInHeap = Runtime.getRuntime().freeMemory();
        final long maxHeapSize = Runtime.getRuntime().maxMemory();
        final long freeMemory = maxHeapSize - allocatedHeapSize + freeInHeap; // Util.availableMemory()
        splitSize = (long) (freeMemory * memoryPercentageForWork)
                / (long) ((numThreads > 0 ? numThreads : 1) * splitSizeScalingFactor);
        LOG.info(String.format("Maximum memory is %s. Using a split-size of %s",
                ICBStringUtils.humanMemorySize(freeMemory), ICBStringUtils.humanMemorySize(splitSize)));
    }
    final File entriesFile = new File(basename + ".entries");
    if (!entriesFile.exists()) {
        System.err.println("Could not locate alignment .entries file " + entriesFile.toString());
        return;
    }
    final long fileSize = entriesFile.length();

    // Reduce the number of processors by one, as one thread is used by this running program
    // and it will be utilized since we've chosen CallerRunsPolicy
    LOG.debug(String.format("sort-large will run with %d threads (0 == no thread pool)", numThreads));
    final AlignmentReader reader = new AlignmentReaderImpl(basename);
    try {
        reader.readHeader();
        if (reader.isSorted()) {
            LOG.warn("Warning: The input alignment is already sorted.");
        }
    } finally {
        reader.close();
    }

    if (numThreads > 0) {
        executorService = new ThreadPoolExecutor(
                numThreads, // core thread pool size
                numThreads, // maximum thread pool size
                10, // time to wait before resizing pool
                TimeUnit.MINUTES,
                new LinkedBlockingQueue<Runnable>());
        // Alternatives considered:
        // new ArrayBlockingQueue<Runnable>(additionalThreads, true)
        // new ThreadPoolExecutor.CallerRunsPolicy()
    }

    // Setup splits and start first pass sort
    LOG.debug("Splitting file and sorting all splits");
    long numberOfSplits = 0;
    long splitStart = 0;
    boolean lastSplit = false;
    boolean firstSort = true;
    progressSplitSort = new ProgressLogger(LOG, "split-sorts");
    progressSplitSort.displayFreeMemory = true;

    int count = 0;
    ObjectArrayList<Runnable> splits = new ObjectArrayList<Runnable>();
    while (!lastSplit) {
        long splitEnd = splitStart + splitSize;
        if (splitEnd >= fileSize - 1) {
            splitEnd = fileSize - 1;
            lastSplit = true;
        }
        final SortMergeSplit split = new SortMergeSplit(splitStart, splitEnd);
        numberOfSplits++;
        splits.add(sortSplit(split, firstSort));
        firstSort = false;
        splitStart = splitEnd;
    }
    LOG.info(String.format("[%s] Split file into %d pieces", threadId, numberOfSplits));
    progressSplitSort.expectedUpdates = numberOfSplits;
    progressSplitSort.start();
    for (Runnable toRun : splits) {
        if (executorService != null) {
            executorService.submit(toRun);
        } else {
            toRun.run();
        }
    }
    while (numSplitsCompleted.get() != numberOfSplits) {
        // Wait a bit for tasks to finish before finding more to submit
        if (!exceptions.isEmpty()) {
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    progressSplitSort.done();

    progressMergeSort = new ProgressLogger(LOG, "merges");
    progressMergeSort.displayFreeMemory = true;
    progressMergeSort.expectedUpdates = numberOfSplits;
    progressMergeSort.start();

    // Subsequent sorts
    boolean lastMerge = false;
    boolean done = false;
    while (!done) {
        if (!exceptions.isEmpty()) {
            break;
        }
        // Move any completed sorts back into the splitsToMerge queue
        while (true) {
            final SortMergeSplit sortedSplit = sortedSplits.poll();
            if (sortedSplit != null) {
                splitsToMerge.add(sortedSplit);
                numSortMergesRunning.decrementAndGet();
                splitsToMergeSize.incrementAndGet();
            } else {
                break;
            }
        }
        int splitsToMergeSizeLocal = splitsToMergeSize.get();
        if (lastMerge && splitsToMergeSizeLocal == 1) {
            // We're done
            break;
        }
        final int numSplitsForMerge;
        if (splitsToMergeSizeLocal == numberOfSplits && numberOfSplits <= filesPerMerge) {
            numSplitsForMerge = (int) numberOfSplits;
            lastMerge = true;
        } else if (splitsToMergeSizeLocal == 0) {
            // Nothing to sort this iteration
            numSplitsForMerge = 0;
        } else if (splitsToMergeSizeLocal == 1) {
            // Just one thing to sort, but it is not the complete merge yet. Wait.
            numSplitsForMerge = 0;
        } else if (splitsToMergeSizeLocal > filesPerMerge) {
            numSplitsForMerge = filesPerMerge;
        } else {
            // Equal to or less than filesPerMerge. Perhaps the last merge?
            final List<SortMergeSplitFileRange> ranges = mergeMultiSplitRangeLists(splitsToMerge);
            if (ranges.size() == 1 && ranges.get(0).isRange(0, fileSize - 1)) {
                // Last merge.
                lastMerge = true;
                numSplitsForMerge = splitsToMergeSizeLocal;
            } else if (splitsToMergeSizeLocal == filesPerMerge) {
                // We have enough to merge, but it's not the last merge
                numSplitsForMerge = splitsToMergeSizeLocal;
            } else {
                // We don't have enough to merge and it's not the last merge
                numSplitsForMerge = 0;
            }
        }
        if (numSplitsForMerge > 0) {
            final List<SortMergeSplit> toMerge = new ArrayList<SortMergeSplit>(numSplitsForMerge);
            for (int i = 0; i < numSplitsForMerge; i++) {
                splitsToMergeSizeLocal = splitsToMergeSize.decrementAndGet();
                toMerge.add(splitsToMerge.poll());
            }
            LOG.debug(String.format("[%s] %d items in queue to sort after removing %d for sorting", threadId,
                    splitsToMergeSizeLocal, numSplitsForMerge));
            mergeSplits(toMerge, lastMerge);
        } else {
            // Wait a bit for tasks to finish before finding more to submit
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

    if (executorService != null) {
        LOG.debug(String.format("[%s] Waiting for threads to finish.", threadId));
        // accept no new tasks, but wait for all of the executor threads to finish:
        executorService.shutdown();
        try {
            while (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    progressMergeSort.stop();

    if (!filesToDelete.isEmpty()) {
        // These files weren't deleted after merge for some reason. We'll try again one more time.
        while (true) {
            final File cleanupFile = filesToDelete.poll();
            if (cleanupFile == null) {
                break;
            } else {
                deleteFile(cleanupFile, false);
            }
        }
    }
    if (exceptions.isEmpty()) {
        System.err.println("Sort completed");
        final SortMergeSplit fullFile = splitsToMerge.poll();
        LOG.info(String.format("%s made up from %d splits", fullFile, fullFile.numFiles));
        LOG.info(String.format("Took %d secondary sort/merges", numMergesExecuted.get()));
    } else {
        LOG.error("Potentially multiple exceptions follow");
        for (final Throwable t : exceptions) {
            LOG.error(t);
        }
    }
}
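The merge loop above drains completed splits with the non-blocking poll() and falls back to a fixed one-second Thread.sleep() when nothing is ready. LinkedBlockingQueue also offers a timed poll that blocks until an element arrives or the timeout elapses, which removes the unconditional sleep. A minimal sketch of that pattern; the queue contents and names are stand-ins, not from the Goby source:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedPollExample {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> completed = new LinkedBlockingQueue<>();
        completed.put("split-0");

        // poll(timeout) blocks until an element arrives or one second passes;
        // it returns null on timeout, so no separate Thread.sleep() is needed.
        String next = completed.poll(1, TimeUnit.SECONDS);
        System.out.println(next != null ? "merge " + next : "nothing finished yet");
    }
}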
From source file:com.commontime.plugin.LocationManager.java
private void checkEventQueue() {
    if (threadPoolExecutor != null && queue != null)
        return;

    debugWarn("WARNING event queue should not be null.");
    queue = new LinkedBlockingQueue<Runnable>();
    threadPoolExecutor = new PausableThreadPoolExecutor(queue);
}
From source file:com.facebook.infrastructure.service.StorageService.java
public StorageService() throws Throwable {
    init();
    uptime_ = System.currentTimeMillis();
    storageLoadBalancer_ = new StorageLoadBalancer(this);
    endPointSnitch_ = new EndPointSnitch();

    /* register the verb handlers */
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.tokenVerbHandler_,
            new TokenUpdateVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.binaryVerbHandler_,
            new BinaryVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.loadVerbHandler_,
            new LoadVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.mutationVerbHandler_,
            new RowMutationVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.readRepairVerbHandler_,
            new ReadRepairVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.readVerbHandler_,
            new ReadVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bootStrapInitiateVerbHandler_,
            new Table.BootStrapInitiateVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bootStrapInitiateDoneVerbHandler_,
            new StorageService.BootstrapInitiateDoneVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bootStrapTerminateVerbHandler_,
            new StreamManager.BootstrapTerminateVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(HttpConnection.httpRequestVerbHandler_,
            new HttpRequestVerbHandler(this));
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.tokenInfoVerbHandler_,
            new TokenInfoVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.mbrshipCleanerVerbHandler_,
            new MembershipCleanerVerbHandler());
    MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bsMetadataVerbHandler_,
            new BootstrapMetadataVerbHandler());

    /* register the stage for the mutations */
    int threadCount = DatabaseDescriptor.getThreadsPerPool();
    consistencyManager_ = new DebuggableThreadPoolExecutor(threadCount, threadCount, Integer.MAX_VALUE,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactoryImpl("CONSISTENCY-MANAGER"));
    StageManager.registerStage(StorageService.mutationStage_,
            new MultiThreadedStage("ROW-MUTATION", threadCount));
    StageManager.registerStage(StorageService.readStage_, new MultiThreadedStage("ROW-READ", threadCount));
    /* Stage for handling the HTTP messages. */
    StageManager.registerStage(HttpConnection.httpStage_, new SingleThreadedStage("HTTP-REQUEST"));

    if (DatabaseDescriptor.isRackAware())
        nodePicker_ = new RackAwareStrategy(tokenMetadata_);
    else
        nodePicker_ = new RackUnawareStrategy(tokenMetadata_);
}
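One detail worth noting in the consistency-manager pool above: with corePoolSize equal to maximumPoolSize, the Integer.MAX_VALUE-second keep-alive is effectively moot, because core threads never time out unless the executor is explicitly told to allow it. A small sketch of that JDK behavior; the pool size and timeout below are illustrative, not from the Cassandra source:

import java.util.concurrent.*;

public class CoreThreadTimeoutSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor stage = new ThreadPoolExecutor(8, 8, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        // With corePoolSize == maximumPoolSize, idle core threads live forever
        // unless this is enabled; then the 60-second keep-alive applies to them too.
        stage.allowCoreThreadTimeOut(true);
        stage.shutdown();
    }
}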
From source file:com.wordnik.swaggersocket.server.SwaggerSocketProtocolInterceptor.java
private final void attachWriter(final AtmosphereResource r) {
    final AtmosphereRequest request = r.getRequest();
    AtmosphereResponse res = r.getResponse();
    AsyncIOWriter writer = res.getAsyncIOWriter();

    BlockingQueue<AtmosphereResource> queue = (BlockingQueue<AtmosphereResource>) getContextValue(request,
            SUSPENDED_RESPONSE);
    if (queue == null) {
        queue = new LinkedBlockingQueue<AtmosphereResource>();
        request.getSession().setAttribute(SUSPENDED_RESPONSE, queue);
    }

    if (AtmosphereInterceptorWriter.class.isAssignableFrom(writer.getClass())) {
        // WebSocket already had one.
        if (r.transport() != AtmosphereResource.TRANSPORT.WEBSOCKET) {
            writer = new AtmosphereInterceptorWriter() {
                @Override
                protected void writeReady(AtmosphereResponse response, byte[] data) throws IOException {
                    // We are buffering response.
                    if (data == null)
                        return;

                    BlockingQueue<AtmosphereResource> queue = (BlockingQueue<AtmosphereResource>) getContextValue(
                            request, SUSPENDED_RESPONSE);
                    if (queue != null) {
                        AtmosphereResource resource;
                        try {
                            // TODO: Should this be configurable
                            // We stay suspended for 60 seconds
                            resource = queue.poll(60, TimeUnit.SECONDS);
                        } catch (InterruptedException e) {
                            logger.trace("", e);
                            return;
                        }

                        if (resource == null) {
                            logger.debug("No resource was suspended, resuming the second connection.");
                        } else {
                            logger.trace("Resuming {}", resource.uuid());
                            try {
                                OutputStream o = resource.getResponse().getResponse().getOutputStream();
                                o.write(data);
                                o.flush();
                                resource.resume();
                            } catch (IOException ex) {
                                logger.warn("", ex);
                            }
                        }
                    } else {
                        logger.error("Queue was null");
                    }
                }

                /**
                 * Add an {@link AsyncIOInterceptor} that will be invoked in the order it was added.
                 *
                 * @param filter {@link AsyncIOInterceptor}
                 * @return this
                 */
                public AtmosphereInterceptorWriter interceptor(AsyncIOInterceptor filter) {
                    if (!filters.contains(filter)) {
                        filters.addLast(filter);
                    }
                    return this;
                }
            };
            res.asyncIOWriter(writer);
        }
        // REVISIT: need a better way to add a custom filter at the first entry and not at the last,
        // e.g. interceptor(AsyncIOInterceptor interceptor, int position)
        LinkedList<AsyncIOInterceptor> filters = AtmosphereInterceptorWriter.class.cast(writer).filters();
        if (!filters.contains(interceptor)) {
            filters.addFirst(interceptor);
        }
    }
}
From source file:com.cyberway.issue.crawler.frontier.WorkQueueFrontier.java
/**
 * Set up the various queues-of-queues used by the frontier. Override
 * in implementing subclasses to reduce or eliminate risk of queues
 * growing without bound.
 */
protected void initQueuesOfQueues() {
    // small risk of OutOfMemoryError: if 'hold-queues' is false,
    // readyClassQueues may grow in size without bound
    readyClassQueues = new LinkedBlockingQueue<String>();
    // risk of OutOfMemoryError: in large crawls,
    // inactiveQueues may grow in size without bound
    inactiveQueues = new LinkedBlockingQueue<String>();
    // risk of OutOfMemoryError: in large crawls with queue max-budgets,
    // retiredQueues may grow in size without bound
    retiredQueues = new LinkedBlockingQueue<String>();
    // small risk of OutOfMemoryError: in large crawls with many
    // unresponsive queues, an unbounded number of snoozed queues
    // may exist
    snoozedClassQueues = Collections.synchronizedSortedSet(new TreeSet<WorkQueue>());
}
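A subclass that wants the bounded behavior the Javadoc invites could pass a capacity to each LinkedBlockingQueue. With a bounded queue, add() throws IllegalStateException once the capacity is reached, so callers switch to offer() and handle rejection. A minimal standalone sketch; the tiny capacity of 2 is purely illustrative:

import java.util.concurrent.LinkedBlockingQueue;

public class BoundedQueueDemo {
    public static void main(String[] args) {
        // A bounded queue caps memory growth at a fixed number of entries.
        LinkedBlockingQueue<String> readyQueues = new LinkedBlockingQueue<>(2);

        readyQueues.add("q1");                       // add() succeeds while there is room...
        readyQueues.add("q2");
        boolean accepted = readyQueues.offer("q3");  // ...offer() returns false when full
        System.out.println("accepted q3? " + accepted); // prints: accepted q3? false
    }
}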
From source file:com.twitter.distributedlog.auditor.DLAuditor.java
private void collectLedgersFromDL(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory, final Set<Long> ledgers)
        throws IOException {
    logger.info("Enumerating {} to collect streams.", uri);
    Collection<String> streams = factory.enumerateAllLogsInNamespace();
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);

    logger.info("Collected {} streams from uri {} : {}", new Object[] { streams.size(), uri, streams });

    executeAction(streamQueue, 10, new Action<String>() {
        @Override
        public void execute(String stream) throws IOException {
            collectLedgersFromStream(factory, stream, ledgers);
        }
    });
}
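The executeAction helper is not shown here; a minimal sketch of the pattern its call suggests, where a fixed number of workers drain one shared LinkedBlockingQueue until it is empty. The class name, stream names, and worker count of 10 are assumptions for illustration, not the DLAuditor implementation:

import java.util.concurrent.*;

public class QueueDrainDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<>();
        streamQueue.addAll(java.util.Arrays.asList("stream-1", "stream-2", "stream-3"));

        ExecutorService pool = Executors.newFixedThreadPool(10);
        for (int i = 0; i < 10; i++) {
            pool.submit(() -> {
                String stream;
                // poll() is non-blocking and thread-safe: each stream is handed
                // to exactly one worker, and workers exit when the queue drains.
                while ((stream = streamQueue.poll()) != null) {
                    System.out.println(Thread.currentThread().getName() + " processing " + stream);
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}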
From source file:org.apache.accumulo.core.clientImpl.ConditionalWriterImpl.java
@Override
public Iterator<Result> write(Iterator<ConditionalMutation> mutations) {
    BlockingQueue<Result> resultQueue = new LinkedBlockingQueue<>();

    List<QCMutation> mutationList = new ArrayList<>();

    int count = 0;

    long entryTime = System.currentTimeMillis();

    mloop: while (mutations.hasNext()) {
        ConditionalMutation mut = mutations.next();
        count++;

        if (mut.getConditions().size() == 0)
            throw new IllegalArgumentException(
                    "ConditionalMutation had no conditions " + new String(mut.getRow(), UTF_8));

        for (Condition cond : mut.getConditions()) {
            if (!isVisible(cond.getVisibility())) {
                resultQueue.add(new Result(Status.INVISIBLE_VISIBILITY, mut, null));
                continue mloop;
            }
        }

        // copy the mutations so that even if caller changes it, it will not matter
        mutationList.add(new QCMutation(mut, resultQueue, entryTime));
    }

    queue(mutationList);

    return new RQIterator(resultQueue, count);
}
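Each queued mutation carries a reference to the shared resultQueue, and the returned RQIterator hands back exactly count results as workers produce them. A hedged sketch of that consuming role, since RQIterator itself is not shown; the class and method names below are stand-ins:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class AwaitResultsSketch {
    // Blocks until exactly `count` results have been produced, roughly the role
    // RQIterator plays for ConditionalWriterImpl above.
    static <R> List<R> awaitResults(BlockingQueue<R> resultQueue, int count) throws InterruptedException {
        List<R> results = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            results.add(resultQueue.take()); // take() blocks until a worker adds a result
        }
        return results;
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.add("ACCEPTED");
        System.out.println(awaitResults(queue, 1));
    }
}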
From source file:org.opencron.server.service.ExecuteService.java
public boolean killJob(Record record) {
    final Queue<Record> recordQueue = new LinkedBlockingQueue<Record>();
    if (JobType.SINGLETON.getCode().equals(record.getJobType())) {
        recordQueue.add(record);
    } else if (JobType.FLOW.getCode().equals(record.getJobType())) {
        // a flow job may have several running records; collect them all
        recordQueue.addAll(recordService.getRunningFlowJob(record.getRecordId()));
    }

    final List<Boolean> result = new ArrayList<Boolean>(0);

    Thread jobThread = new Thread(new Runnable() {
        @Override
        public void run() {
            for (final Record cord : recordQueue) {
                // issue each kill on its own thread
                Thread thread = new Thread(new Runnable() {
                    public void run() {
                        cord.setStatus(RunStatus.STOPPING.getStatus());
                        cord.setSuccess(ResultStatus.KILLED.getStatus());
                        JobVo job = null;
                        try {
                            recordService.save(cord);
                            job = jobService.getJobVoById(cord.getJobId());
                            // ask the agent to kill the process by pid
                            opencronCaller.call(
                                    Request.request(job.getIp(), job.getPort(), Action.KILL, job.getPassword())
                                            .putParam("pid", cord.getPid()),
                                    job.getAgent());
                            cord.setStatus(RunStatus.STOPED.getStatus());
                            cord.setEndTime(new Date());
                            recordService.save(cord);
                            loggerInfo("killed successful :jobName:{} at ip:{},port:{},pid:{}", job,
                                    cord.getPid());
                        } catch (Exception e) {
                            if (e instanceof PacketTooBigException) {
                                noticeService.notice(job, PACKETTOOBIG_ERROR);
                                loggerError("killed error:jobName:%s at ip:%s,port:%d,pid:%s", job,
                                        cord.getPid() + " failed info: " + PACKETTOOBIG_ERROR, e);
                            }
                            noticeService.notice(job, null);
                            loggerError("killed error:jobName:%s at ip:%s,port:%d,pid:%s", job,
                                    cord.getPid() + " failed info: " + e.getMessage(), e);
                            result.add(false);
                        }
                    }
                });
                thread.start();
            }
        }
    });
    jobThread.start();

    // wait for the kill dispatcher to finish before reporting the outcome
    try {
        jobThread.join();
    } catch (InterruptedException e) {
        logger.error("[opencron] kill job with error:{}", e.getMessage());
    }
    return !result.contains(false);
}
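Two caveats in the snippet above: the ArrayList of results is mutated from several threads without synchronization, and jobThread.join() only waits for the dispatcher thread, not for the per-record kill threads it spawns. A hedged sketch of a safer shape for that bookkeeping, using a LinkedBlockingQueue as a thread-safe result sink and a CountDownLatch to wait for every worker; all names and the record count are illustrative:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;

public class KillResultDemo {
    public static void main(String[] args) throws InterruptedException {
        int records = 3;
        LinkedBlockingQueue<Boolean> results = new LinkedBlockingQueue<>(); // thread-safe result sink
        CountDownLatch done = new CountDownLatch(records);

        for (int i = 0; i < records; i++) {
            new Thread(() -> {
                try {
                    results.add(true); // report this record's kill outcome
                } finally {
                    done.countDown();
                }
            }).start();
        }

        done.await(); // unlike join() on a dispatcher thread, this waits for every worker
        System.out.println("all killed: " + !results.contains(false));
    }
}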