List of usage examples for java.util.concurrent.ThreadPoolExecutor.execute
public void execute(Runnable command)
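Before the project examples below, a minimal, self-contained sketch of the basic call pattern (class and task names here are illustrative only, not taken from any example on this page): execute(Runnable) hands the task to the pool and returns immediately; the task runs on a pool thread, and RejectedExecutionException is thrown if the pool cannot accept it.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecuteDemo {
    public static void main(String[] args) throws InterruptedException {
        // Bounded pool: 2 core/max threads, 10-slot work queue.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(10));
        for (int i = 0; i < 5; i++) {
            final int id = i;
            // execute() queues the task and returns immediately.
            pool.execute(() -> System.out.println(
                    "task " + id + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();                              // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS);  // wait for queued tasks to finish
    }
}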
From source file:org.apache.cxf.systest.jaxrs.AbstractJAXRSContinuationsTest.java
protected void doTestContinuation(String pathSegment) throws Exception {
    final String port = getPort();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(1);
    List<BookWorker> workers = new ArrayList<>(5);
    for (int x = 1; x < 6; x++) {
        workers.add(new BookWorker("http://localhost:" + port + getBaseAddress() + pathSegment + "/" + x,
                Integer.toString(x), "CXF in Action" + x, startSignal, doneSignal));
    }
    for (BookWorker w : workers) {
        executor.execute(w);
    }
    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
    for (BookWorker w : workers) {
        w.checkError();
    }
}
From source file:com.xerox.amazonws.sdb.Domain.java
/**
 * Gets attributes of given items. This method threads off the get requests and
 * aggregates the responses.
 *
 * @param items the list of items to get attributes for
 * @param listener class that will be notified when items are ready
 * @throws SDBException wraps checked exceptions
 */
public void getItemsAttributes(List<String> items, ItemListener listener) throws SDBException {
    ThreadPoolExecutor pool = getThreadPoolExecutor();
    pool.setRejectedExecutionHandler(new RejectionHandler());
    Counter running = new Counter(0);
    for (String item : items) {
        // Throttle submission while every pool thread is busy.
        while (pool.getActiveCount() == pool.getMaximumPoolSize()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ex) {
            }
        }
        synchronized (running) {
            running.increment();
        }
        pool.execute(new AttrWorker(getItem(item), running, null, listener));
        Thread.yield();
    }
    // Wait until every submitted worker has decremented the counter.
    while (running.getValue() != 0) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException ex) {
        }
    }
    // Only shut the pool down if it was created locally rather than supplied.
    if (this.executor == null) {
        pool.shutdown();
    }
}
From source file:com.xerox.amazonws.sdb.Domain.java
/**
 * Gets attributes of given items. This method threads off the get requests and
 * aggregates the responses.
 *
 * @param items the list of items to get attributes for
 * @return the map of items with lists of attributes
 * @throws SDBException wraps checked exceptions
 */
public Map<String, List<ItemAttribute>> getItemsAttributes(List<String> items) throws SDBException {
    Map<String, List<ItemAttribute>> results = new Hashtable<String, List<ItemAttribute>>();
    ThreadPoolExecutor pool = getThreadPoolExecutor();
    pool.setRejectedExecutionHandler(new RejectionHandler());
    Counter running = new Counter(0);
    for (String item : items) {
        // Throttle submission while every pool thread is busy.
        while (pool.getActiveCount() == pool.getMaximumPoolSize()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ex) {
            }
        }
        synchronized (running) {
            running.increment();
        }
        pool.execute(new AttrWorker(getItem(item), running, results, null));
        Thread.yield();
    }
    // Wait until every submitted worker has decremented the counter.
    while (running.getValue() != 0) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException ex) {
        }
    }
    // Only shut the pool down if it was created locally rather than supplied.
    if (this.executor == null) {
        pool.shutdown();
    }
    return results;
}
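Both Domain examples throttle submission by polling getActiveCount() in a sleep loop. A common alternative, sketched below (this is not part of the original Domain.java; items and the printed "fetch" are illustrative stand-ins), is to give the pool a bounded queue and a CallerRunsPolicy handler, so that execute() itself applies backpressure by running the task on the submitting thread when the pool is saturated:

import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ThrottledFetch {
    static void fetchAll(List<String> items) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                8, 8, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(32),        // bounded work queue
                new ThreadPoolExecutor.CallerRunsPolicy());  // when saturated, run on the caller
        for (String item : items) {
            // No spin-wait needed: a full queue makes the caller run the task itself.
            pool.execute(() -> System.out.println("fetching " + item));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.HOURS);
    }
}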
From source file:eagle.jobrunning.crawler.RunningJobCrawlerImpl.java
private void startJobConfigProcessThread() {
    int configThreadCount = DEFAULT_CONFIG_THREAD_COUNT;
    LOG.info("Job Config crawler main thread started, pool size: " + DEFAULT_CONFIG_THREAD_COUNT);

    // Name the worker threads so they are identifiable in thread dumps.
    ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger count = new AtomicInteger(0);

        public Thread newThread(Runnable runnable) {
            count.incrementAndGet();
            Thread thread = Executors.defaultThreadFactory().newThread(runnable);
            thread.setName("config-crawler-workthread-" + count.get());
            return thread;
        }
    };

    ThreadPoolExecutor pool = new ThreadPoolExecutor(configThreadCount, configThreadCount, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), factory);

    while (true) {
        JobContext context;
        try {
            context = queueOfConfig.take();
            LOG.info("queueOfConfig size: " + queueOfConfig.size());
            Runnable configCrawlerThread = new ConfigWorkTask(new JobContext(context), fetcher, callback, this);
            pool.execute(configCrawlerThread);
        } catch (InterruptedException e) {
            LOG.warn("Got an InterruptedException: " + e.getMessage());
        } catch (RejectedExecutionException e2) {
            LOG.warn("Got a RejectedExecutionException: " + e2.getMessage());
        } catch (Throwable t) {
            LOG.warn("Got a Throwable: " + t.getMessage());
        }
    }
}
From source file:com.alibaba.napoli.gecko.service.impl.BaseRemotingController.java
public void sendToGroup(final String group, final RequestCommand request,
        final SingleRequestCallBackListener listener, final long time, final TimeUnit timeUnit)
        throws NotifyRemotingException {
    if (group == null) {
        throw new NotifyRemotingException("Null group");
    }
    if (request == null) {
        throw new NotifyRemotingException("Null command");
    }
    if (listener == null) {
        throw new NotifyRemotingException("Null listener");
    }
    if (timeUnit == null) {
        throw new NotifyRemotingException("Null TimeUnit");
    }
    final Connection conn = this.selectConnectionForGroup(group, this.connectionSelector, request);
    if (conn != null) {
        conn.send(request, listener, time, timeUnit);
    } else {
        // No connection available: deliver the failure response on the
        // listener's executor if it has one, otherwise call back inline.
        final ThreadPoolExecutor executor = listener.getExecutor();
        if (executor != null) {
            executor.execute(new Runnable() {
                public void run() {
                    listener.onResponse(BaseRemotingController.this
                            .createNoConnectionResponseCommand(request.getRequestHeader()), null);
                }
            });
        } else {
            listener.onResponse(this.createNoConnectionResponseCommand(request.getRequestHeader()), null);
        }
    }
}
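The fallback branch above is a small but reusable pattern: deliver a callback on a caller-supplied executor when one exists, and synchronously otherwise. A stripped-down sketch of the same idea (class and method names here are illustrative, not the Gecko API):

import java.util.concurrent.Executor;
import java.util.function.Consumer;

class CallbackDispatcher {
    /** Runs the callback on the supplied executor, or inline when none is given. */
    static <T> void dispatch(Executor executor, Consumer<T> callback, T response) {
        Runnable delivery = () -> callback.accept(response);
        if (executor != null) {
            executor.execute(delivery);   // asynchronous delivery on the caller's pool
        } else {
            delivery.run();               // synchronous fallback
        }
    }
}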
From source file:io.anserini.index.IndexWebCollection.java
public int indexWithThreads(int numThreads) throws IOException, InterruptedException {
    LOG.info("Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);
    final IndexWriterConfig iwc = new IndexWriterConfig(new EnglishAnalyzer());
    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(512);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);
    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);

    final String suffix = Collection.GOV2.equals(collection) ? ".gz" : ".warc.gz";
    final Deque<Path> warcFiles = discoverWarcFiles(docDir, suffix);
    // Respect the document limit, if one was set, by trimming the work list.
    if (doclimit > 0)
        while (warcFiles.size() > doclimit)
            warcFiles.removeFirst();

    long totalWarcFiles = warcFiles.size();
    LOG.info(totalWarcFiles + " " + suffix + " files found under the docs path: " + docDir.toString());

    // Seed the executor with an initial batch of up to 2000 tasks.
    for (int i = 0; i < 2000; i++) {
        if (!warcFiles.isEmpty())
            executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
        else {
            if (!executor.isShutdown()) {
                Thread.sleep(30000);
                executor.shutdown();
            }
            break;
        }
    }

    long first = 0;
    // Add some delay to let the scheduler spawn some threads.
    Thread.sleep(30000);

    try {
        // Wait for existing tasks to terminate, refilling the pool as tasks complete.
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            final long completedTaskCount = executor.getCompletedTaskCount();
            LOG.info(String.format("%.2f percent completed",
                    (double) completedTaskCount / totalWarcFiles * 100.0d));
            if (!warcFiles.isEmpty())
                for (long i = first; i < completedTaskCount; i++) {
                    if (!warcFiles.isEmpty())
                        executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
                    else {
                        if (!executor.isShutdown())
                            executor.shutdown();
                    }
                }
            first = completedTaskCount;
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted.
        executor.shutdownNow();
        // Preserve interrupt status.
        Thread.currentThread().interrupt();
    }

    if (totalWarcFiles != executor.getCompletedTaskCount())
        throw new RuntimeException("totalWarcFiles = " + totalWarcFiles
                + " is not equal to completedTaskCount = " + executor.getCompletedTaskCount());

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}
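The shutdown logic above follows the two-phase pattern recommended by the ExecutorService Javadoc: call shutdown() to stop intake, poll awaitTermination() until the queue drains, and fall back to shutdownNow() plus re-interruption if the waiting thread is itself interrupted. For reference, the canonical form of that idiom as a standalone helper, adapted from the JDK documentation:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

class Pools {
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // Disable new tasks from being submitted.
        try {
            // Wait a while for existing tasks to terminate.
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // Cancel currently executing tasks.
                // Wait a while for tasks to respond to being cancelled.
                if (!pool.awaitTermination(60, TimeUnit.SECONDS))
                    System.err.println("Pool did not terminate");
            }
        } catch (InterruptedException ie) {
            // (Re-)Cancel if current thread also interrupted.
            pool.shutdownNow();
            // Preserve interrupt status.
            Thread.currentThread().interrupt();
        }
    }
}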
From source file:com.xerox.amazonws.sdb.Domain.java
/**
 * Gets attributes of items specified in the query string. This method threads off the
 * get requests and aggregates the responses.
 *
 * @param queryString the filter statement
 * @param listener class that will be notified when items are ready
 * @throws SDBException wraps checked exceptions
 */
public void listItemsAttributes(String queryString, ItemListener listener) throws SDBException {
    ThreadPoolExecutor pool = getThreadPoolExecutor();
    pool.setRejectedExecutionHandler(new RejectionHandler());
    String nextToken = "";
    Counter running = new Counter(0);
    do {
        try {
            QueryResult result = listItems(queryString, nextToken, 250);
            List<Item> items = result.getItemList();
            for (Item i : items) {
                // Throttle submission while every pool thread is busy.
                while (pool.getActiveCount() == pool.getMaximumPoolSize()) {
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException ex) {
                    }
                }
                synchronized (running) {
                    running.increment();
                }
                pool.execute(new AttrWorker(i, running, null, listener));
                Thread.yield();
            }
            nextToken = result.getNextToken();
        } catch (SDBException ex) {
            System.out.println("Query '" + queryString + "' Failure: ");
            ex.printStackTrace();
        }
    } while (nextToken != null && nextToken.trim().length() > 0);
    // Wait until every submitted worker has decremented the counter.
    while (running.getValue() != 0) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException ex) {
        }
    }
    if (this.executor == null) {
        pool.shutdown();
    }
}
From source file:com.emc.ecs.smart.SmartUploader.java
/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions. The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records. If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify the md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}. Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15,
                    TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {
            System.out.printf("starting remote MD5...\n");
            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);
            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }
            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }
            System.out.printf("\nObject verification passed!\n");
        }
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}
From source file:com.lucidtechnics.blackboard.Blackboard.java
protected void executePlans(final TargetSpace _targetSpace, final java.util.Collection<Plan> _planList) {
    WorkspaceExecutionContext workspaceExecutionContext = get(_targetSpace.getWorkspaceIdentifier());
    ThreadPoolExecutor workspaceExecutor = getWorkspaceExecutorMap()
            .get(workspaceExecutionContext.getWorkspaceExecutorName());

    workspaceExecutor.execute(new Runnable() {
        public void run() {
            long startWorkspaceRun = 0l;
            long endWorkspaceRun = 0l;

            if (getTimePlans() == true) {
                startWorkspaceRun = System.currentTimeMillis();
            }

            WorkspaceContext workspaceContext = null;
            Plan plan = null;
            Map planToChangeInfoCountMap = new HashMap();
            boolean notifyPlans = false;
            Set activePlanSet = new HashSet();

            try {
                _targetSpace.setExecuting();

                for (Plan executingPlan : _planList) {
                    plan = executingPlan;

                    if (_targetSpace.isTerminated() == false && _targetSpace.isActive(plan) == true) {
                        try {
                            activePlanSet.add(plan);

                            if (_targetSpace.isFinished(plan) == false) {
                                if (logger.isDebugEnabled() == true) {
                                    logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                            + " plan: " + plan.getName() + " is about to be executed.");
                                }

                                _targetSpace.setExecuting(plan);

                                try {
                                    if (getTimePlans() == true) {
                                        endWorkspaceRun = System.currentTimeMillis();
                                        if (logger.isInfoEnabled() == true) {
                                            logger.info("Processing target space time: "
                                                    + (endWorkspaceRun - startWorkspaceRun));
                                        }
                                    }

                                    workspaceContext = new WorkspaceContext(_targetSpace, plan);

                                    long startTime = 0l;
                                    long endTime = 0l;
                                    long totalTime = 0l;

                                    if (getTimePlans() == true) {
                                        startTime = System.currentTimeMillis();
                                    }

                                    boolean exceptionThrown = false;

                                    // Execute the plan.
                                    try {
                                        _targetSpace.setPlanState(plan, plan.execute(workspaceContext));
                                    } catch (Throwable t) {
                                        // If a plan self destructs, the whole target space is
                                        // considered compromised. The target space is retired
                                        // and the target cleaned up. This is to prevent
                                        // inadvertent memory leaks. Plan operators should
                                        // strive to not have their plans throw unhandled
                                        // exceptions.
                                        exceptionThrown = true;

                                        if (_targetSpace.getTerminateOnError() == true) {
                                            getErrorManager().logException(t, logger);
                                            _targetSpace.setTerminated();
                                        } else {
                                            getErrorManager().warnException(t, logger);
                                        }
                                    }

                                    if (getTimePlans() == true) {
                                        endTime = System.currentTimeMillis();
                                        totalTime = endTime - startTime;
                                        if (logger.isInfoEnabled() == true) {
                                            logger.info("Plan: " + plan.getName() + " executed in: " + totalTime);
                                        }
                                    }

                                    if (_targetSpace.isFinished(plan) == true) {
                                        if (logger.isDebugEnabled() == true) {
                                            logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                                    + " plan: " + plan.getName() + " ran and is now finished.");
                                        }
                                    }

                                    _targetSpace.setLastActiveTime(System.currentTimeMillis());
                                } finally {
                                    workspaceContext.expire();
                                    // Keep track of the state of the workspace when this plan
                                    // was finished. Later on we will check to see if the
                                    // workspace had been changed by other plans since this
                                    // plan was last run.
                                }

                                planToChangeInfoCountMap.put(plan, new Integer(_targetSpace.getChangeInfoCount()));
                            } else {
                                if (logger.isDebugEnabled() == true) {
                                    logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                            + " plan: " + plan.getName() + " is finished.");
                                }
                                activePlanSet.remove(plan);
                            }

                            if (logger.isDebugEnabled() == true) {
                                logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                        + " plan: " + plan.getName() + " executed successfully.");
                            }
                        } catch (Throwable t) {
                            _targetSpace.setErrored(plan, t);
                            activePlanSet.remove(plan);
                            logger.error("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                    + " encountered exception while trying to execute plan: " + plan);
                            getErrorManager().logException(t, logger);
                        } finally {
                            _targetSpace.setExecuted(plan);
                            if (_targetSpace.isFinished(plan) == true && activePlanSet.contains(plan) == true) {
                                activePlanSet.remove(plan);
                            }
                        }
                    }
                }

                for (Iterator activePlans = activePlanSet.iterator(); activePlans.hasNext() == true;) {
                    plan = (Plan) activePlans.next();

                    if (logger.isDebugEnabled() == true) {
                        logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                + " plan: " + plan.getName() + " is an active plan.");
                    }

                    if (_targetSpace.isFinished(plan) == true) {
                        if (logger.isDebugEnabled() == true) {
                            logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                    + " plan: " + plan.getName() + " is now a finished plan.");
                        }
                        activePlans.remove();
                    } else {
                        if (logger.isDebugEnabled() == true) {
                            logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                    + " plan: " + plan.getName() + " is still an active plan.");
                        }

                        int planChangeCount = 0;
                        Integer planChangeCountInteger = (Integer) planToChangeInfoCountMap.get(plan);
                        planChangeCount = planChangeCountInteger.intValue();

                        if (_targetSpace.getChangeInfoCount() > planChangeCount) {
                            // Plan is interested in the workspace and there have
                            // been changes since it was last run.
                            if (logger.isDebugEnabled() == true) {
                                logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                        + " notifying plans for plan: " + plan.getName());
                            }
                            notifyPlans = true;
                        } else {
                            if (logger.isDebugEnabled() == true) {
                                logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                        + " NOT notifying plans for plan: " + plan.getName());
                            }
                        }
                    }

                    if (logger.isDebugEnabled() == true) {
                        logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                + " setting workspace as executed.");
                    }
                }

                if (activePlanSet.isEmpty() == true) {
                    _targetSpace.setCompleted();
                }
            } finally {
                if (getTimePlans() == true) {
                    endWorkspaceRun = System.currentTimeMillis();
                    if (logger.isInfoEnabled() == true) {
                        logger.info("Processing target space time at beginning of finally: "
                                + (endWorkspaceRun - startWorkspaceRun));
                    }
                }

                if (((_targetSpace.isCompleted() == true) || _targetSpace.isTerminated() == true)
                        && _targetSpace.isPersisted() == false) {
                    remove(_targetSpace.getWorkspaceIdentifier());
                    retireTargetSpace(_targetSpace);
                } else if (notifyPlans == true) {
                    if (logger.isDebugEnabled() == true) {
                        logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                + " execute plan will notify plans.");
                    }
                    _targetSpace.setActive();
                    _targetSpace.notifyPlans();
                } else {
                    _targetSpace.setActive();
                    if (logger.isDebugEnabled() == true) {
                        logger.debug("For workspace: " + _targetSpace.getWorkspaceIdentifier()
                                + " execute plan will NOT notify plans");
                    }
                }

                if (getTimePlans() == true) {
                    endWorkspaceRun = System.currentTimeMillis();
                    if (logger.isInfoEnabled() == true) {
                        logger.info("Processing target space time: " + (endWorkspaceRun - startWorkspaceRun));
                    }
                }
            }
        }
    });
}
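The Blackboard example routes all work for a given workspace to that workspace's dedicated executor, so plans for the same workspace never run concurrently with each other. A minimal sketch of that per-key serialization idea (the class, map, and names below are illustrative, not the Blackboard API): give each key its own single-thread executor and look it up at submission time.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class KeySerializedExecutor {
    private final ConcurrentMap<String, ExecutorService> executors = new ConcurrentHashMap<>();

    /** Tasks submitted under the same key run in submission order, one at a time. */
    void execute(String key, Runnable task) {
        executors.computeIfAbsent(key, k -> Executors.newSingleThreadExecutor())
                 .execute(task);
    }

    void shutdownAll() {
        executors.values().forEach(ExecutorService::shutdown);
    }
}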
From source file:com.turn.ttorrent.client.TorrentHandler.java
/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
@VisibleForTesting
/* pp */ void init() throws InterruptedException, IOException {
    {
        State s = getState();
        if (s != State.WAITING) {
            LOG.info("Restarting torrent from state " + s);
            return;
        }
    }
    setState(State.VALIDATING);

    try {
        int npieces = torrent.getPieceCount();
        long size = getSize();

        // Store in a local so we can update with minimal synchronization.
        BitSet completedPieces = new BitSet(npieces);
        long completedSize = 0;

        ThreadPoolExecutor executor = client.getEnvironment().getExecutorService();
        // TorrentCreator.newExecutor("TorrentHandlerInit");
        try {
            LOG.info("{}: Analyzing local data for {} ({} pieces)...",
                    new Object[] { getLocalPeerName(), getName(), npieces });

            int step = 10;
            CountDownLatch latch = new CountDownLatch(npieces);
            for (int index = 0; index < npieces; index++) {
                // TODO: Read the file sequentially and pass it to the validator.
                // Otherwise we thrash the disk on validation.
                ByteBuffer buffer = ByteBuffer.allocate(getPieceLength(index));
                bucket.read(buffer, getPieceOffset(index));
                buffer.flip();

                executor.execute(new PieceValidator(torrent, index, buffer, completedPieces, latch));

                if (index / (float) npieces * 100f > step) {
                    LOG.info("{}: ... {}% complete", getLocalPeerName(), step);
                    step += 10;
                }
            }
            latch.await();

            for (int i = completedPieces.nextSetBit(0); i >= 0; i = completedPieces.nextSetBit(i + 1)) {
                completedSize += getPieceLength(i);
            }
        } finally {
            // Request orderly executor shutdown and wait for hashing tasks to
            // complete.
            // executor.shutdown();
            // executor.awaitTermination(1, TimeUnit.SECONDS);
        }

        LOG.debug("{}: {}: we have {}/{} bytes ({}%) [{}/{} pieces].",
                new Object[] { getLocalPeerName(), getName(), completedSize, size,
                        String.format("%.1f", (100f * (completedSize / (float) size))),
                        completedPieces.cardinality(), getPieceCount() });

        synchronized (lock) {
            this.completedPieces = completedPieces;
        }

        if (isComplete())
            setState(State.SEEDING);
        else
            setState(State.SHARING);
    } catch (Exception e) {
        setState(State.ERROR);
        Throwables.propagateIfPossible(e, InterruptedException.class, IOException.class);
        throw Throwables.propagate(e);
    }
}