List of usage examples for java.util.concurrent ExecutorService shutdownNow
List<Runnable> shutdownNow();
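shutdownNow() attempts to stop all actively executing tasks (by interrupting their threads), halts the processing of waiting tasks, and returns the list of tasks that were awaiting execution. It does not wait for running tasks to finish, which is why the examples below typically pair it with awaitTermination(). Here is a minimal sketch of that common pattern; the class name, pool size, task body, and timeouts are illustrative placeholders, not taken from any of the sources below:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.submit(() -> {
            // Placeholder task: sleeps so the pool is still busy at shutdown time.
            try {
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                // shutdownNow() interrupts running tasks; restore the flag and exit.
                Thread.currentThread().interrupt();
            }
        });

        pool.shutdown(); // stop accepting new tasks, let submitted ones run
        if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            // Force shutdown: cancels queued tasks and interrupts running ones.
            List<Runnable> neverRan = pool.shutdownNow();
            System.out.println(neverRan.size() + " queued task(s) never started");
            // Give interrupted tasks a moment to respond to the interrupt.
            if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    }
}

Note that shutdownNow() can only stop tasks cooperatively: a task that never checks its interrupt status will keep running regardless.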
From source file:com.yahoo.yrlhaifa.liveqa.challenge.http_operation.QuestionOperationHttpRequestSender.java
public void sendRequestsAndCollectAnswers() throws QuestionOperationException, InterruptedException {
    if (mapParticipantToAnswer.size() > 0) {
        throw new QuestionOperationException("BUG: The given map from system-id to answers is not empty.");
    }
    CloseableHttpClient httpClient = HttpClients.custom().setMaxConnPerRoute(participants.size()).build();
    try {
        ExecutorService executor = Executors.newFixedThreadPool(participants.size());
        try {
            FutureRequestExecutionService requestExecutor = new FutureRequestExecutionService(httpClient, executor);
            logger.info("Sending requests using request-executor...");
            sendRequestsWithRequestExecutor(requestExecutor);
            logger.info("Sending requests using request-executor - done.");
        } finally {
            try {
                executor.shutdownNow();
            } catch (RuntimeException e) {
                // TODO Add more error handling
                logger.error("Failed to shutdown executor. Program continues.", e);
            }
        }
    } finally {
        try {
            httpClient.close();
        } catch (IOException | RuntimeException e) {
            // TODO Add more error handling
            logger.error("Failed to close HTTP client. Program continues.", e);
        }
    }

    // Remove those who did not finish on time, but did write results.
    Set<Participant> didNotSucceed = new LinkedHashSet<Participant>();
    for (Participant participant : mapParticipantToAnswer.keySet()) {
        if (!(systemsSucceeded.contains(participant))) {
            didNotSucceed.add(participant);
        }
    }
    for (Participant toRemove : didNotSucceed) {
        mapParticipantToAnswer.remove(toRemove);
    }

    if (exception != null) {
        throw exception;
    }
}
From source file:org.mule.module.mongo.tools.MongoDump.java
public void dump(final String outputDirectory, final String database, String outputName, final int threads)
        throws IOException {
    Validate.notNull(outputDirectory);
    Validate.notNull(outputName);
    Validate.notNull(database);

    outputName += appendTimestamp();
    initOplog(database);

    final Collection<String> collections = mongoClient.listCollections();
    if (collections != null) {
        final ExecutorService executor = Executors.newFixedThreadPool(threads);
        final DumpWriter dumpWriter = new BsonDumpWriter(outputDirectory, outputName);
        for (final String collectionName : collections) {
            final DBCollection dbCollection = mongoClient.getCollection(collectionName);
            final MongoDumpCollection dumpCollection = new MongoDumpCollection(dbCollection);
            dumpCollection.setDumpWriter(dumpWriter);
            final Future<Void> future = executor.submit(dumpCollection);
            propagateException(future);
        }
        executor.shutdown();
        try {
            if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
            if (oplog) {
                final ExecutorService singleExecutor = Executors.newSingleThreadExecutor();
                final MongoDumpCollection dumpCollection = new MongoDumpCollection(oplogCollection);
                dumpCollection.setName(BackupConstants.OPLOG);
                dumpCollection.addOption(Bytes.QUERYOPTION_OPLOGREPLAY);
                dumpCollection.addOption(Bytes.QUERYOPTION_SLAVEOK);
                final DBObject query = new BasicDBObject();
                query.put(BackupConstants.TIMESTAMP_FIELD, new BasicDBObject("$gt", oplogStart));
                // Filter only oplogs for given database
                query.put(BackupConstants.NAMESPACE_FIELD, BackupUtils.getNamespacePattern(database));
                dumpCollection.setQuery(query);
                dumpCollection.setDumpWriter(dumpWriter);
                final Future<Void> future = singleExecutor.submit(dumpCollection);
                propagateException(future);
            }
            if (zip) {
                final String dbDumpPath = outputDirectory + File.separator + outputName;
                ZipUtils.zipDirectory(dbDumpPath);
                FileUtils.deleteDirectory(new File(dbDumpPath));
            }
        } catch (final InterruptedException ie) {
            executor.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
}
From source file:com.pinterest.rocksplicator.controller.tasks.LoadSSTTask.java
@Override
public void process(Context ctx) throws Exception {
    final String clusterName = ctx.getCluster();
    final String segment = getParameter().getSegment();
    ClusterBean clusterBean = ZKUtil.getClusterConfig(zkClient, clusterName);
    if (clusterBean == null) {
        LOG.error("Failed to get config for cluster {}.", clusterName);
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to read cluster config from zookeeper.");
        return;
    }
    SegmentBean segmentBean = clusterBean.getSegments().stream()
            .filter(s -> s.getName().equals(segment))
            .findAny().orElse(null);
    if (segmentBean == null) {
        String errMsg = String.format("Segment %s not in cluster %s.", segment, clusterName);
        LOG.error(errMsg);
        ctx.getTaskQueue().failTask(ctx.getId(), errMsg);
        return;
    }

    final ExecutorService executor = Executors.newFixedThreadPool(getParameter().getConcurrency());
    try {
        // first pass load sst to masters
        doLoadSST(executor, segmentBean, Role.MASTER);
        LOG.info("First pass done.");
        // second pass load sst to slaves
        doLoadSST(executor, segmentBean, Role.SLAVE);
        LOG.info("Second pass done.");
    } catch (InterruptedException | ExecutionException ex) {
        LOG.error("Failed to load sst to cluster {}.", clusterName, ex);
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to load sst, error=" + ex.getMessage());
        return;
    }
    executor.shutdown();
    executor.shutdownNow();
    ctx.getTaskQueue().finishTask(ctx.getId(), "Finished loading sst to " + clusterName);
}
From source file:org.jactr.core.concurrent.ExecutorServices.java
/**
 * Shutdown and wait for the shutdown of all the executors that are currently
 * installed. If millisecondsToWait is 0, it will wait indefinitely.
 */
static public void shutdown(long millisecondsToWait) {
    synchronized (_executors) {
        _isShuttingDown = true;
    }

    Collection<String> executorNames = new ArrayList<String>();
    getExecutorNames(executorNames);

    /*
     * issue shutdown
     */
    for (String name : executorNames) {
        ExecutorService executor = getExecutor(name);
        if (executor != null && !executor.isShutdown())
            executor.shutdown();
    }

    /*
     * and wait
     */
    long interval = 500;
    long abortAt = System.currentTimeMillis() + millisecondsToWait;
    if (millisecondsToWait == 0)
        abortAt = Long.MAX_VALUE;

    while (abortAt > System.currentTimeMillis() && executorNames.size() != 0) {
        for (String name : executorNames) {
            ExecutorService executor = getExecutor(name);
            if (executor != null)
                try {
                    if (executor.awaitTermination(interval, TimeUnit.MILLISECONDS))
                        removeExecutor(name);
                    else if (LOGGER.isDebugEnabled())
                        LOGGER.debug(name + " did not terminate after " + interval + "ms");
                } catch (Exception e) {
                    if (LOGGER.isWarnEnabled())
                        LOGGER.warn("Failed to terminate " + name, e);
                    removeExecutor(name);
                }
        }

        /*
         * get the current names again..
         */
        executorNames.clear();
        getExecutorNames(executorNames);
    }

    if (executorNames.size() != 0) {
        if (LOGGER.isWarnEnabled())
            LOGGER.warn("Forcing unresponsive executors to terminate " + executorNames + " after "
                    + millisecondsToWait + "ms");
        for (String name : executorNames) {
            ExecutorService executor = getExecutor(name);
            if (executor != null)
                executor.shutdownNow();
        }
    }

    synchronized (_executors) {
        _executors.clear();
        _isShuttingDown = false;
    }
}
From source file:org.eclipse.gyrex.cloud.internal.zk.ZooKeeperPinger.java
@Override
public void run() {
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
        if (CloudDebug.zooKeeperGateLifecycle) {
            LOG.debug("Checking ZooKeeper connection...");
        }

        // execute in separate thread with timeout
        executor.submit(checkConnection).get(30, TimeUnit.SECONDS);

        if (CloudDebug.zooKeeperGateLifecycle) {
            LOG.debug("ZooKeeper connection OK");
        }

        // ok
        setStatus(null);
    } catch (final Exception e) {
        LOG.error("The ZooKeeper connection is in trouble. {}", ExceptionUtils.getRootCauseMessage(e), e);
        setStatus(new Status(IStatus.ERROR, CloudActivator.SYMBOLIC_NAME,
                String.format("Unable to ping ZooKeeper. %s", ExceptionUtils.getRootCauseMessage(e)), e));
    } finally {
        executor.shutdownNow();
    }
}
From source file:com.dtolabs.rundeck.core.execution.impl.jsch.JschNodeExecutor.java
/**
 * Shutdown the ExecutorService
 */
void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdownNow(); // Disable new tasks from being submitted
    try {
        logger.debug("Waiting up to 30 seconds for ExecutorService to shut down");

        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            logger.debug("Pool did not terminate");
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
From source file:org.jasig.cas.ticket.registry.JpaTicketRegistryTests.java
@Test
@IfProfileValue(name = "cas.jpa.concurrent", value = "true")
public void testConcurrentServiceTicketGeneration() throws Exception {
    final TicketGrantingTicket newTgt = newTGT();
    addTicketInTransaction(newTgt);

    final ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_SIZE);
    try {
        final List<ServiceTicketGenerator> generators = new ArrayList<ServiceTicketGenerator>(CONCURRENT_SIZE);
        for (int i = 0; i < CONCURRENT_SIZE; i++) {
            generators.add(new ServiceTicketGenerator(newTgt.getId()));
        }
        final List<Future<String>> results = executor.invokeAll(generators);
        for (Future<String> result : results) {
            assertNotNull(result.get());
        }
    } catch (Exception e) {
        logger.debug("testConcurrentServiceTicketGeneration produced an error", e);
        fail("testConcurrentServiceTicketGeneration failed.");
    } finally {
        executor.shutdownNow();
    }
}
From source file:com.stimulus.archiva.incoming.IAPRunnable.java
public void shutdownAndAwaitTermination(ExecutorService pool, String job) {
    pool.shutdown();
    try {
        logger.debug("awaiting termination of " + job);
        if (!pool.awaitTermination(DEAD_PERIOD, TimeUnit.MILLISECONDS)) {
            logger.debug("awaiting " + job + " did not terminate");
            pool.shutdownNow();
            if (!pool.awaitTermination(60, TimeUnit.SECONDS))
                logger.debug("awaiting " + job + " still did not terminate");
        } else {
            logger.debug("awaiting " + job + " terminated");
        }
    } catch (InterruptedException ie) {
        logger.debug("awaiting " + job + " were interrupted. shutting thread pool down immediately.");
        pool.shutdownNow();
        Thread.currentThread().interrupt();
    } catch (Throwable e) {
        logger.debug("awaiting " + job + " were interrupted:" + e.getMessage());
        pool.shutdownNow();
        Thread.currentThread().interrupt();
    }
}
From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java
/**
 * Get all pages in an async mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess) throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);
    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();

    int r = 0;
    while (r <= RETRY) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound);
        }
        // wait for jobs to finish and start next round if necessary.
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == RETRY) {
        throw new RuntimeException(String.format(
                "Getting all pages reached the maximum number of retries %d. Date range: %s ~ %s. Country: %s.",
                RETRY, startDate, endDate, country));
    }
    return allPages;
}
From source file:com.emc.ecs.sync.CasMigrationTest.java
protected void delete(FPPool pool, List<String> clipIds) throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);
    System.out.print("Deleting clips");
    for (String clipId : clipIds) {
        service.submit(new ClipDeleter(pool, clipId));
    }
    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();
    System.out.println();
}