List of usage examples for java.util.concurrent.ExecutorService.invokeAll
<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException;
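Before the examples, here is a minimal self-contained sketch of what the timed overload guarantees (the class and task bodies are illustrative, not taken from any example below): invokeAll blocks until every task completes or the timeout elapses, every returned Future is done, and tasks still running at the deadline are cancelled, so get() on their futures throws CancellationException.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;

public class InvokeAllTimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        List<Callable<String>> tasks = Arrays.asList(
                () -> "fast",                                    // finishes well within the timeout
                () -> { Thread.sleep(5_000); return "slow"; });  // still running at the deadline
        try {
            // Blocks until both tasks finish or 1 second elapses, whichever comes first.
            // Futures come back in the same order as the task collection.
            List<Future<String>> futures = executor.invokeAll(tasks, 1, TimeUnit.SECONDS);
            for (Future<String> future : futures) {
                // Every returned future is done: either completed or cancelled by the timeout.
                if (future.isCancelled()) {
                    System.out.println("timed out");
                } else {
                    try {
                        System.out.println(future.get());
                    } catch (ExecutionException e) {
                        System.out.println("failed: " + e.getCause());
                    }
                }
            }
        } finally {
            executor.shutdownNow();
        }
    }
}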
From source file:io.rhiot.spec.IoTSpec.java
public static void main(String[] args) throws Exception {
    CommandLineParser parser = new DefaultParser();
    Options options = new Options();
    options.addOption(Option.builder("c").longOpt(CONFIG)
            .desc("Location of the test configuration file. A default value is 'src/main/resources/test.yaml' for easy IDE testing")
            .hasArg().build());
    options.addOption(Option.builder("i").longOpt(INSTANCE)
            .desc("Instance of the test; a default value is 1").hasArg().build());
    options.addOption(Option.builder("r").longOpt(REPORT)
            .desc("Location of the test report. A default value is 'target/report.csv'").hasArg().build());
    CommandLine line = parser.parse(options, args);

    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    TestProfile test = mapper.readValue(
            new File(line.getOptionValue(CONFIG, "src/main/resources/test.yaml")), TestProfile.class);
    int instance = Integer.valueOf(line.getOptionValue(INSTANCE, "1"));
    test.setInstance(instance);
    String report = line.getOptionValue(REPORT, "target/report.csv");
    test.setReport(new CSVReport(report));

    LOG.info("Test '" + test.getName() + "' instance " + instance + " started");
    final List<Driver> drivers = test.getDrivers();
    ExecutorService executorService = Executors.newFixedThreadPool(drivers.size());
    // Run all drivers concurrently, cancelling any still running when the test duration elapses
    List<Future<Void>> results = executorService.invokeAll(drivers, test.getDuration(), TimeUnit.MILLISECONDS);
    executorService.shutdownNow();
    executorService.awaitTermination(5, TimeUnit.SECONDS);
    results.forEach(result -> {
        try {
            result.get();
        } catch (ExecutionException execution) {
            LOG.warn("Exception running driver", execution);
        } catch (Exception interrupted) {
            // drivers cancelled by the timeout end up here; nothing to report
        }
    });
    drivers.forEach(driver -> {
        driver.stop();
        try {
            test.getReport().print(driver);
        } catch (Exception e) {
            LOG.warn("Failed to write reports for the driver " + driver);
        }
        LOG.debug("Driver " + driver);
        LOG.debug("\t " + driver.getResult());
    });
    test.getReport().close();
    LOG.info("Test '" + test.getName() + "' instance " + instance + " finished");
}
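A note on the shutdown sequence above: the timed invokeAll already cancels (with interruption) any driver still running when test.getDuration() milliseconds elapse, so the shutdownNow() call mainly interrupts the pool's worker threads before awaitTermination; get() on a cancelled driver future throws the unchecked CancellationException, which is what the otherwise-empty catch (Exception interrupted) block absorbs.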
From source file:org.apache.bookkeeper.benchmark.TestClient.java
/**
 * The first argument says whether entries should be written to BookKeeper (0) or to the local
 * disk (1). The second is an integer defining the length of a ledger entry.
 * The third is the number of writes.
 *
 * @param args
 */
public static void main(String[] args) throws ParseException {
    Options options = new Options();
    options.addOption("length", true, "Length of packets being written. Default 1024");
    options.addOption("target", true, "Target medium to write to. Options are bk, fs & hdfs. Default fs");
    options.addOption("runfor", true, "Number of seconds to run for. Default 60");
    options.addOption("path", true, "Path to write to. fs & hdfs only. Default /foobar");
    options.addOption("zkservers", true, "ZooKeeper servers, comma separated. bk only. Default localhost:2181.");
    options.addOption("bkensemble", true, "BookKeeper ledger ensemble size. bk only. Default 3");
    options.addOption("bkquorum", true, "BookKeeper ledger quorum size. bk only. Default 2");
    options.addOption("bkthrottle", true, "BookKeeper throttle size. bk only. Default 10000");
    options.addOption("sync", false, "Use synchronous writes with BookKeeper. bk only.");
    options.addOption("numconcurrent", true, "Number of concurrent clients. Default 1");
    options.addOption("timeout", true, "Number of seconds after which to give up");
    options.addOption("help", false, "This message");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("help")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("TestClient <options>", options);
        System.exit(-1);
    }

    int length = Integer.parseInt(cmd.getOptionValue("length", "1024"));
    String target = cmd.getOptionValue("target", "fs");
    long runfor = Long.parseLong(cmd.getOptionValue("runfor", "60")) * 1000;

    StringBuilder sb = new StringBuilder();
    while (length-- > 0) {
        sb.append('a');
    }

    Timer timeouter = new Timer();
    if (cmd.hasOption("timeout")) {
        final long timeout = Long.parseLong(cmd.getOptionValue("timeout", "360")) * 1000;
        timeouter.schedule(new TimerTask() {
            public void run() {
                System.err.println("Timing out benchmark after " + timeout + "ms");
                System.exit(-1);
            }
        }, timeout);
    }

    BookKeeper bkc = null;
    try {
        int numFiles = Integer.parseInt(cmd.getOptionValue("numconcurrent", "1"));
        int numThreads = Math.min(numFiles, 1000);
        byte[] data = sb.toString().getBytes(UTF_8);
        long runid = System.currentTimeMillis();
        List<Callable<Long>> clients = new ArrayList<Callable<Long>>();

        if (target.equals("bk")) {
            String zkservers = cmd.getOptionValue("zkservers", "localhost:2181");
            int bkensemble = Integer.parseInt(cmd.getOptionValue("bkensemble", "3"));
            int bkquorum = Integer.parseInt(cmd.getOptionValue("bkquorum", "2"));
            int bkthrottle = Integer.parseInt(cmd.getOptionValue("bkthrottle", "10000"));

            ClientConfiguration conf = new ClientConfiguration();
            conf.setThrottleValue(bkthrottle);
            conf.setZkServers(zkservers);

            bkc = new BookKeeper(conf);
            List<LedgerHandle> handles = new ArrayList<LedgerHandle>();
            for (int i = 0; i < numFiles; i++) {
                handles.add(bkc.createLedger(bkensemble, bkquorum, DigestType.CRC32, new byte[] { 'a', 'b' }));
            }
            for (int i = 0; i < numFiles; i++) {
                clients.add(new BKClient(handles, data, runfor, cmd.hasOption("sync")));
            }
        } else if (target.equals("hdfs")) {
            FileSystem fs = FileSystem.get(new Configuration());
            LOG.info("Default replication for HDFS: {}", fs.getDefaultReplication());

            List<FSDataOutputStream> streams = new ArrayList<FSDataOutputStream>();
            for (int i = 0; i < numFiles; i++) {
                String path = cmd.getOptionValue("path", "/foobar");
                streams.add(fs.create(new Path(path + runid + "_" + i)));
            }
            for (int i = 0; i < numThreads; i++) {
                clients.add(new HDFSClient(streams, data, runfor));
            }
        } else if (target.equals("fs")) {
            List<FileOutputStream> streams = new ArrayList<FileOutputStream>();
            for (int i = 0; i < numFiles; i++) {
                String path = cmd.getOptionValue("path", "/foobar " + i);
                streams.add(new FileOutputStream(path + runid + "_" + i));
            }
            for (int i = 0; i < numThreads; i++) {
                clients.add(new FileClient(streams, data, runfor));
            }
        } else {
            LOG.error("Unknown option: " + target);
            throw new IllegalArgumentException("Unknown target " + target);
        }

        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        long start = System.currentTimeMillis();
        // Give the whole benchmark at most 10 minutes; unfinished clients are cancelled
        List<Future<Long>> results = executor.invokeAll(clients, 10, TimeUnit.MINUTES);
        long end = System.currentTimeMillis();

        long count = 0;
        for (Future<Long> r : results) {
            if (!r.isDone()) {
                LOG.warn("Job didn't complete");
                System.exit(2);
            }
            long c = r.get();
            if (c == 0) {
                LOG.warn("Task didn't complete");
            }
            count += c;
        }
        long time = end - start;
        LOG.info("Finished processing writes (ms): {} TPT: {} op/s", time, count / ((double) time / 1000));
        executor.shutdown();
    } catch (ExecutionException ee) {
        LOG.error("Exception in worker", ee);
    } catch (KeeperException ke) {
        LOG.error("Error accessing zookeeper", ke);
    } catch (BKException e) {
        LOG.error("Error accessing bookkeeper", e);
    } catch (IOException ioe) {
        LOG.error("I/O exception during benchmark", ioe);
    } catch (InterruptedException ie) {
        LOG.error("Benchmark interrupted", ie);
    } finally {
        if (bkc != null) {
            try {
                bkc.close();
            } catch (BKException bke) {
                LOG.error("Error closing bookkeeper client", bke);
            } catch (InterruptedException ie) {
                LOG.warn("Interrupted closing bookkeeper client", ie);
            }
        }
    }
    timeouter.cancel();
}
From source file:org.apache.hadoop.hdfs.BlockStorageLocationUtil.java
/**
 * Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
 * making one RPC to each datanode. These RPCs are made in parallel using a
 * threadpool.
 *
 * @param datanodeBlocks
 *          Map of datanodes to the blocks present on the DN
 * @return metadatas Map of datanodes to block metadata of the DN
 * @throws InvalidBlockTokenException
 *           if client does not have read access on a requested block
 */
static Map<DatanodeInfo, HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(Configuration conf,
        Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks, int poolsize, int timeoutMs,
        boolean connectToDnViaHostname, Tracer tracer, SpanId parentSpanId) throws InvalidBlockTokenException {

    List<VolumeBlockLocationCallable> callables = createVolumeBlockLocationCallables(conf, datanodeBlocks,
            timeoutMs, connectToDnViaHostname, tracer, parentSpanId);

    // Use a thread pool to execute the Callables in parallel
    List<Future<HdfsBlocksMetadata>> futures = new ArrayList<>();
    ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
    try {
        futures = executor.invokeAll(callables, timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Swallow the exception here, because we can return partial results
    }
    executor.shutdown();

    Map<DatanodeInfo, HdfsBlocksMetadata> metadatas = Maps.newHashMapWithExpectedSize(datanodeBlocks.size());
    // Fill in metadatas with results from DN RPCs, where possible
    for (int i = 0; i < futures.size(); i++) {
        VolumeBlockLocationCallable callable = callables.get(i);
        DatanodeInfo datanode = callable.getDatanodeInfo();
        Future<HdfsBlocksMetadata> future = futures.get(i);
        try {
            HdfsBlocksMetadata metadata = future.get();
            metadatas.put(callable.getDatanodeInfo(), metadata);
        } catch (CancellationException e) {
            LOG.info("Cancelled while waiting for datanode " + datanode.getIpcAddr(false) + ": " + e.toString());
        } catch (ExecutionException e) {
            Throwable t = e.getCause();
            if (t instanceof InvalidBlockTokenException) {
                LOG.warn("Invalid access token when trying to retrieve "
                        + "information from datanode " + datanode.getIpcAddr(false));
                throw (InvalidBlockTokenException) t;
            } else if (t instanceof UnsupportedOperationException) {
                LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
                        + " required #getHdfsBlocksMetadata() API");
                throw (UnsupportedOperationException) t;
            } else {
                LOG.info("Failed to query block locations on datanode " + datanode.getIpcAddr(false) + ": " + t);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Could not fetch information from datanode", t);
            }
        } catch (InterruptedException e) {
            // Shouldn't happen, because invokeAll waits for all Futures to be ready
            LOG.info("Interrupted while fetching HdfsBlocksMetadata");
        }
    }
    return metadatas;
}
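Two behaviors worth noting in this example: if invokeAll itself is interrupted, futures keeps its initial empty value, so the loop is skipped and an empty map is returned; and any datanode RPC that misses the timeoutMs deadline surfaces as a CancellationException from future.get(), which is logged and skipped so the results that did complete are still returned.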
From source file:com.twitter.graphjet.bipartite.edgepool.EdgePoolConcurrentTestHelper.java
/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying edgePool, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency during arbitrary points of
 * inserting edges. Note that the exact read-write sequence here is non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param edgePool is the underlying
 *                 {@link com.twitter.graphjet.bipartite.edgepool.EdgePool}
 * @param numReadersPerNode is the number of reader threads to use per node
 * @param leftSize is the number of left nodes
 * @param rightSize is the number of right nodes
 * @param edgeProbability is the probability of an edge between a left-right node pair
 * @param random is the random number generator to use for generating a random graph
 */
public static void testRandomConcurrentReadWriteThreads(EdgePool edgePool, int numReadersPerNode,
        int leftSize, int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numReaders = leftSize * numReadersPerNode;
    CountDownLatch readersDoneLatch = new CountDownLatch(numReaders);

    // First, construct a random set of edges to insert in the graph
    Set<Pair<Integer, Integer>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<EdgePoolReader> readers = Lists.newArrayListWithCapacity(numReaders);
    Int2ObjectMap<IntSet> leftSideGraph = new Int2ObjectOpenHashMap<IntSet>(leftSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        IntSet nodeEdges = new IntOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                edges.add(Pair.of(i, j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            readers.add(new EdgePoolReader(edgePool, new CountDownLatch(0), readersDoneLatch, i,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Integer, Integer>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Integer, Integer> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(numReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(numReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new EdgePoolWriter(edgePool, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numReaders; i++) {
        allThreads.add(Executors.callable(readers.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for last reader was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    for (EdgePoolReader reader : readers) {
        IntSet expectedEdges = leftSideGraph.get(reader.queryNode);
        assertTrue(reader.getQueryNodeDegree() <= expectedEdges.size());
        if (reader.getQueryNodeDegree() == 0) {
            assertNull(reader.getQueryNodeEdges());
        } else {
            for (int edge : reader.getQueryNodeEdges()) {
                assertTrue(expectedEdges.contains(edge));
            }
        }
    }
}
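Both this helper and the GraphConcurrentTestHelper example below funnel their Runnable readers and writer through Executors.callable(runnable, result), which adapts a Runnable into a Callable<Integer> that returns the given value on normal completion; that is why each future is asserted to yield 1. A minimal self-contained sketch of that adapter (the task body here is illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.*;

public class CallableAdapterSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // Wraps a Runnable; the wrapped Callable returns 1 only if the Runnable finishes
        // normally, so a future yielding 1 proves the task ran to completion without throwing.
        Callable<Integer> task = Executors.callable(() -> System.out.println("work done"), 1);
        List<Future<Integer>> results = executor.invokeAll(Arrays.asList(task, task), 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            System.out.println(result.get()); // prints 1 twice
        }
        executor.shutdown();
    }
}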
From source file:com.twitter.graphjet.bipartite.GraphConcurrentTestHelper.java
/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying bipartiteGraph, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency during arbitrary points of
 * inserting edges. Note that the exact read-write sequence here is non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param graph is the underlying {@link BipartiteGraph}
 * @param numReadersPerNode is the number of reader threads to use per node
 * @param leftSize is the number of left nodes
 * @param rightSize is the number of right nodes
 * @param edgeProbability is the probability of an edge between a left-right node pair
 * @param random is the random number generator to use for generating a random graph
 */
public static <T extends BipartiteGraph & DynamicBipartiteGraph> void testRandomConcurrentReadWriteThreads(
        T graph, int numReadersPerNode, int leftSize, int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numLeftReaders = leftSize * numReadersPerNode;
    int numRightReaders = rightSize * numReadersPerNode;
    int totalNumReaders = numLeftReaders + numRightReaders;
    CountDownLatch readersDoneLatch = new CountDownLatch(totalNumReaders);

    // First, construct a random set of edges to insert in the graph
    Set<Pair<Long, Long>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<BipartiteGraphReader> leftReaders = Lists.newArrayListWithCapacity(numLeftReaders);
    List<BipartiteGraphReader> rightReaders = Lists.newArrayListWithCapacity(numRightReaders);
    Long2ObjectMap<LongSet> leftSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    Long2ObjectMap<LongSet> rightSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        LongSet nodeEdges = new LongOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                if (!rightSideGraph.containsKey(j)) {
                    rightSideGraph.put(j, new LongOpenHashSet(new long[] { i }));
                } else {
                    rightSideGraph.get(j).add(i);
                }
                edges.add(Pair.of((long) i, (long) j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            leftReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, true,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }
    // Create a bunch of rightReaders per node that'll read from the graph at random
    for (int i = 0; i < rightSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            rightReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, false,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Long, Long>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Long, Long> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(totalNumReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(totalNumReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new BipartiteGraphWriter(graph, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numLeftReaders; i++) {
        allThreads.add(Executors.callable(leftReaders.get(i), 1));
    }
    for (int i = 0; i < numRightReaders; i++) {
        allThreads.add(Executors.callable(rightReaders.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a latch was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    // first check the left side
    for (int i = 0; i < numLeftReaders; i++) {
        LongSet expectedLeftEdges = leftSideGraph.get(leftReaders.get(i).queryNode);
        assertTrue(leftReaders.get(i).getQueryNodeDegree() <= expectedLeftEdges.size());
        if (leftReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(leftReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : leftReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedLeftEdges.contains(edge));
            }
        }
    }
    // then the right side
    for (int i = 0; i < numRightReaders; i++) {
        LongSet expectedRightEdges = rightSideGraph.get(rightReaders.get(i).queryNode);
        assertTrue(rightReaders.get(i).getQueryNodeDegree() <= expectedRightEdges.size());
        if (rightReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(rightReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : rightReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedRightEdges.contains(edge));
            }
        }
    }
}
From source file:gov.ca.cwds.cals.service.ComplaintsService.java
@SuppressWarnings("squid:S2142") //Logging and informing client instead of shutdown private List<ComplaintDto> aggregateComplaintsFromDifferentSources(String facilityNumber) { List<ComplaintDto> complaints = new ArrayList<>(); ExecutorService executorService = Executors.newFixedThreadPool(3); try {/*from w w w . j a va2s . c om*/ List<Future<List<ComplaintDto>>> futures = executorService.invokeAll(prepareListOfTasks(facilityNumber), 1, TimeUnit.MINUTES); for (Future<List<ComplaintDto>> future : futures) { complaints.addAll(future.get()); } } catch (InterruptedException e) { String message = "One of complaints execution threads has been interrupted"; LOGGER.error(message, e); throw new ServiceException(message, e); } catch (ExecutionException e) { LOGGER.error(e.getMessage(), e); throw new ServiceException(e.getMessage(), e); } shutdownExecutionService(executorService); return complaints; }
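One caveat in this example: a source still running at the one-minute mark is cancelled by invokeAll, and future.get() then throws the unchecked CancellationException, which neither catch block above handles, so it would propagate out of this method unwrapped.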
From source file:edu.mit.oidc.web.StatusEndpoint.java
@RequestMapping(value = "/" + URL, method = RequestMethod.GET, produces = MediaType.APPLICATION_JSON_VALUE)
public String getStatus(Model m) {
    Map<String, Map<String, Object>> e = new HashMap<>();
    ExecutorService executor = Executors.newFixedThreadPool(3);
    try {
        List<Future<Map<String, Map<String, Object>>>> results = executor
                .invokeAll(Arrays.asList(new Callable<Map<String, Map<String, Object>>>() {
                    // get database status
                    @Override
                    public Map<String, Map<String, Object>> call() throws Exception {
                        return getDbStatus();
                    }
                }, new Callable<Map<String, Map<String, Object>>>() {
                    // get kerberos status
                    @Override
                    public Map<String, Map<String, Object>> call() throws Exception {
                        return getKerbStatus();
                    }
                }, new Callable<Map<String, Map<String, Object>>>() {
                    // get LDAP status
                    @Override
                    public Map<String, Map<String, Object>> call() throws Exception {
                        return getLdapStatus();
                    }
                }), getTimeoutSeconds(), TimeUnit.SECONDS);

        // collect all the results and return them
        for (Future<Map<String, Map<String, Object>>> result : results) {
            e.putAll(result.get());
        }
        m.addAttribute(JsonEntityView.ENTITY, e);
        return JsonEntityView.VIEWNAME;
    } catch (InterruptedException | ExecutionException ex) {
        m.addAttribute(HttpCodeView.CODE, HttpStatus.INTERNAL_SERVER_ERROR);
        m.addAttribute(JsonErrorView.ERROR_MESSAGE, ex.getMessage());
        return JsonErrorView.VIEWNAME;
    }
}
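The three anonymous Callable classes above predate lambdas; on Java 8+ the same fan-out can be written with method references, assuming the status helpers remain compatible with Callable.call()'s no-argument signature. A sketch of just the task-list construction and invokeAll call, reusing the surrounding method's executor:

List<Callable<Map<String, Map<String, Object>>>> checks = Arrays.asList(
        this::getDbStatus,   // database status
        this::getKerbStatus, // kerberos status
        this::getLdapStatus  // LDAP status
);
List<Future<Map<String, Map<String, Object>>>> results =
        executor.invokeAll(checks, getTimeoutSeconds(), TimeUnit.SECONDS);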
From source file:com.nts.alphamale.handler.ExecutorHandler.java
/**
 * Executes synchronous command lines in parallel.
 *
 * @param cmdList
 * @param timeoutSecond
 * @return
 */
public List<Map<String, Object>> executeParallel(List<CommandLine> cmdList, int timeoutSecond) {
    ExecutorService executor = Executors.newCachedThreadPool();
    List<Future<Map<String, Object>>> resultList;
    List<Map<String, Object>> results = new ArrayList<Map<String, Object>>();
    List<SynchronousTask> taskList = new ArrayList<SynchronousTask>();
    for (CommandLine cmd : cmdList) {
        taskList.add(new SynchronousTask(cmd, timeoutSecond * 1000));
    }
    try {
        // Allow 10 extra seconds beyond the per-task timeout before cancelling the batch
        resultList = executor.invokeAll(taskList, timeoutSecond + 10, TimeUnit.SECONDS);
        for (Future<Map<String, Object>> result : resultList) {
            results.add(result.get());
        }
    } catch (InterruptedException e) {
        log.error(e.getMessage());
    } catch (ExecutionException e) {
        log.error(e.getMessage());
    }
    if (!executor.isShutdown()) {
        executor.shutdown();
    }
    return results;
}
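As with the ComplaintsService example above, a task that overruns the batch deadline of timeoutSecond + 10 seconds leaves a cancelled future behind, and result.get() then throws the unchecked CancellationException, which this try block does not catch.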
From source file:org.apache.solr.schema.ManagedIndexSchema.java
/**
 * Block up to a specified maximum time until we see agreement on the schema
 * version in ZooKeeper across all replicas for a collection.
 */
public static void waitForSchemaZkVersionAgreement(String collection, String localCoreNodeName,
        int schemaZkVersion, ZkController zkController, int maxWaitSecs) {
    RTimer timer = new RTimer();

    // get a list of active replica cores to query for the schema zk version (skipping this core of course)
    List<GetZkSchemaVersionCallable> concurrentTasks = new ArrayList<>();
    for (String coreUrl : getActiveReplicaCoreUrls(zkController, collection, localCoreNodeName))
        concurrentTasks.add(new GetZkSchemaVersionCallable(coreUrl, schemaZkVersion));
    if (concurrentTasks.isEmpty())
        return; // nothing to wait for ...

    log.info("Waiting up to " + maxWaitSecs + " secs for " + concurrentTasks.size()
            + " replicas to apply schema update version " + schemaZkVersion + " for collection " + collection);

    // use an executor service to invoke schema zk version requests in parallel with a max wait time
    int poolSize = Math.min(concurrentTasks.size(), 10);
    ExecutorService parallelExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize,
            new DefaultSolrThreadFactory("managedSchemaExecutor"));
    try {
        List<Future<Integer>> results = parallelExecutor.invokeAll(concurrentTasks, maxWaitSecs,
                TimeUnit.SECONDS);

        // determine whether all replicas have the update
        List<String> failedList = null; // lazily init'd
        for (int f = 0; f < results.size(); f++) {
            int vers = -1;
            Future<Integer> next = results.get(f);
            if (next.isDone() && !next.isCancelled()) {
                // looks to have finished, but need to check the version value too
                try {
                    vers = next.get();
                } catch (ExecutionException e) {
                    // shouldn't happen since we checked isCancelled
                }
            }
            if (vers == -1) {
                String coreUrl = concurrentTasks.get(f).coreUrl;
                log.warn("Core " + coreUrl + " version mismatch! Expected " + schemaZkVersion + " but got " + vers);
                if (failedList == null)
                    failedList = new ArrayList<>();
                failedList.add(coreUrl);
            }
        }

        // if any tasks haven't completed within the specified timeout, it's an error
        if (failedList != null)
            throw new SolrException(ErrorCode.SERVER_ERROR,
                    failedList.size() + " out of " + (concurrentTasks.size() + 1)
                            + " replicas failed to update their schema to version " + schemaZkVersion
                            + " within " + maxWaitSecs + " seconds! Failed cores: " + failedList);
    } catch (InterruptedException ie) {
        log.warn("Core " + localCoreNodeName + " was interrupted waiting for schema version " + schemaZkVersion
                + " to propagate to " + concurrentTasks.size() + " replicas for collection " + collection);
        Thread.currentThread().interrupt();
    } finally {
        if (!parallelExecutor.isShutdown())
            parallelExecutor.shutdown();
    }

    log.info("Took {}ms for {} replicas to apply schema update version {} for collection {}", timer.getTime(),
            concurrentTasks.size(), schemaZkVersion, collection);
}
From source file:org.geomajas.plugin.rasterizing.layer.RasterDirectLayer.java
@Override
public void draw(Graphics2D graphics, MapContent map, MapViewport viewport) {
    try {
        if (tiles.size() > 0) {
            Collection<Callable<ImageResult>> callables = new ArrayList<Callable<ImageResult>>(tiles.size());
            // Build the image downloading threads
            for (RasterTile tile : tiles) {
                RasterImageDownloadCallable downloadThread = new RasterImageDownloadCallable(
                        DOWNLOAD_MAX_ATTEMPTS, tile);
                callables.add(downloadThread);
            }
            // Loop until all images are downloaded or timeout is reached
            long totalTimeout = DOWNLOAD_TIMEOUT + DOWNLOAD_TIMEOUT_ONE_TILE * tiles.size();
            log.debug("=== total timeout (millis): {}", totalTimeout);
            ExecutorService service = Executors.newFixedThreadPool(DOWNLOAD_MAX_THREADS);
            List<Future<ImageResult>> futures = service.invokeAll(callables, totalTimeout,
                    TimeUnit.MILLISECONDS);

            // determine the pixel bounds of the mosaic
            Bbox pixelBounds = getPixelBounds(tiles);

            // create the images for the mosaic
            List<RenderedImage> images = new ArrayList<RenderedImage>();
            for (Future<ImageResult> future : futures) {
                ImageResult result = null;
                if (future.isDone()) {
                    try {
                        result = future.get();
                        // create a rendered image
                        if (result.getImage() != null && result.getImage().length > 0) {
                            RenderedImage image = JAI.create("stream",
                                    new ByteArraySeekableStream(result.getImage()));
                            // convert to common direct color model (some images have their own indexed color model)
                            RenderedImage colored = toDirectColorModel(image);

                            // translate to the correct position in the tile grid
                            double xOffset = result.getRasterImage().getCode().getX() * tileWidth
                                    - pixelBounds.getX();
                            double yOffset;
                            // TODO: in some cases, the y-index is up (e.g. WMS), should be down for all layers !!!!
                            if (isYIndexUp(tiles)) {
                                yOffset = result.getRasterImage().getCode().getY() * tileHeight
                                        - pixelBounds.getY();
                            } else {
                                yOffset = (pixelBounds.getMaxY()
                                        - (result.getRasterImage().getCode().getY() + 1) * tileHeight);
                            }
                            log.debug("adding to(" + xOffset + "," + yOffset + "), url = "
                                    + result.getRasterImage().getUrl());
                            RenderedImage translated = TranslateDescriptor.create(colored, (float) xOffset,
                                    (float) yOffset, new InterpolationNearest(), null);
                            images.add(translated);
                        }
                    } catch (ExecutionException e) {
                        addLoadError(graphics, (ImageException) (e.getCause()), viewport);
                        log.warn(MISSING_TILE_IN_MOSAIC + e.getMessage());
                    } catch (Exception e) {
                        log.warn("Missing tile " + result.getRasterImage().getUrl());
                        log.warn(MISSING_TILE_IN_MOSAIC + e.getMessage());
                    }
                }
            }

            if (images.size() > 0) {
                ImageLayout imageLayout = new ImageLayout(0, 0, (int) pixelBounds.getWidth(),
                        (int) pixelBounds.getHeight());
                imageLayout.setTileWidth(tileWidth);
                imageLayout.setTileHeight(tileHeight);

                // create the mosaic image
                ParameterBlock pbMosaic = new ParameterBlock();
                pbMosaic.add(MosaicDescriptor.MOSAIC_TYPE_OVERLAY);
                for (RenderedImage renderedImage : images) {
                    pbMosaic.addSource(renderedImage);
                }
                RenderedOp mosaic = JAI.create("mosaic", pbMosaic,
                        new RenderingHints(JAI.KEY_IMAGE_LAYOUT, imageLayout));
                try {
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    log.debug("rendering to buffer...");
                    ImageIO.write(mosaic, "png", baos);
                    log.debug("rendering done, size = " + baos.toByteArray().length);
                    RasterTile mosaicTile = new RasterTile();
                    mosaicTile.setBounds(getWorldBounds(tiles));
                    log.info("application bounds = " + mosaicTile.getBounds());
                    ImageResult mosaicResult = new ImageResult(mosaicTile);
                    mosaicResult.setImage(baos.toByteArray());
                    addImage(graphics, mosaicResult, viewport);
                } catch (IOException e) {
                    log.warn("could not write mosaic image " + e.getMessage());
                }
            }
        }
    } catch (InterruptedException e) {
        log.warn("rendering {} to {} failed : ", getTitle(), viewport.getBounds());
    }
}