List of usage examples for java.util.concurrent ExecutorService shutdown
void shutdown();
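shutdown() initiates an orderly shutdown: previously submitted tasks keep running, no new tasks are accepted, and the call itself does not block. Below is a minimal sketch (not taken from any of the projects listed further down) of the common shutdown-then-await pattern; the pool size, the task bodies, and the 30-second timeout are illustrative assumptions only:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId));
        }
        pool.shutdown();                                    // stop accepting new tasks; queued tasks still run
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow();                             // force-cancel anything still running after the timeout
        }
    }
}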
From source file:edu.cmu.cs.lti.ark.fn.identification.latentmodel.LatentAlphabetCreationThreaded.java
/**
 * Splits frameElementLines into numThreads equally-sized batches and creates an alphabet
 * file for each one.
 *
 * @throws java.io.IOException
 */
public void createLocalAlphabets() throws IOException {
    final List<String> frameLines = Files.readLines(new File(frameElementsFile), Charsets.UTF_8)
            .subList(startIndex, endIndex);
    final List<String> parseLines = Files.readLines(new File(parseFile), Charsets.UTF_8);
    final ExecutorService threadPool = newFixedThreadPool(numThreads);
    int i = 0;
    for (int start = startIndex; start < endIndex; start += BATCH_SIZE) {
        final int threadId = i;
        final List<String> frameLineBatch = frameLines.subList(start,
                Math.min(frameLines.size(), start + BATCH_SIZE));
        threadPool.execute(new Runnable() {
            public void run() {
                logger.info("Thread " + threadId + " : start");
                processBatch(threadId, frameLineBatch, parseLines);
                logger.info("Thread " + threadId + " : end");
            }
        });
        i++;
    }
    threadPool.shutdown();
}
From source file:kmi.taa.core.PredicateObjectRetriever.java
public SortedMap<Integer, String> retrieveAll(Map<Integer, String> originalLines, String proxy) {
    SortedMap<Integer, String> results = Collections.synchronizedSortedMap(new TreeMap<Integer, String>());
    ExecutorService pool = Executors.newFixedThreadPool(50);
    int howManyslinks = originalLines.size();
    for (Integer id : originalLines.keySet()) {
        String line = originalLines.get(id);
        String[] str = line.split("\t");
        String candidateUrl = str[2];
        pool.execute(new Retriever(id, candidateUrl, proxy, results));
    }
    // stop accepting new tasks; the submitted retrievers keep running
    pool.shutdown();
    // poll the result map instead of calling awaitTermination
    int count = 0;
    int previousResultSize = 0;
    while (results.size() < howManyslinks && count < 100) {
        try {
            Thread.sleep(1000);
            count += 1;
            if (results.size() != previousResultSize) {
                previousResultSize = results.size();
                count = 0;
            }
            System.out.println("Already retrieved " + results.size() + " triples ...");
        } catch (InterruptedException e) {
        }
    }
    System.out.println("All slinks are queried");
    return results;
}
From source file:com.ibm.bi.dml.runtime.io.ReaderTextCSVParallel.java
/**
 * @param splits
 * @param path
 * @param job
 * @param hasHeader
 * @param delim
 * @param estnnz
 * @return
 * @throws IOException
 * @throws DMLRuntimeException
 */
private MatrixBlock computeCSVSizeAndCreateOutputMatrixBlock(InputSplit[] splits, Path path, JobConf job,
        boolean hasHeader, String delim, long estnnz) throws IOException, DMLRuntimeException {
    int nrow = 0;
    int ncol = 0;
    FileInputFormat.addInputPath(job, path);
    TextInputFormat informat = new TextInputFormat();
    informat.configure(job);

    // count no of entities in the first non-header row
    LongWritable key = new LongWritable();
    Text oneLine = new Text();
    RecordReader<LongWritable, Text> reader = informat.getRecordReader(splits[0], job, Reporter.NULL);
    try {
        if (reader.next(key, oneLine)) {
            String cellStr = oneLine.toString().trim();
            ncol = StringUtils.countMatches(cellStr, delim) + 1;
        }
    } finally {
        IOUtilFunctions.closeSilently(reader);
    }

    // count rows in parallel per split
    try {
        ExecutorService pool = Executors.newFixedThreadPool(_numThreads);
        ArrayList<CountRowsTask> tasks = new ArrayList<CountRowsTask>();
        for (InputSplit split : splits) {
            tasks.add(new CountRowsTask(split, informat, job, hasHeader));
            hasHeader = false;
        }
        pool.invokeAll(tasks);
        // invokeAll blocks until all tasks complete, so shutdown only releases the pool threads
        pool.shutdown();

        // collect row counts for offset computation
        // early error notify in case not all tasks successful
        _offsets = new SplitOffsetInfos(tasks.size());
        for (CountRowsTask rt : tasks) {
            if (!rt.getReturnCode())
                throw new IOException("Count task for csv input failed: " + rt.getErrMsg());
            _offsets.setOffsetPerSplit(tasks.indexOf(rt), nrow);
            _offsets.setLenghtPerSplit(tasks.indexOf(rt), rt.getRowCount());
            nrow = nrow + rt.getRowCount();
        }
    } catch (Exception e) {
        throw new IOException("Threadpool Error " + e.getMessage(), e);
    }

    // allocate target matrix block based on given size;
    // need to allocate sparse as well since lock-free insert into target
    return createOutputMatrixBlock(nrow, ncol, estnnz, true, true);
}
From source file:com.blacklocus.jres.request.index.JresUpdateDocumentScriptTest.java
@Test(expected = ExecutionException.class)
public void testRetryOnConflictExpectError() throws InterruptedException, ExecutionException {
    final String index = "JresUpdateDocumentScriptTest.testRetryOnConflictExpectError".toLowerCase();
    final String type = "test";
    final String id = "warzone";
    final AtomicReference<String> error = new AtomicReference<String>();
    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    List<Future<?>> futures = new ArrayList<Future<?>>(numThreads);
    for (int i = 0; i < numThreads; i++) {
        futures.add(x.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                for (int j = 0; j < numIterations; j++) {
                    jres.quest(new JresUpdateDocumentScript(index, type, id, "ctx._source.value += 1", null,
                            ImmutableMap.of("value", 0), null));
                }
                return null;
            }
        }));
    }

    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    for (Future<?> future : futures) {
        // expecting a conflict exception from ElasticSearch
        future.get();
    }
}
From source file:byps.test.servlet.MyServerIF.java
@Override
public int callClientParallel(int nbOfCalls) throws RemoteException {
    if (log.isDebugEnabled())
        log.debug("callClientParallel(" + nbOfCalls);
    final ClientIF clientIF = getClientIF();
    final AtomicInteger ret = new AtomicInteger(0);
    ExecutorService tpool = Executors.newCachedThreadPool();
    for (int i = 0; i < nbOfCalls; i++) {
        Runnable run = new Runnable() {
            public void run() {
                try {
                    if (log.isDebugEnabled())
                        log.debug("clientIF.incrementInt(");
                    int v = clientIF.incrementInt(0);
                    if (log.isDebugEnabled())
                        log.debug(")clientIF.incrementInt");
                    ret.addAndGet(v);
                } catch (Exception e) {
                    log.error(e);
                }
            }
        };
        tpool.execute(run);
    }
    tpool.shutdown();
    try {
        tpool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new BException(BExceptionC.CANCELLED, e.toString(), e);
    }
    if (log.isDebugEnabled())
        log.debug(")callClientParallel");
    return ret.get();
}
From source file:hivemall.mix.server.MixServerTest.java
@Test
public void testMultipleClients() throws InterruptedException {
    final int port = NetUtils.getAvailablePort();
    CommandLine cl = CommandLineUtils.parseOptions(
            new String[] { "-port", Integer.toString(port), "-sync_threshold", "3" }, MixServer.getOptions());
    MixServer server = new MixServer(cl);
    ExecutorService serverExec = Executors.newSingleThreadExecutor();
    serverExec.submit(server);
    waitForState(server, ServerState.RUNNING);

    final int numClients = 5;
    final ExecutorService clientsExec = Executors.newCachedThreadPool();
    for (int i = 0; i < numClients; i++) {
        clientsExec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    invokeClient("testMultipleClients", port);
                } catch (InterruptedException e) {
                    Assert.fail(e.getMessage());
                }
            }
        });
    }
    // awaitTermination is called before shutdown here, so it simply blocks for up to
    // 10 seconds while the clients run; shutdown then stops both executors
    clientsExec.awaitTermination(10, TimeUnit.SECONDS);
    clientsExec.shutdown();
    serverExec.shutdown();
}
From source file:com.pinterest.rocksplicator.controller.tasks.LoadSSTTask.java
@Override
public void process(Context ctx) throws Exception {
    final String clusterName = ctx.getCluster();
    final String segment = getParameter().getSegment();
    ClusterBean clusterBean = ZKUtil.getClusterConfig(zkClient, clusterName);
    if (clusterBean == null) {
        LOG.error("Failed to get config for cluster {}.", clusterName);
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to read cluster config from zookeeper.");
        return;
    }
    SegmentBean segmentBean = clusterBean.getSegments().stream().filter(s -> s.getName().equals(segment))
            .findAny().orElse(null);
    if (segmentBean == null) {
        String errMsg = String.format("Segment %s not in cluster %s.", segment, clusterName);
        LOG.error(errMsg);
        ctx.getTaskQueue().failTask(ctx.getId(), errMsg);
        return;
    }

    final ExecutorService executor = Executors.newFixedThreadPool(getParameter().getConcurrency());
    try {
        // first pass load sst to masters
        doLoadSST(executor, segmentBean, Role.MASTER);
        LOG.info("First pass done.");
        // second pass load sst to slaves
        doLoadSST(executor, segmentBean, Role.SLAVE);
        LOG.info("Second pass done.");
    } catch (InterruptedException | ExecutionException ex) {
        LOG.error("Failed to load sst to cluster {}.", clusterName, ex);
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to load sst, error=" + ex.getMessage());
        return;
    }
    // shutdown() stops new submissions; shutdownNow() additionally interrupts any
    // tasks still running before the task is marked finished
    executor.shutdown();
    executor.shutdownNow();
    ctx.getTaskQueue().finishTask(ctx.getId(), "Finished loading sst to " + clusterName);
}
From source file:hivemall.mix.server.MixServerTest.java
@Test
public void test2ClientsZeroOneDenseModelWithMixCanceling() throws InterruptedException {
    final int port = NetUtils.getAvailablePort();
    CommandLine cl = CommandLineUtils.parseOptions(
            new String[] { "-port", Integer.toString(port), "-sync_threshold", "30" }, MixServer.getOptions());
    MixServer server = new MixServer(cl);
    ExecutorService serverExec = Executors.newSingleThreadExecutor();
    serverExec.submit(server);
    waitForState(server, ServerState.RUNNING);

    final ExecutorService clientsExec = Executors.newCachedThreadPool();
    for (int i = 0; i < 2; i++) {
        clientsExec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    invokeClient01("test2ClientsZeroOne", port, true, true);
                } catch (InterruptedException e) {
                    Assert.fail(e.getMessage());
                }
            }
        });
    }
    // as in the previous test, awaitTermination before shutdown acts as a bounded
    // wait for the clients before both executors are shut down
    clientsExec.awaitTermination(30, TimeUnit.SECONDS);
    clientsExec.shutdown();
    serverExec.shutdown();
}
From source file:hudson.FilePathTest.java
private List<Future<Integer>> whenFileIsCopied100TimesConcurrently(final File file) throws InterruptedException {
    List<Callable<Integer>> r = new ArrayList<Callable<Integer>>();
    for (int i = 0; i < 100; i++) {
        r.add(new Callable<Integer>() {
            public Integer call() throws Exception {
                class Sink extends OutputStream {
                    private Exception closed;
                    private volatile int count;

                    private void checkNotClosed() throws IOException {
                        if (closed != null)
                            throw new IOException(closed);
                    }

                    @Override
                    public void write(int b) throws IOException {
                        count++;
                        checkNotClosed();
                    }

                    @Override
                    public void write(byte[] b) throws IOException {
                        count += b.length;
                        checkNotClosed();
                    }

                    @Override
                    public void write(byte[] b, int off, int len) throws IOException {
                        count += len;
                        checkNotClosed();
                    }

                    @Override
                    public void close() throws IOException {
                        closed = new Exception();
                        //if (size!=count)
                        //    fail();
                    }
                }

                FilePath f = new FilePath(channels.french, file.getPath());
                Sink sink = new Sink();
                f.copyTo(sink);
                return sink.count;
            }
        });
    }

    ExecutorService es = Executors.newFixedThreadPool(100);
    try {
        // invokeAll waits for all copies to finish; shutdown in the finally block
        // releases the pool threads even if invokeAll throws
        return es.invokeAll(r);
    } finally {
        es.shutdown();
    }
}
From source file:com.emc.ecs.sync.CasMigrationTest.java
protected void delete(FPPool pool, List<String> clipIds) throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);
    System.out.print("Deleting clips");
    for (String clipId : clipIds) {
        service.submit(new ClipDeleter(pool, clipId));
    }
    // orderly shutdown: stop accepting tasks, wait for the deleters to finish,
    // then force-stop anything still running after the timeout
    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();
    System.out.println();
}