List of usage examples for java.util.concurrent ExecutorService submit
Future<?> submit(Runnable task);
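Before the larger examples below, a minimal, self-contained sketch of the basic pattern: submit a task, get a Future back, and use it to wait for completion. The task bodies here are illustrative, not from any of the source files below.

    import java.util.concurrent.*;

    public class SubmitBasics {
        public static void main(String[] args) throws Exception {
            ExecutorService executor = Executors.newSingleThreadExecutor();

            // submit(Runnable) returns a Future<?> whose get() yields null on success.
            Future<?> runnableFuture = executor.submit(() -> System.out.println("running"));
            runnableFuture.get(); // blocks until the task completes

            // The submit(Callable<T>) overload returns a Future<T> carrying the task's result.
            Future<Integer> callableFuture = executor.submit(() -> 6 * 7);
            System.out.println("result: " + callableFuture.get()); // prints 42

            executor.shutdown();
        }
    }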
From source file:com.sm.connector.server.ExecMapReduce.java
/**
 * Each thread processes (end - begin) / noOfThread records.
 *
 * @param store      name of the store
 * @param noOfThread how many threads to run concurrently
 * @param begin      first record# for this node
 * @param end        last record# for this node
 * @return the result produced by the reducer
 */
public Object execute(String store, int noOfThread, int begin, int end) {
    logger.info("execute " + store + " threads " + noOfThread + " begin " + begin + " end " + end);
    if (noOfThread <= 0 || begin >= end)
        throw new RuntimeException(
                "number of thread " + noOfThread + " must be > 0 or begin " + begin + " >= end " + end);
    serverStore = serverStoreMap.get(store);
    if (serverStore == null)
        throw new RuntimeException("can not find ServerStore " + store);
    // how many records need to be processed
    int totalRec = end - begin;
    int blockSize = (totalRec % noOfThread == 0 ? totalRec / noOfThread : (totalRec / noOfThread) + 1);
    CountDownLatch countDownLatch = new CountDownLatch(noOfThread);
    ExecutorService executor = Executors.newFixedThreadPool(noOfThread, new ThreadPoolFactory("exec"));
    List<Runnable> runnableList = new ArrayList<Runnable>(noOfThread);
    logger.info("start to run " + noOfThread + " threads block size " + blockSize + " for " + store
            + " total " + totalRec);
    for (int i = 0; i < noOfThread; i++) {
        try {
            //T t = tClass.newInstance();
            T t = (T) QueryUtils.createInstance(tClass);
            if (i < noOfThread - 1) {
                RunThread runThread = new RunThread(countDownLatch, begin + blockSize * i,
                        begin + blockSize * (i + 1), t, i, store);
                runnableList.add(runThread);
                executor.submit(runThread);
            } else {
                // the last block takes whatever remains
                RunThread runThread = new RunThread(countDownLatch, begin + blockSize * i,
                        begin + totalRec, t, i, store);
                runnableList.add(runThread);
                executor.submit(runThread);
            }
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
    try {
        countDownLatch.await(timeout * 10, TimeUnit.MILLISECONDS);
    } catch (InterruptedException ex) {
        logger.warn(ex.getMessage(), ex);
    } finally {
        executor.shutdown();
        // Note: returning from a finally block discards any pending exception.
        return mapReduce.reduce(list);
    }
}
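The block-partitioning arithmetic above is the common ceiling-division idiom: the first noOfThread - 1 workers each take blockSize = ceil(totalRec / noOfThread) records, and the last worker takes the remainder. A minimal, self-contained sketch of the same partitioning, with illustrative names rather than the classes from the source:

    import java.util.concurrent.*;

    public class RangePartitionDemo {
        public static void main(String[] args) throws InterruptedException {
            int begin = 0, end = 10, threads = 3;
            int total = end - begin;
            int blockSize = (total + threads - 1) / threads; // ceil(total / threads)
            ExecutorService pool = Executors.newFixedThreadPool(threads);
            CountDownLatch done = new CountDownLatch(threads);
            for (int i = 0; i < threads; i++) {
                int from = begin + blockSize * i;
                int to = (i == threads - 1) ? end : from + blockSize; // last block takes the remainder
                pool.submit(() -> {
                    try {
                        System.out.println("processing [" + from + ", " + to + ")");
                    } finally {
                        done.countDown();
                    }
                });
            }
            done.await();
            pool.shutdown();
        }
    }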
From source file:io.pravega.client.stream.impl.ControllerImplTest.java
@Test
public void testParallelCreateStream() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<Boolean> createStreamStatus;
                    createStreamStatus = controllerClient
                            .createStream(StreamConfiguration.builder().streamName("streamparallel")
                                    .scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
                    log.info("{}", createStreamStatus.get());
                    assertTrue(createStreamStatus.get());
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when creating stream: {}", e);
                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
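The Semaphore here starts at -19, so the main thread's acquire() only succeeds after 20 release() calls (10 tasks x 2 iterations each), turning the semaphore into a completion latch; on failure, release(20) unblocks the main thread immediately instead of waiting for the other workers. A standalone sketch of that idiom, with an illustrative task body:

    import java.util.concurrent.*;

    public class NegativeSemaphoreLatchDemo {
        public static void main(String[] args) throws InterruptedException {
            int tasks = 10, iterationsPerTask = 2;
            ExecutorService pool = Executors.newFixedThreadPool(tasks);
            // Starting at -(n - 1) means acquire() blocks until n releases have happened.
            Semaphore done = new Semaphore(-(tasks * iterationsPerTask - 1));
            for (int i = 0; i < tasks; i++) {
                pool.submit(() -> {
                    for (int j = 0; j < iterationsPerTask; j++) {
                        // ... do one unit of work ...
                        done.release();
                    }
                });
            }
            done.acquire(); // unblocks after all 20 releases
            pool.shutdownNow();
        }
    }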
From source file:io.pravega.client.stream.impl.ControllerImplTest.java
@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<StreamSegments> streamSegments;
                    streamSegments = controllerClient.getCurrentSegments("scope1", "streamparallel");
                    assertTrue(streamSegments.get().getSegments().size() == 2);
                    assertEquals(new Segment("scope1", "streamparallel", 0),
                            streamSegments.get().getSegmentForKey(0.2));
                    assertEquals(new Segment("scope1", "streamparallel", 1),
                            streamSegments.get().getSegmentForKey(0.6));
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when getting segments: {}", e);
                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
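The same fan-out/await pattern is often written with a CountDownLatch instead of a negative-permit Semaphore. The latch has no single-call equivalent of release(20) for early unblocking, so a failure flag plus countDown() in a finally block is the usual shape. A minimal sketch under those assumptions, with illustrative names:

    import java.util.concurrent.*;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class LatchFanOutDemo {
        public static void main(String[] args) throws InterruptedException {
            int tasks = 10, iterationsPerTask = 2;
            ExecutorService pool = Executors.newFixedThreadPool(tasks);
            CountDownLatch done = new CountDownLatch(tasks * iterationsPerTask);
            AtomicBoolean success = new AtomicBoolean(true);
            for (int i = 0; i < tasks; i++) {
                pool.submit(() -> {
                    for (int j = 0; j < iterationsPerTask; j++) {
                        try {
                            // ... perform the parallel call and assertions ...
                        } catch (Exception e) {
                            success.set(false);
                        } finally {
                            done.countDown(); // always counted, success or failure
                        }
                    }
                });
            }
            done.await();
            pool.shutdownNow();
            System.out.println("all tasks succeeded: " + success.get());
        }
    }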
From source file:com.netcrest.pado.tools.pado.command.temporal.java
/**
 * Builds Lucene indexes.
 *
 * @param gridIds
 *            null or empty to build indexes for all grids.
 * @param fullPaths
 *            null or empty to build all paths.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void runLucene(String gridIds[], String... fullPaths) {
    if (fullPaths == null || fullPaths.length == 0) {
        if (gridIds == null || gridIds.length == 0) {
            // This code is not reachable by the 'temporal' command.
            // Grid IDs are always specified.
            PadoShell.println("Building Lucene indexes for ALL temporal paths for ALL grids... Please wait.");
            ILuceneBiz luceneBiz = (ILuceneBiz) SharedCache.getSharedCache().getPado().getCatalog()
                    .newInstance(ILuceneBiz.class);
            gridIds = SharedCache.getSharedCache().getPado().getCatalog().getGridIds();
            for (String gridId : gridIds) {
                PadoShell.println("   " + gridId + ": <all paths>");
            }
            luceneBiz.buildAllIndexes();
        } else {
            PadoShell.println("Building Lucene indexes for ALL temporal paths... Please wait.");
            ExecutorService es = Executors.newFixedThreadPool(gridIds.length);
            Future futures[] = new Future[gridIds.length];
            int i = 0;
            for (final String gridId : gridIds) {
                PadoShell.println("   " + gridId + ": <all paths>");
                futures[i++] = es.submit(new Callable() {
                    @Override
                    public Object call() throws Exception {
                        ILuceneBiz luceneBiz = (ILuceneBiz) SharedCache.getSharedCache().getPado()
                                .getCatalog().newInstance(ILuceneBiz.class);
                        luceneBiz.buildAllPathIndexes(gridId);
                        return true;
                    }
                });
            }
            for (Future future : futures) {
                try {
                    future.get();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }
        }
    } else {
        PadoShell.println("Building Lucene indexes for the specified temporal paths... Please wait.");
        ILuceneBiz luceneBiz = (ILuceneBiz) SharedCache.getSharedCache().getPado().getCatalog()
                .newInstance(ILuceneBiz.class);
        Map<String, List<String>> map = PadoShellUtil.getGridPathMap(padoShell, fullPaths);
        for (Map.Entry<String, List<String>> entry : map.entrySet()) {
            luceneBiz.buildIndexes(entry.getKey(), entry.getValue().toArray(new String[0]));
        }
    }
    PadoShell.println("Lucene indexing complete.");
}
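Submitting one Callable per grid and then blocking on each Future in turn, as above, is what ExecutorService.invokeAll does in a single call: it submits the whole batch and returns only when every task has completed. A minimal sketch of that alternative, with illustrative task bodies:

    import java.util.*;
    import java.util.concurrent.*;

    public class InvokeAllDemo {
        public static void main(String[] args) throws InterruptedException {
            String[] gridIds = { "grid1", "grid2", "grid3" };
            ExecutorService es = Executors.newFixedThreadPool(gridIds.length);
            List<Callable<Boolean>> tasks = new ArrayList<>();
            for (String gridId : gridIds) {
                tasks.add(() -> {
                    System.out.println("building indexes for " + gridId);
                    return true;
                });
            }
            // invokeAll blocks until all tasks finish; every returned Future is already done.
            for (Future<Boolean> f : es.invokeAll(tasks)) {
                try {
                    f.get();
                } catch (ExecutionException e) {
                    e.printStackTrace(); // surface per-task failures
                }
            }
            es.shutdown();
        }
    }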
From source file:bes.injector.InjectorBurnTest.java
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }
    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of progress
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);
        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        // System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis",
        //         executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
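A detail worth noting in the example above is the deadline-based wait: each future.get(timeout - System.nanoTime(), NANOSECONDS) call recomputes its remaining budget from a fixed deadline, so the whole batch shares one time allowance rather than each get() receiving the full timeout. A minimal sketch of that idiom, with illustrative task bodies and a hypothetical 500 ms deadline:

    import java.util.*;
    import java.util.concurrent.*;

    public class DeadlineWaitDemo {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            List<Future<?>> futures = new ArrayList<>();
            for (int i = 0; i < 4; i++)
                futures.add(pool.submit(() -> { /* ... simulated work ... */ }));
            long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(500);
            try {
                for (Future<?> f : futures)
                    // the remaining time shrinks as earlier futures consume the shared budget
                    f.get(deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
            } catch (TimeoutException e) {
                System.out.println("batch missed its 500ms deadline");
            } finally {
                pool.shutdownNow();
            }
        }
    }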
From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java
private int getPagesSize(final String startDate, final String endDate, final String country,
        final List<Dimension> requestedDimensions, final List<ApiDimensionFilter> apiDimensionFilters)
        throws IOException {
    final ExecutorService es = Executors.newCachedThreadPool(ExecutorsUtils
            .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
    int startRow = 0;
    long groupSize = Math.max(1, Math.round(API_REQUESTS_PER_SECOND));
    List<Future<Integer>> results = new ArrayList<>((int) groupSize);

    while (true) {
        for (int i = 0; i < groupSize; ++i) {
            startRow += GoogleWebmasterClient.API_ROW_LIMIT;
            final int start = startRow;
            final String interruptedMsg = String.format(
                    "Interrupted while trying to get the size of all pages for %s. Current start row is %d.",
                    country, start);
            Future<Integer> submit = es.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    log.info(String.format("Getting page size from %s...", start));
                    while (true) {
                        try {
                            LIMITER.acquirePermits(1);
                        } catch (InterruptedException e) {
                            log.error("RateBasedLimiter: " + interruptedMsg, e);
                            return -1;
                        }
                        if (Thread.interrupted()) {
                            log.error(interruptedMsg);
                            return -1;
                        }
                        try {
                            List<String> pages = _client.getPages(_siteProperty, startDate, endDate, country,
                                    GoogleWebmasterClient.API_ROW_LIMIT, requestedDimensions,
                                    apiDimensionFilters, start);
                            if (pages.size() < GoogleWebmasterClient.API_ROW_LIMIT) {
                                return pages.size() + start; // Figured out the size.
                            } else {
                                return -1;
                            }
                        } catch (IOException e) {
                            log.info(String.format("Getting page size from %s failed. Retrying...", start));
                        }
                    }
                }
            });
            results.add(submit);
        }
        // Check the results group in order. The first non-negative count indicates the size of total pages.
        for (Future<Integer> result : results) {
            try {
                Integer integer = result.get(GET_PAGE_SIZE_TIME_OUT, TimeUnit.MINUTES);
                if (integer >= 0) {
                    es.shutdownNow();
                    return integer;
                }
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
            } catch (TimeoutException e) {
                throw new RuntimeException(String.format(
                        "Exceeding the timeout of %d minutes while getting the total size of all pages.",
                        GET_PAGE_SIZE_TIME_OUT), e);
            }
        }
        results.clear();
    }
}
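Two idioms combine in the example above: a daemon-thread factory so stray workers cannot keep the JVM alive, and shutdownNow() on the first successful result, which interrupts the remaining probes. The Thread.interrupted() check inside the Callable is what makes those tasks actually cancellable. A minimal sketch of an interrupt-aware task under those assumptions, with illustrative names and timings:

    import java.util.concurrent.*;

    public class CancellableProbeDemo {
        public static void main(String[] args) throws Exception {
            ExecutorService es = Executors.newCachedThreadPool();
            Future<Integer> probe = es.submit(() -> {
                while (true) {
                    // Cooperatively observe interruption triggered by shutdownNow() or cancel(true).
                    if (Thread.interrupted()) {
                        return -1;
                    }
                    // ... attempt one unit of work; return a non-negative answer when found ...
                    TimeUnit.MILLISECONDS.sleep(50); // also throws InterruptedException if interrupted
                }
            });
            TimeUnit.MILLISECONDS.sleep(200);
            es.shutdownNow(); // interrupts the running probe
            es.awaitTermination(1, TimeUnit.SECONDS);
            System.out.println("probe done after interrupt: " + probe.isDone());
        }
    }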
From source file:com.emc.ecs.sync.storage.CasStorageTest.java
private List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();
    return clipIds;
}
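The shutdown() / awaitTermination() / shutdownNow() sequence above is the standard two-phase shutdown idiom from the ExecutorService javadoc: stop accepting new tasks, wait a bounded time for in-flight tasks to finish, then interrupt whatever remains. A minimal sketch with an illustrative 30-second budget:

    import java.util.concurrent.*;

    public class TwoPhaseShutdownDemo {
        static void shutdownAndAwait(ExecutorService pool) {
            pool.shutdown(); // no new tasks accepted; queued tasks still run
            try {
                if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                    pool.shutdownNow(); // interrupt anything still running
                    if (!pool.awaitTermination(30, TimeUnit.SECONDS))
                        System.err.println("pool did not terminate");
                }
            } catch (InterruptedException e) {
                pool.shutdownNow();
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
        }

        public static void main(String[] args) {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            for (int i = 0; i < 8; i++)
                pool.submit(() -> System.out.println("task on " + Thread.currentThread().getName()));
            shutdownAndAwait(pool);
        }
    }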
From source file:com.marketplace.Main.java
/**
 * Creates an <code>ImageThread</code> for each app stored in the database.
 *
 * @return a set of <code>Future</code>s for the submitted <code>ImageThread</code>s
 */
private Set<Future<?>> createImageThread() {
    log.info("Creating threads for fetching images for app(s)");
    Fetcher fetcher = new Fetcher();
    ExecutorService executorService = Executors.newFixedThreadPool(15);
    Set<Future<?>> set = new HashSet<Future<?>>();
    List<Session> imageSessions = new LinkedList<Session>();
    try {
        // Create 3 new Gingerbread Sessions
        for (int i = 0; i < 3; i++) {
            imageSessions.add(this.sessionManager.createNewSession(AndroidVersion.GINGERBREAD));
        }
        CyclicIterator<Session> sessions = new CyclicIterator<Session>(imageSessions);
        NextAppResponse[] appsResponse = fetcher.getNextAppIds(Main.imageIndex);
        if (appsResponse.length == 0) {
            log.info("Reached the end of collection. Resetting the start index to 0.");
            Main.imageIndex = 0;
            appsResponse = fetcher.getNextAppIds(Main.imageIndex);
        }
        for (NextAppResponse nextAppResponse : appsResponse) {
            set.add(executorService.submit(new ImageThread(sessions.next(), fetcher,
                    nextAppResponse.app.id, nextAppResponse.app.appId)));
        }
    } catch (UserUnavailableException e) {
        e.printStackTrace();
    } catch (ConnectivityException e) {
        e.printStackTrace();
    }
    return set;
}
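Collecting the Future<?> returned by each submit() call, as above, lets a caller later block until every task finishes and surface per-task failures, even when the tasks return no value. A minimal sketch of draining such a set, with illustrative names:

    import java.util.*;
    import java.util.concurrent.*;

    public class AwaitFutureSetDemo {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            Set<Future<?>> futures = new HashSet<>();
            for (int i = 0; i < 10; i++) {
                final int id = i;
                futures.add(pool.submit(() -> System.out.println("fetching images for app " + id)));
            }
            for (Future<?> f : futures) {
                try {
                    f.get(); // blocks until this task finishes; rethrows its exception if it failed
                } catch (ExecutionException e) {
                    e.getCause().printStackTrace();
                }
            }
            pool.shutdown();
        }
    }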