Usage examples for java.util.concurrent.ExecutorCompletionService.take()
public Future<V> take() throws InterruptedException
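Before the real-world examples, here is a minimal sketch of the common pattern these sources all follow: submit independent tasks, then call take() in a loop to receive each Future as its task completes. take() blocks until a completed task is available and returns results in completion order, not submission order; get() on the returned Future rethrows any exception the task threw, wrapped in ExecutionException. The class name, task count, and workload below are illustrative placeholders, not taken from the source files that follow.

import java.util.concurrent.*;

public class CompletionServiceTakeSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<>(executor);
        int tasks = 10;
        try {
            // Submit independent tasks; the completion service queues each Future as it finishes.
            for (int i = 0; i < tasks; i++) {
                final int n = i;
                ecs.submit(() -> {
                    Thread.sleep((long) (Math.random() * 100)); // simulate variable-length work
                    return n * n;
                });
            }
            // take() blocks until the next task completes; results arrive in completion order.
            for (int i = 0; i < tasks; i++) {
                Future<Integer> done = ecs.take();
                System.out.println("completed: " + done.get());
            }
        } finally {
            executor.shutdown();
        }
    }
}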
From source file:com.splout.db.hadoop.GeneratorCMD.java
public int run(String[] args) throws Exception {
    JCommander jComm = new JCommander(this);
    jComm.setProgramName(
            "Splout Tablespaces Generator. Generates tablespaces, ready to be deployed to a Splout Cluster.");
    try {
        jComm.parse(args);
    } catch (Throwable t) {
        t.printStackTrace();
        jComm.usage();
        return -1;
    }

    if (parallelism < 1) {
        System.err.println("Parallelism must be greater than 0.");
        System.exit(1);
    }

    log.info("Parsing input parameters...");

    // All the tablespaces that will be generated and deployed atomically, hashed by their name.
    // We generate this first so we can detect errors in the configuration before even using Hadoop.
    Map<String, TablespaceSpec> tablespacesToGenerate = new HashMap<String, TablespaceSpec>();

    // Partition maps to reuse at indexation. Used when sampling is skipped.
    final Map<String, PartitionMap> partitionMapsToReuse = new HashMap<String, PartitionMap>();

    for (String tablespaceFile : tablespaceFiles) {
        Path file = new Path(tablespaceFile);
        FileSystem fS = FileSystem.get(file.toUri(), getConf());
        if (!fS.exists(file)) {
            throw new IllegalArgumentException("Config input file: " + file + " doesn't exist!");
        }
        String strContents = HadoopUtils.fileToString(fS, file);
        JSONTablespaceDefinition def = JSONSerDe.deSer(strContents, JSONTablespaceDefinition.class);
        TablespaceSpec spec = def.build(conf);
        String name = def.getName();

        tablespacesToGenerate.put(name, spec);

        // Reusing partition maps?
        if (qnodeURL != null) {
            partitionMapsToReuse.put(name, retrievePartitionMapfromQNode(name));
        }
    }

    if (!FileSystem.getLocal(conf).equals(FileSystem.get(conf))) {
        File nativeLibs = new File("native");
        if (nativeLibs.exists()) {
            SploutHadoopConfiguration.addSQLite4JavaNativeLibsToDC(conf);
        }
    }

    Path out = new Path(output);
    FileSystem outFs = out.getFileSystem(getConf());
    HadoopUtils.deleteIfExists(outFs, out);

    ExecutorService executor = Executors.newFixedThreadPool(parallelism);
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(executor);
    ArrayList<Future<Boolean>> generatorFutures = new ArrayList<Future<Boolean>>();

    // Generate each tablespace
    for (final Map.Entry<String, TablespaceSpec> tablespace : tablespacesToGenerate.entrySet()) {
        Path tablespaceOut = new Path(out, tablespace.getKey());
        TablespaceSpec spec = tablespace.getValue();

        log.info("Generating view with Hadoop (" + tablespace.getKey() + ")");
        final TablespaceGenerator viewGenerator = new TablespaceGenerator(spec, tablespaceOut, this.getClass());

        generatorFutures.add(ecs.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                if (qnodeURL == null) {
                    viewGenerator.generateView(conf, samplingType, new TupleSampler.RandomSamplingOptions());
                    return true;
                } else {
                    viewGenerator.generateView(conf, partitionMapsToReuse.get(tablespace.getKey()));
                    return true;
                }
            }
        }));
    }

    // Wait for all tasks to finish.
    for (int i = 0; i < tablespacesToGenerate.size(); i++) {
        // get() rethrows any exception thrown by the callable.
        try {
            ecs.take().get();
        } catch (ExecutionException e) {
            // One job failed. Stop the rest.
            for (Future<Boolean> task : generatorFutures) {
                task.cancel(true);
            }
            executor.shutdown();
            throw e;
        }
    }

    executor.shutdown();

    log.info("Done!");
    return 0;
}
From source file:com.netflix.curator.framework.recipes.leader.TestLeaderLatch.java
@Test
public void testWaiting() throws Exception {
    final int PARTICIPANT_QTY = 10;

    ExecutorService executorService = Executors.newFixedThreadPool(PARTICIPANT_QTY);
    ExecutorCompletionService<Void> service = new ExecutorCompletionService<Void>(executorService);

    final Timing timing = new Timing();
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();

        final AtomicBoolean thereIsALeader = new AtomicBoolean(false);
        for (int i = 0; i < PARTICIPANT_QTY; ++i) {
            service.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    LeaderLatch latch = new LeaderLatch(client, PATH_NAME);
                    try {
                        latch.start();
                        Assert.assertTrue(latch.await(timing.forWaiting().seconds(), TimeUnit.SECONDS));
                        Assert.assertTrue(thereIsALeader.compareAndSet(false, true));
                        Thread.sleep((int) (10 * Math.random()));
                    } finally {
                        thereIsALeader.set(false);
                        latch.close();
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < PARTICIPANT_QTY; ++i) {
            service.take().get();
        }
    } finally {
        executorService.shutdown();
        IOUtils.closeQuietly(client);
    }
}
From source file:com.netflix.curator.framework.recipes.locks.TestReaper.java
private void testSimulationWithLocks(String namespace) throws Exception {
    final int LOCK_CLIENTS = 10;
    final int ITERATIONS = 250;
    final int MAX_WAIT_MS = 10;

    ExecutorService service = Executors.newFixedThreadPool(LOCK_CLIENTS);
    ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(service);

    Timing timing = new Timing();
    Reaper reaper = null;
    final CuratorFramework client = makeClient(timing, namespace);
    try {
        client.start();

        reaper = new Reaper(client, MAX_WAIT_MS / 2);
        reaper.start();
        reaper.addPath("/a/b");

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    final InterProcessMutex lock = new InterProcessMutex(client, "/a/b");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        lock.acquire();
                        try {
                            Thread.sleep((int) (Math.random() * MAX_WAIT_MS));
                        } finally {
                            lock.release();
                        }
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < LOCK_CLIENTS; ++i) {
            completionService.take().get();
        }

        Thread.sleep(timing.session());
        timing.sleepABit();

        Stat stat = client.checkExists().forPath("/a/b");
        Assert.assertNull("Child qty: " + ((stat != null) ? stat.getNumChildren() : 0), stat);
    } finally {
        service.shutdownNow();
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}
From source file:biz.c24.io.spring.integration.samples.fpml.PreRenderingFpmlGenerator.java
private List<Generator> preRender() throws Exception {
    List<Generator> result = new ArrayList<Generator>(THREADS);

    final TradeConfirmed tradeConfirmed = readTradeConfirmed();

    ExecutorCompletionService<Generator> completionService = new ExecutorCompletionService<Generator>(
            Executors.newFixedThreadPool(THREADS));

    for (int i = 0; i < THREADS; i++) {
        completionService.submit(new Callable<Generator>() {
            public Generator call() throws Exception {
                System.out.println("Rendering... ");
                OutputType ot = OutputType.BYTE_ARRAY;
                Random rand = new Random();
                TradeConfirmed myTradeConfirmed = (TradeConfirmed) tradeConfirmed.cloneDeep();
                Generator gen = new Generator();
                List<Object> payloads = new ArrayList<Object>(ITERATIONS);
                for (int j = 0; j < ITERATIONS; j++) {
                    TradeConfirmed fpML = randomizeFpML(myTradeConfirmed);
                    if (rand.nextInt(100) == 0) {
                        breakFpml(fpML);
                    }
                    Sink sink = ot.getSink(sinkFactory);
                    sink.writeObject(fpML);
                    Object payload = ot.getOutput(sink);
                    payloads.add(payload);
                }
                gen.payloads = payloads;
                return gen;
            }
        });
    }

    for (int i = 0; i < THREADS; i++) {
        Future<Generator> future = completionService.take();
        result.add(future.get());
    }

    return result;
}
From source file:com.netflix.curator.framework.recipes.barriers.TestDistributedDoubleBarrier.java
@Test
public void testOverSubscribed() throws Exception {
    final Timing timing = new Timing();
    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    ExecutorService service = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(service);
    try {
        client.start();

        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch latch = new CountDownLatch(1);
        for (int i = 0; i < (QTY + 1); ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    DistributedDoubleBarrier barrier = new DistributedDoubleBarrier(client, "/barrier", QTY) {
                        @Override
                        protected List<String> getChildrenForEntering() throws Exception {
                            semaphore.release();
                            Assert.assertTrue(timing.awaitLatch(latch));
                            return super.getChildrenForEntering();
                        }
                    };
                    Assert.assertTrue(barrier.enter(timing.seconds(), TimeUnit.SECONDS));
                    Assert.assertTrue(barrier.leave(timing.seconds(), TimeUnit.SECONDS));
                    return null;
                }
            });
        }

        Assert.assertTrue(semaphore.tryAcquire(QTY + 1, timing.seconds(), TimeUnit.SECONDS)); // wait until all QTY+1 barriers are trying to enter
        latch.countDown();

        for (int i = 0; i < (QTY + 1); ++i) {
            completionService.take().get(); // to check for assertions
        }
    } finally {
        service.shutdown();
        IOUtils.closeQuietly(client);
    }
}
From source file:com.alibaba.otter.node.etl.load.loader.db.FileLoadAction.java
/**
 * Moves the files for this load, failing fast as soon as any worker reports an exception.
 */
private void moveFiles(FileLoadContext context, List<FileData> fileDatas, File rootDir) {
    Exception exception = null;
    adjustPoolSize(context);
    ExecutorCompletionService<Exception> executorComplition = new ExecutorCompletionService<Exception>(
            executor);

    List<Future<Exception>> results = new ArrayList<Future<Exception>>();
    for (FileData fileData : fileDatas) {
        Future<Exception> future = executorComplition.submit(new FileLoadWorker(context, rootDir, fileData));
        results.add(future);

        // fast fail: the future may already be done if the task ran in the submitting thread
        // (e.g. under a CallerRunsPolicy)
        if (future.isDone()) {
            try {
                exception = future.get();
            } catch (Exception e) {
                exception = e;
            }

            if (exception != null) {
                for (Future<Exception> result : results) {
                    if (!result.isDone() && !result.isCancelled()) {
                        result.cancel(true);
                    }
                }
                throw exception instanceof LoadException ? (LoadException) exception
                        : new LoadException(exception);
            }
        }
    }

    int resultSize = results.size();
    int cursor = 0;
    while (cursor < resultSize) {
        try {
            Future<Exception> result = executorComplition.take();
            exception = result.get();
        } catch (Exception e) {
            exception = e;
            break;
        }
        cursor++;
    }

    if (cursor != resultSize) {
        // a worker failed: cancel the tasks that are still pending
        for (Future<Exception> future : results) {
            if (!future.isDone() && !future.isCancelled()) {
                future.cancel(true);
            }
        }
    }

    if (exception != null) {
        throw exception instanceof LoadException ? (LoadException) exception : new LoadException(exception);
    }
}
From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImplIT.java
@Test
public void testSummaryMultiThreadDedup() throws ZepException, InterruptedException, ExecutionException {
    // Attempts to create the same event from multiple threads - verifies we get the appropriate de-duping
    // behavior for the count and that we are holding the lock on the database appropriately.
    int poolSize = 10;
    final CyclicBarrier barrier = new CyclicBarrier(poolSize);
    ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
    ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(executorService);

    final Event event = EventTestUtils.createSampleEvent();
    final EventPreCreateContext context = new EventPreCreateContextImpl();

    for (int i = 0; i < poolSize; i++) {
        ecs.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                barrier.await();
                return eventSummaryDao.create(event, context);
            }
        });
    }

    String uuid = null;
    for (int i = 0; i < poolSize; i++) {
        String thisUuid = ecs.take().get();
        if (uuid == null) {
            assertNotNull(thisUuid);
            uuid = thisUuid;
        } else {
            assertEquals(uuid, thisUuid);
        }
    }

    // Now look up the event and make sure the count is equal to the number of submitted workers
    assertEquals(poolSize, this.eventSummaryDao.findByUuid(uuid).getCount());
}
From source file:org.ocelotds.integration.AbstractOcelotTest.java
/**
 *
 * @param <T>
 * @param nb
 * @param client
 * @param returnClass
 * @param ds
 * @param methodName
 * @param params
 * @return
 */
protected <T> Collection<T> testCallMultiMethodsInClient(int nb, final Client client,
        final Class<T> returnClass, final Class ds, final String methodName, final String... params) {
    ExecutorCompletionService<ResultMonitored<T>> executorCompletionService = new ExecutorCompletionService<>(
            managedExecutor);
    Collection<T> results = new ArrayList<>();
    long t0 = System.currentTimeMillis();
    for (int i = 0; i < nb; i++) {
        final int num = i;
        Callable<ResultMonitored<T>> task = new Callable<ResultMonitored<T>>() {
            @Override
            public ResultMonitored<T> call() {
                Client cl = client;
                if (cl == null) {
                    cl = getClient();
                }
                long t0 = System.currentTimeMillis();
                T result = getJava(returnClass,
                        (String) testRSCallWithoutResult(cl, ds, methodName, params).getResponse());
                ResultMonitored<T> resultMonitored = new ResultMonitored<>(result, num);
                long t1 = System.currentTimeMillis();
                resultMonitored.setTime(t1 - t0);
                return resultMonitored;
            }
        };
        executorCompletionService.submit(task);
    }
    for (int i = 0; i < nb; i++) {
        try {
            Future<ResultMonitored<T>> fut = executorCompletionService.take();
            ResultMonitored<T> res = fut.get();
            // System.out.println("Time of execution of service " + res.getNum() + ": " + res.getTime() + " ms");
            results.add(res.getResult());
        } catch (InterruptedException | ExecutionException e) {
        }
    }
    long t1 = System.currentTimeMillis();
    System.out.println("Time of execution of all services : " + (t1 - t0) + " ms");
    assertThat(results).hasSize(nb);
    return results;
}
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java
@Test
public void testReleaseInChunks() throws Exception {
    final int MAX_LEASES = 11;
    final int THREADS = 100;

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    client.start();
    try {
        final Stepper latch = new Stepper();
        final Random random = new Random();
        final Counter counter = new Counter();
        ExecutorService service = Executors.newCachedThreadPool();
        ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(service);
        for (int i = 0; i < THREADS; ++i) {
            completionService.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test",
                            MAX_LEASES);
                    Lease lease = semaphore.acquire(10, TimeUnit.SECONDS);
                    if (lease == null) {
                        throw new Exception("timed out");
                    }
                    try {
                        synchronized (counter) {
                            ++counter.currentCount;
                            if (counter.currentCount > counter.maxCount) {
                                counter.maxCount = counter.currentCount;
                            }
                            counter.notifyAll();
                        }
                        latch.await();
                    } finally {
                        synchronized (counter) {
                            --counter.currentCount;
                        }
                        semaphore.returnLease(lease);
                    }
                    return null;
                }
            });
        }

        int remaining = THREADS;
        while (remaining > 0) {
            int times = Math.min(random.nextInt(5) + 1, remaining);
            latch.countDown(times);
            remaining -= times;
            Thread.sleep(random.nextInt(100) + 1);
        }

        for (int i = 0; i < THREADS; ++i) {
            completionService.take();
        }

        synchronized (counter) {
            Assert.assertTrue(counter.currentCount == 0);
            Assert.assertTrue(counter.maxCount > 0);
            Assert.assertTrue(counter.maxCount <= MAX_LEASES);
            System.out.println(counter.maxCount);
        }
    } finally {
        client.close();
    }
}
From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Test
public void testMulti() throws Exception {
    final String PATH = "/queue";
    final int CLIENT_QTY = 4;
    final int MAX_ITEMS = 10;
    final int ADD_ITEMS = MAX_ITEMS * 100;
    final int SLOP_FACTOR = 2;

    final QueueConsumer<String> consumer = new QueueConsumer<String>() {
        @Override
        public void consumeMessage(String message) throws Exception {
            Thread.sleep(10);
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
        }
    };

    final Timing timing = new Timing();
    final ExecutorService executor = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath(PATH);

        final CountDownLatch isWaitingLatch = new CountDownLatch(1);
        final AtomicBoolean isDone = new AtomicBoolean(false);
        final List<Integer> counts = new CopyOnWriteArrayList<Integer>();
        final Object lock = new Object();
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Watcher watcher = new Watcher() {
                    @Override
                    public void process(WatchedEvent event) {
                        synchronized (lock) {
                            lock.notifyAll();
                        }
                    }
                };

                while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) {
                    synchronized (lock) {
                        int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size();
                        counts.add(size);
                        isWaitingLatch.countDown();
                        lock.wait();
                    }
                }
                return null;
            }
        });
        isWaitingLatch.await();

        for (int i = 0; i < CLIENT_QTY; ++i) {
            final int index = i;
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = null;
                    DistributedQueue<String> queue = null;

                    try {
                        client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                                timing.connection(), new RetryOneTime(1));
                        client.start();
                        queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor)
                                .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue();
                        queue.start();

                        for (int i = 0; i < ADD_ITEMS; ++i) {
                            queue.put("" + index + "-" + i);
                        }
                    } finally {
                        IOUtils.closeQuietly(queue);
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.take().get();
        }

        isDone.set(true);
        synchronized (lock) {
            lock.notifyAll();
        }

        for (int count : counts) {
            Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR));
        }
    } finally {
        executor.shutdownNow();
        IOUtils.closeQuietly(client);
    }
}