Example usage for java.util.concurrent.CyclicBarrier: CyclicBarrier(int parties)

Introduction

On this page you can find example usages of the java.util.concurrent.CyclicBarrier constructor CyclicBarrier(int parties).

Prototype

public CyclicBarrier(int parties) 

Document

Creates a new CyclicBarrier that will trip when the given number of parties (threads) are waiting upon it, and does not perform a predefined action when the barrier is tripped.
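
For orientation before the project examples below, here is a minimal self-contained sketch (the class and worker names are illustrative, not taken from any project on this page): three worker threads call await() on a barrier built with CyclicBarrier(3), and none of them proceeds until the last one arrives.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class CyclicBarrierSketch {
    public static void main(String[] args) {
        final int parties = 3;
        // no barrier action is supplied, so nothing extra runs when the barrier trips
        final CyclicBarrier barrier = new CyclicBarrier(parties);
        for (int i = 0; i < parties; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("Worker " + id + " waiting at the barrier");
                    barrier.await(); // blocks until all parties have called await()
                    System.out.println("Worker " + id + " released");
                } catch (InterruptedException | BrokenBarrierException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
    }
}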

Usage

From source file: org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testCheckJobCompleteSuccess() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.COMMITTING);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.COMMITTING);

    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.SUCCEEDED);

    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.SUCCEEDED);

    dispatcher.stop();
    commitHandler.stop();
}

From source file: org.apache.flume.channel.kafka.TestKafkaChannel.java

private List<Event> pullEvents(final KafkaChannel channel, ExecutorCompletionService<Void> submitterSvc,
        final int total, final boolean testRollbacks, final boolean retryAfterRollback) {
    final List<Event> eventsPulled = Collections.synchronizedList(new ArrayList<Event>(50));
    final CyclicBarrier barrier = new CyclicBarrier(5);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicInteger rolledBackCount = new AtomicInteger(0);
    final AtomicBoolean startedGettingEvents = new AtomicBoolean(false);
    final AtomicBoolean rolledBack = new AtomicBoolean(false);
    for (int k = 0; k < 5; k++) {
        final int index = k;
        submitterSvc.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Transaction tx = null;
                final List<Event> eventsLocal = Lists.newLinkedList();
                int takenByThisThread = 0;
                channel.registerThread();
                Thread.sleep(1000);
                barrier.await();
                while (counter.get() < (total - rolledBackCount.get())) {
                    if (tx == null) {
                        tx = channel.getTransaction();
                        tx.begin();
                    }
                    try {
                        Event e = channel.take();
                        if (e != null) {
                            startedGettingEvents.set(true);
                            eventsLocal.add(e);
                        } else {
                            if (testRollbacks && index == 4 && (!rolledBack.get())
                                    && startedGettingEvents.get()) {
                                tx.rollback();
                                tx.close();
                                tx = null;
                                rolledBack.set(true);
                                final int eventsLocalSize = eventsLocal.size();
                                eventsLocal.clear();
                                if (!retryAfterRollback) {
                                    rolledBackCount.set(eventsLocalSize);
                                    return null;
                                }
                            } else {
                                tx.commit();
                                tx.close();
                                tx = null;
                                eventsPulled.addAll(eventsLocal);
                                counter.getAndAdd(eventsLocal.size());
                                eventsLocal.clear();
                            }
                        }
                    } catch (Exception ex) {
                        eventsLocal.clear();
                        if (tx != null) {
                            tx.rollback();
                            tx.close();
                        }
                        tx = null;
                        ex.printStackTrace();
                    }
                }
                // Close txn.
                return null;
            }
        });
    }
    return eventsPulled;
}

From source file: org.acmsl.queryj.api.handlers.AbstractTemplateWritingHandler.java

/**
 * Writes the templates.
 * @param templates the templates.
 * @param engineName the engine name.
 * @param parameters the parameters.
 * @param charset the file encoding.
 * @param templateGenerator the template generator.
 * @param threadCount the number of threads to use.
 * @param rootDir the root dir.
 * @return the futures for the concurrent threads.
 * @throws QueryJBuildException if the templates cannot be written.
 */
@NotNull
@SuppressWarnings("unused")
protected List<Future<?>> writeTemplatesMultithread2ndVersion(@Nullable final List<T> templates,
        @NotNull final String engineName, @NotNull final QueryJCommand parameters,
        @NotNull final Charset charset, @NotNull final TG templateGenerator, final int threadCount,
        @NotNull final File rootDir) throws QueryJBuildException {
    @NotNull
    final List<Future<?>> result;

    if (templates != null) {
        result = new ArrayList<>(templates.size());

        @NotNull
        final ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);

        @NotNull
        final CyclicBarrier round = new CyclicBarrier(threadCount);

        @NotNull
        AtomicInteger index = new AtomicInteger(0);

        int intIndex;

        @Nullable
        final Log t_Log = UniqueLogFactory.getLog(AbstractTemplateWritingHandler.class);

        for (@Nullable
        final T t_Template : templates) {
            if (t_Template != null) {
                intIndex = index.incrementAndGet();

                if (intIndex <= threadCount) {
                    if (t_Log != null) {
                        t_Log.info("Starting a new thread " + intIndex + "/" + threadCount);
                    }

                    result.add(threadPool.submit((Runnable) buildGeneratorThread(t_Template, templateGenerator,
                            retrieveOutputDir(t_Template.getTemplateContext(), rootDir, parameters), rootDir,
                            charset, intIndex, round, parameters)));
                } else {
                    if (t_Log != null) {
                        t_Log.info("No threads available " + intIndex + "/" + threadCount);
                    }

                    index = new AtomicInteger(0);

                    try {
                        round.await();
                    } catch (@NotNull final InterruptedException interrupted) {
                        if (t_Log != null) {
                            t_Log.info("Thread pool interrupted while waiting", interrupted);
                        }
                    } catch (@NotNull final BrokenBarrierException brokenBarrier) {
                        if (t_Log != null) {
                            t_Log.info(BROKEN_BARRIER_LITERAL, brokenBarrier);
                        }
                    }

                    if (t_Log != null) {
                        t_Log.info("Resetting thread pool (shutdown? " + threadPool.isShutdown() + ")");
                    }

                    round.reset();
                }
            }
        }
    } else {
        result = new ArrayList<>(0);
    }

    return result;
}

From source file: com.tc.objectserver.impl.ObjectRequestManagerTest.java

public void testMultipleRequestResponseObjects() {
    final TestObjectManager objectManager = new TestObjectManager(persistor.getManagedObjectPersistor());
    final TestDSOChannelManager channelManager = new TestDSOChannelManager();
    final TestClientStateManager clientStateManager = new TestClientStateManager();
    final TestSink requestSink = new TestSink();
    final TestSink respondSink = new TestSink();
    final ObjectRequestManagerImpl objectRequestManager = new ObjectRequestManagerImpl(objectManager,
            channelManager, clientStateManager, requestSink, respondSink, new ObjectStatsRecorder());

    final int objectsToBeRequested = 100;
    int numberOfRequestsMade = objectsToBeRequested / ObjectRequestManagerImpl.SPLIT_SIZE;
    if (objectsToBeRequested % ObjectRequestManagerImpl.SPLIT_SIZE > 0) {
        numberOfRequestsMade++;
    }
    final ObjectIDSet ids = createObjectIDSet(objectsToBeRequested);

    final List<ObjectRequestThread> objectRequestThreadList = new ArrayList<ObjectRequestThread>();
    final int numberOfRequestThreads = 10;
    final CyclicBarrier requestBarrier = new CyclicBarrier(numberOfRequestThreads);

    for (int i = 0; i < numberOfRequestThreads; i++) {
        final ClientID clientID = new ClientID(i);
        final ObjectRequestThread objectRequestThread = new ObjectRequestThread(requestBarrier,
                objectRequestManager, clientID, new ObjectRequestID(i), ids, LOOKUP_STATE.CLIENT);
        objectRequestThreadList.add(objectRequestThread);
    }

    // now start all the request threads
    for (final ObjectRequestThread thread : objectRequestThreadList) {
        thread.start();
    }

    // now wait for all the threads
    for (final ObjectRequestThread thread : objectRequestThreadList) {
        try {
            thread.join();
        } catch (final InterruptedException e) {
            throw new AssertionError(e);
        }
    }

    System.out.println("done doing requests.");
    assertEquals(respondSink.size(), numberOfRequestsMade);
    assertEquals(objectRequestManager.getTotalRequestedObjects(), objectsToBeRequested);
    assertEquals(objectRequestManager.getObjectRequestCacheClientSize(), numberOfRequestThreads);

    final List<ObjectResponseThread> objectResponseThreadList = new ArrayList<ObjectResponseThread>();
    final int numberOfResponseThreads = 1;
    final CyclicBarrier responseBarrier = new CyclicBarrier(numberOfResponseThreads);

    for (int i = 0; i < numberOfResponseThreads; i++) {
        final ObjectResponseThread objectResponseThread = new ObjectResponseThread(responseBarrier,
                objectRequestManager, respondSink);
        objectResponseThreadList.add(objectResponseThread);
    }

    // now start all the response threads
    for (final ObjectResponseThread thread : objectResponseThreadList) {
        thread.start();
    }

    // now wait for all the threads
    for (final ObjectResponseThread thread : objectResponseThreadList) {
        try {
            thread.join();
        } catch (final InterruptedException e) {
            throw new AssertionError(e);
        }
    }

    final Set sendSet = TestRequestManagedObjectResponseMessage.sendSet;
    assertEquals(10, sendSet.size());

    int i = 0;
    for (final Iterator iter = sendSet.iterator(); iter.hasNext(); i++) {
        final TestRequestManagedObjectResponseMessage message = (TestRequestManagedObjectResponseMessage) iter
                .next();
        System.out.println("ChannelID: " + message.getChannelID().toLong());
        assertEquals(message.getChannelID().toLong(), i);

    }

    assertEquals(objectRequestManager.getTotalRequestedObjects(), 0);
    assertEquals(objectRequestManager.getObjectRequestCacheClientSize(), 0);

}

From source file: edu.iu.subgraph.colorcount_HJ.java

/**
 * @brief initialize local graph
 *
 * @param local_graph 
 * @param global_max_v_id
 * @param thread_num
 * @param core_num
 * @param affinity
 * @param calc_auto
 * @param do_gdd
 * @param do_vert
 * @param verb
 */
void init(SCCollectiveMapper mapper, Context context, Graph local_graph, int global_max_v_id, int thread_num,
        int core_num, int tpc, String affinity, boolean do_gdd, boolean do_vert, boolean verb) {
    // assign params
    this.mapper = mapper;
    this.context = context;
    this.g = local_graph;
    this.max_abs_id = global_max_v_id;
    this.thread_num = thread_num;
    this.core_num = core_num;
    this.tpc = tpc;
    this.affinity = affinity;
    this.do_graphlet_freq = do_gdd;
    this.do_vert_output = do_vert;
    this.verbose = verb;

    // init members 
    this.labels_g = this.g.labels;
    this.labeled = this.g.labeled;
    this.num_verts_graph = this.g.num_vertices();
    this.colors_g = new int[this.num_verts_graph];

    this.cc_ato = new double[this.thread_num];
    this.count_local_root = new double[this.thread_num];
    this.count_comm_root = new double[this.thread_num];

    this.dt = new dynamic_table_array();
    this.barrier = new CyclicBarrier(this.thread_num);

    if (do_graphlet_freq || do_vert_output) {
        //ToDo for graphlet freq and vert output
    }

}

From source file: com.bt.aloha.batchtest.BatchTest.java

public void run(String scenarioName) {
    this.numberCompleted = 0;
    this.numberSucceeded = 0;
    this.results = new ConcurrentHashMap<String, Result>();
    normalizeWeightings();
    totalTime = 0;
    CyclicBarrier barrier = new CyclicBarrier(numberOfConcurrentStarts);
    for (int i = 0; i < numberOfRuns; i++) {
        try {
            if (manager != null)
                manager.doApplicationContextStartStop();
            long startTime = System.currentTimeMillis();
            Thread.sleep(1000);
            String[] beans = new String[numberOfConcurrentStarts];
            BatchTestScenario[] concurrentScenarios = new BatchTestScenario[numberOfConcurrentStarts];
            for (int j = 0; j < numberOfConcurrentStarts; j++) {
                beans[j] = scenarioName == null ? pickScenarioName() : scenarioName;

                BatchTestScenario s = (BatchTestScenario) applicationContext.getBean(beans[j]);
                ((BatchTestScenarioBase) s).setCallCollection(callCollection);
                ((BatchTestScenarioBase) s).setAudioFileUri(audioFileUri);

                if (s.getBatchTestScenarioResultListener() == null)
                    s.setBatchTestScenarioResultListener(this);
                concurrentScenarios[j] = s;
            }

            CountDownLatch latch = new CountDownLatch(numberOfConcurrentStarts);
            for (int j = 0; j < numberOfConcurrentStarts; j++) {
                if (concurrentScenarios[j] == null)
                    break;
                RunScenario rs = new RunScenario(concurrentScenarios[j], beans[j], barrier, latch);
                executorService.execute(rs);
            }
            latch.await();
            barrier.reset();
            totalTime += System.currentTimeMillis() - startTime;
        } catch (Throwable t) {
            log.error(String.format("Test run %d threw an exception", i), t);
        }
    }

    waitForAllToFinish(maximumScenarioCompletionWaitTimeSeconds);
    log.info("Finishing...");
    if (results.size() < 1) {
        log.info("NO scenarios run!");
        return;
    }
    numberSucceeded = 0;
    for (String o : results.keySet()) {
        String scenario = o.split(":")[0];
        Result res = results.get(o);
        Analysis result = analysis.get(scenario);
        if (result != null) {
            if (res.isSucceeded()) {
                result.setSuccessCount(result.getSuccessCount() + 1);
            } else {
                result.setFailureCount(result.getFailureCount() + 1);
            }
            log.info(String.format("Scenario %s %s", o, res.toString()));
            System.err.println(String.format("Scenario %s %s", o, res.toString()));
            numberCompleted++;
            if (res.isSucceeded())
                numberSucceeded++;
            else
                failedScenarios.add(o);
        } else {
            log.error("unable to find result for scenario " + scenario);
        }
    }
    log.info(numberSucceeded + " successful scenarios, " + successRate() + "% passed");
    log.info(numberCompleted());
    if (manager != null) {
        log.info("Access to ApplicationContext1 #: " + manager.getCountReturnedAppCtx1());
        log.info("Access to ApplicationContext2 #: " + manager.getCountReturnedAppCtx2());
    }
    resetDb();
}

From source file: org.springframework.integration.zookeeper.metadata.ZookeeperMetadataStoreTests.java

@Test
public void testListenerInvokedOnRemoteChanges() throws Exception {
    String testKey = "ZookeeperMetadataStoreTests";

    CuratorFramework otherClient = createNewClient();
    ZookeeperMetadataStore otherMetadataStore = new ZookeeperMetadataStore(otherClient);

    // register listeners
    final List<List<String>> notifiedChanges = new ArrayList<List<String>>();
    final Map<String, CyclicBarrier> barriers = new HashMap<String, CyclicBarrier>();
    barriers.put("add", new CyclicBarrier(2));
    barriers.put("remove", new CyclicBarrier(2));
    barriers.put("update", new CyclicBarrier(2));
    metadataStore.addListener(new MetadataStoreListenerAdapter() {
        @Override
        public void onAdd(String key, String value) {
            notifiedChanges.add(Arrays.asList("add", key, value));
            waitAtBarrier("add", barriers);
        }

        @Override
        public void onRemove(String key, String oldValue) {
            notifiedChanges.add(Arrays.asList("remove", key, oldValue));
            waitAtBarrier("remove", barriers);
        }

        @Override
        public void onUpdate(String key, String newValue) {
            notifiedChanges.add(Arrays.asList("update", key, newValue));
            waitAtBarrier("update", barriers);
        }
    });

    // the tests themselves
    barriers.get("add").reset();
    otherMetadataStore.put(testKey, "Integration");
    waitAtBarrier("add", barriers);
    assertThat(notifiedChanges, hasSize(1));
    assertThat(notifiedChanges.get(0), IsIterableContainingInOrder.contains("add", testKey, "Integration"));

    otherMetadataStore.putIfAbsent(testKey, "Integration++");
    // there is no update and therefore we expect no changes
    assertThat(notifiedChanges, hasSize(1));

    barriers.get("update").reset();
    otherMetadataStore.put(testKey, "Integration-2");
    waitAtBarrier("update", barriers);
    assertThat(notifiedChanges, hasSize(2));
    assertThat(notifiedChanges.get(1),
            IsIterableContainingInOrder.contains("update", testKey, "Integration-2"));

    barriers.get("update").reset();
    otherMetadataStore.replace(testKey, "Integration-2", "Integration-3");
    waitAtBarrier("update", barriers);
    assertThat(notifiedChanges, hasSize(3));
    assertThat(notifiedChanges.get(2),
            IsIterableContainingInOrder.contains("update", testKey, "Integration-3"));

    otherMetadataStore.replace(testKey, "Integration-2", "Integration-none");
    assertThat(notifiedChanges, hasSize(3));

    barriers.get("remove").reset();
    otherMetadataStore.remove(testKey);
    waitAtBarrier("remove", barriers);
    assertThat(notifiedChanges, hasSize(4));
    assertThat(notifiedChanges.get(3),
            IsIterableContainingInOrder.contains("remove", testKey, "Integration-3"));

    // sleep and check for any further updates - if any were pending, we should have caught them by now
    Thread.sleep(1000);
    assertThat(notifiedChanges, hasSize(4));
}

From source file: org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testRebootedDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    AppContext mockContext = mock(AppContext.class);
    when(mockContext.isLastAMRetry()).thenReturn(true);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    syncBarrier.await();
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
    assertJobState(job, JobStateInternal.REBOOT);
    // the external state stays RUNNING until unregistration succeeds;
    // once unregistered it is reported as ERROR since this is the last AM retry
    Assert.assertEquals(JobState.RUNNING, job.getState());
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
    Assert.assertEquals(JobState.ERROR, job.getState());

    dispatcher.stop();
    commitHandler.stop();
}

From source file: org.apache.bookkeeper.metadata.etcd.EtcdRegistrationTest.java

private void testConcurrentRegistration(boolean readonly) throws Exception {
    final String bookieId;
    if (readonly) {
        bookieId = runtime.getMethodName() + "-readonly:3181";
    } else {
        bookieId = runtime.getMethodName() + ":3181";
    }
    final int numBookies = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numBookies);
    final CyclicBarrier startBarrier = new CyclicBarrier(numBookies);
    final CyclicBarrier completeBarrier = new CyclicBarrier(numBookies);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    final AtomicInteger numSuccesses = new AtomicInteger(0);
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (int i = 0; i < numBookies; i++) {
        executor.submit(() -> {
            try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(newEtcdClient(), scope, 1)) {
                try {
                    startBarrier.await();
                    regMgr.registerBookie(bookieId, readonly);
                    numSuccesses.incrementAndGet();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to start", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Start barrier is broken", e);
                } catch (BookieException e) {
                    numFailures.incrementAndGet();
                }
                try {
                    completeBarrier.await();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to complete", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Complete barrier is broken", e);
                }
                FutureUtils.complete(doneFuture, null);
            }
        });
    }
    doneFuture.join();
    assertEquals(1, numSuccesses.get());
    assertEquals(numBookies - 1, numFailures.get());
}

From source file: com.adaptris.core.PoolingWorkflow.java

private void populatePool() throws CoreException {
    int size = minIdle();
    ExecutorService populator = Executors.newCachedThreadPool();
    try {
        final CyclicBarrier barrier = new CyclicBarrier(size + 1);
        log.trace("Need more ({}) children as soon as possible to handle work. Get to it", size);
        final List<Worker> workers = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            populator.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        Worker w = objectPool.borrowObject();
                        workers.add(w);
                        barrier.await(initWaitTimeMs(), TimeUnit.MILLISECONDS);
                    } catch (Exception e) {
                        barrier.reset();
                    }
                }
            });
        }
        barrier.await(initWaitTimeMs(), TimeUnit.MILLISECONDS);
        for (Worker worker : workers) {
            objectPool.returnObject(worker);
        }
    } catch (Exception e) {
        throw new CoreException(e);
    } finally {
        populator.shutdownNow();
    }
}