Example usage for java.util.concurrent.CyclicBarrier: CyclicBarrier(int parties)

Introduction

This page collects example usages of the java.util.concurrent.CyclicBarrier one-argument constructor, CyclicBarrier(int parties), drawn from open source projects.

Prototype

public CyclicBarrier(int parties) 

Document

Creates a new CyclicBarrier that will trip when the given number of parties (threads) are waiting upon it, and does not perform a predefined action when the barrier is tripped.
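
Before the project examples below, here is a minimal, self-contained sketch of this constructor (the class name BarrierSketch, the party count of 3, and the printed messages are illustrative and not taken from any of the projects listed):

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class BarrierSketch {
    public static void main(String[] args) {
        // The barrier trips only once all three parties are waiting on it.
        final CyclicBarrier barrier = new CyclicBarrier(3);
        for (int i = 0; i < 3; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("worker " + id + " ready");
                    barrier.await(); // blocks until all three threads have arrived
                    System.out.println("worker " + id + " released");
                } catch (InterruptedException | BrokenBarrierException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
    }
}

Since this constructor registers no barrier action, nothing extra runs when the barrier trips; the tests below rely on exactly that to line threads up at well-defined synchronization points.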

Usage

From source file:org.apache.hadoop.mapreduce.v2.app.launcher.TestContainerLauncherImpl.java

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
    LOG.info("STARTING testContainerCleaned");

    CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
    CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);

    AppContext mockContext = mock(AppContext.class);

    EventHandler mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);

    ContainerManagementProtocolClient mockCM = new ContainerManagerForTest(startLaunchBarrier,
            completeLaunchBarrier);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);

    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0L, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);

        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);

        startLaunchBarrier.await();

        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);

        completeLaunchBarrier.await();

        ut.waitForPoolToIdle();

        ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
        verify(mockEventHandler, atLeast(2)).handle(arg.capture());
        boolean containerCleaned = false;

        for (int i = 0; i < arg.getAllValues().size(); i++) {
            LOG.info(arg.getAllValues().get(i).toString());
            Event currentEvent = arg.getAllValues().get(i);
            if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
                containerCleaned = true;
            }
        }
        assertTrue(containerCleaned);

    } finally {
        ut.stop();
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldEvalInMultipleThreads() throws Exception {
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();

    final CyclicBarrier barrier = new CyclicBarrier(2);
    final AtomicInteger i1 = new AtomicInteger(0);
    final AtomicBoolean b1 = new AtomicBoolean(false);
    final Thread t1 = new Thread(() -> {
        try {
            barrier.await();
            i1.set((Integer) gremlinExecutor.eval("1+1").get());
        } catch (Exception ex) {
            b1.set(true);
        }
    });

    final AtomicInteger i2 = new AtomicInteger(0);
    final AtomicBoolean b2 = new AtomicBoolean(false);
    final Thread t2 = new Thread(() -> {
        try {
            barrier.await();
            i2.set((Integer) gremlinExecutor.eval("1+1").get());
        } catch (Exception ex) {
            b2.set(true);
        }
    });

    t1.start();
    t2.start();

    t1.join();
    t2.join();

    assertEquals(2, i1.get());
    assertEquals(2, i2.get());
    assertFalse(b1.get());
    assertFalse(b2.get());

    gremlinExecutor.close();
}

From source file:at.alladin.rmbt.client.RMBTClient.java

public TestResult runTest() throws InterruptedException {
    System.out.println("starting test...");

    long txBytes = 0;
    long rxBytes = 0;
    final long timeStampStart = System.nanoTime();

    if (testStatus.get() != TestStatus.ERROR && testThreadPool != null) {

        if (trafficService != null) {
            txBytes = trafficService.getTotalTxBytes();
            rxBytes = trafficService.getTotalRxBytes();
        }

        resetSpeed();
        downBitPerSec.set(-1);
        upBitPerSec.set(-1);
        pingNano.set(-1);
        initNano.set(-1);

        final long waitTime = params.getStartTime() - System.currentTimeMillis();
        if (waitTime > 0) {
            setStatus(TestStatus.WAIT);
            log(String.format(Locale.US, "we have to wait %d ms...", waitTime));
            Thread.sleep(waitTime);
            log("...done.");
        } else
            log("luckily we do not have to wait.");

        setStatus(TestStatus.INIT);
        statusBeforeError.set(null);

        if (testThreadPool.isShutdown())
            throw new IllegalStateException("RMBTClient already shut down");
        log("starting test...");

        final int numThreads = params.getNumThreads();

        aborted.set(false);

        result = new TotalTestResult();

        if (params.isEncryption())
            sslSocketFactory = createSSLSocketFactory();

        log(String.format(Locale.US, "Host: %s; Port: %s; Enc: %s", params.getHost(), params.getPort(),
                params.isEncryption()));
        log(String.format(Locale.US, "starting %d threads...", numThreads));

        final CyclicBarrier barrier = new CyclicBarrier(numThreads);

        @SuppressWarnings("unchecked")
        final Future<ThreadTestResult>[] results = new Future[numThreads];

        final int storeResults = (int) (params.getDuration() * 1000000000L / MIN_DIFF_TIME);

        final AtomicBoolean fallbackToOneThread = new AtomicBoolean();

        for (int i = 0; i < numThreads; i++) {
            testTasks[i] = new RMBTTest(this, params, i, barrier, storeResults, MIN_DIFF_TIME,
                    fallbackToOneThread);
            results[i] = testThreadPool.submit(testTasks[i]);
        }

        try {

            long shortestPing = Long.MAX_VALUE;

            // wait for all threads first
            for (int i = 0; i < numThreads; i++)
                results[i].get();

            if (aborted.get())
                return null;

            final long[][] allDownBytes = new long[numThreads][];
            final long[][] allDownNsecs = new long[numThreads][];
            final long[][] allUpBytes = new long[numThreads][];
            final long[][] allUpNsecs = new long[numThreads][];

            int realNumThreads = 0;
            log("");
            for (int i = 0; i < numThreads; i++) {
                final ThreadTestResult testResult = results[i].get();

                if (testResult != null) {
                    realNumThreads++;

                    log(String.format(Locale.US, "Thread %d: Download: bytes: %d time: %.3f s", i,
                            ThreadTestResult.getLastEntry(testResult.down.bytes),
                            ThreadTestResult.getLastEntry(testResult.down.nsec) / 1e9));
                    log(String.format(Locale.US, "Thread %d: Upload:   bytes: %d time: %.3f s", i,
                            ThreadTestResult.getLastEntry(testResult.up.bytes),
                            ThreadTestResult.getLastEntry(testResult.up.nsec) / 1e9));

                    final long ping = testResult.ping_shortest;
                    if (ping < shortestPing)
                        shortestPing = ping;

                    if (!testResult.pings.isEmpty())
                        result.pings.addAll(testResult.pings);

                    allDownBytes[i] = testResult.down.bytes;
                    allDownNsecs[i] = testResult.down.nsec;
                    allUpBytes[i] = testResult.up.bytes;
                    allUpNsecs[i] = testResult.up.nsec;

                    result.totalDownBytes += testResult.totalDownBytes;
                    result.totalUpBytes += testResult.totalUpBytes;

                    // aggregate speedItems
                    result.speedItems.addAll(testResult.speedItems);
                }
            }

            result.calculateDownload(allDownBytes, allDownNsecs);
            result.calculateUpload(allUpBytes, allUpNsecs);

            log("");
            log(String.format(Locale.US, "Total calculated bytes down: %d", result.bytes_download));
            log(String.format(Locale.US, "Total calculated time down:  %.3f s", result.nsec_download / 1e9));
            log(String.format(Locale.US, "Total calculated bytes up:   %d", result.bytes_upload));
            log(String.format(Locale.US, "Total calculated time up:    %.3f s", result.nsec_upload / 1e9));

            // get connection info from the first thread (at least one thread must have run)
            result.ip_local = results[0].get().ip_local;
            result.ip_server = results[0].get().ip_server;
            result.port_remote = results[0].get().port_remote;
            result.encryption = results[0].get().encryption;

            result.num_threads = realNumThreads;

            result.ping_shortest = shortestPing;

            result.speed_download = result.getDownloadSpeedBitPerSec() / 1e3;
            result.speed_upload = result.getUploadSpeedBitPerSec() / 1e3;

            log("");
            log(String.format(Locale.US, "Total Down: %.0f kBit/s", result.getDownloadSpeedBitPerSec() / 1e3));
            log(String.format(Locale.US, "Total UP:   %.0f kBit/s", result.getUploadSpeedBitPerSec() / 1e3));
            log(String.format(Locale.US, "Ping:       %.2f ms", shortestPing / 1e6));

            if (controlConnection != null) {
                log("");
                final String testId = controlConnection.getTestId();
                final String testUUID = params.getUUID();
                final long testTime = controlConnection.getTestTime();
                log(String.format(Locale.US, "uid=%s, time=%s, uuid=%s\n", testId,
                        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS", Locale.US).format(new Date(testTime)),
                        testUUID));
            }

            downBitPerSec.set(Math.round(result.getDownloadSpeedBitPerSec()));
            upBitPerSec.set(Math.round(result.getUploadSpeedBitPerSec()));

            log("end.");
            setStatus(TestStatus.SPEEDTEST_END);

            if (trafficService != null) {
                txBytes = trafficService.getTotalTxBytes() - txBytes;
                rxBytes = trafficService.getTotalRxBytes() - rxBytes;
                result.setTotalTrafficMeasurement(
                        new TestMeasurement(rxBytes, txBytes, timeStampStart, System.nanoTime()));
                result.setMeasurementMap(measurementMap);
            }

            return result;
        } catch (final ExecutionException e) {
            log(e);
            abortTest(true);
            return null;
        } catch (final InterruptedException e) {
            log("RMBTClient interrupted!");
            abortTest(false);
            throw e;
        }
    } else {
        setStatus(TestStatus.SPEEDTEST_END);

        return null;
    }
}

From source file:com.tc.objectserver.impl.ObjectRequestManagerTest.java

public void testMissingObjects() {

    final TestObjectManager objectManager = new TestObjectManager(persistor.getManagedObjectPersistor()) {

        @Override
        public boolean lookupObjectsAndSubObjectsFor(final NodeID nodeID,
                final ObjectManagerResultsContext responseContext, final int maxCount) {

            final Set ids = responseContext.getLookupIDs();
            final Map<ObjectID, ManagedObject> resultsMap = new HashMap<ObjectID, ManagedObject>();
            final ObjectIDSet missing = new BitSetObjectIDSet(ids);

            final ObjectManagerLookupResults results = new ObjectManagerLookupResultsImpl(resultsMap,
                    TCCollections.EMPTY_OBJECT_ID_SET, missing);
            responseContext.setResults(results);

            return false;
        }
    };
    final TestDSOChannelManager channelManager = new TestDSOChannelManager();
    final TestClientStateManager clientStateManager = new TestClientStateManager();
    final TestSink requestSink = new TestSink();
    final TestSink respondSink = new TestSink();
    final ObjectRequestManagerImpl objectRequestManager = new ObjectRequestManagerImpl(objectManager,
            channelManager, clientStateManager, requestSink, respondSink, new ObjectStatsRecorder());

    final int objectsToBeRequested = 100;
    int numberOfRequestsMade = objectsToBeRequested / ObjectRequestManagerImpl.SPLIT_SIZE;
    if (objectsToBeRequested % ObjectRequestManagerImpl.SPLIT_SIZE > 0) {
        numberOfRequestsMade++;
    }
    final ObjectIDSet ids = createObjectIDSet(objectsToBeRequested);

    final List<ObjectRequestThread> objectRequestThreadList = new ArrayList<ObjectRequestThread>();
    final int numberOfRequestThreads = 10;
    final CyclicBarrier requestBarrier = new CyclicBarrier(numberOfRequestThreads);

    for (int i = 0; i < numberOfRequestThreads; i++) {
        final ClientID clientID = new ClientID(i);
        final ObjectRequestThread objectRequestThread = new ObjectRequestThread(requestBarrier,
                objectRequestManager, clientID, new ObjectRequestID(i), ids, LOOKUP_STATE.CLIENT);
        objectRequestThreadList.add(objectRequestThread);
    }

    // now start all the request threads
    for (final ObjectRequestThread thread : objectRequestThreadList) {
        thread.start();
    }

    // now wait for all the threads
    for (final ObjectRequestThread thread : objectRequestThreadList) {
        try {
            thread.join();
        } catch (final InterruptedException e) {
            throw new AssertionError(e);
        }
    }

    System.out.println("done doing requests.");
    assertEquals(respondSink.size(), numberOfRequestsMade);
    assertEquals(objectRequestManager.getTotalRequestedObjects(), objectsToBeRequested);
    assertEquals(objectRequestManager.getObjectRequestCacheClientSize(), numberOfRequestThreads);

    final List<ObjectResponseThread> objectResponseThreadList = new ArrayList<ObjectResponseThread>();
    final int numberOfResponseThreads = 1;
    final CyclicBarrier responseBarrier = new CyclicBarrier(numberOfResponseThreads);

    for (int i = 0; i < numberOfResponseThreads; i++) {
        final ObjectResponseThread objectResponseThread = new ObjectResponseThread(responseBarrier,
                objectRequestManager, respondSink);
        objectResponseThreadList.add(objectResponseThread);
    }

    // now start all the response threads
    for (final ObjectResponseThread thread : objectResponseThreadList) {
        thread.start();
    }

    // now wait for all the threads
    for (final ObjectResponseThread thread : objectResponseThreadList) {
        try {
            thread.join();
        } catch (final InterruptedException e) {
            throw new AssertionError(e);
        }
    }

    final Set sendSet = TestObjectsNotFoundMessage.sendSet;
    assertEquals(10, sendSet.size());

    int i = 0;
    for (final Iterator iter = sendSet.iterator(); iter.hasNext(); i++) {
        final TestObjectsNotFoundMessage message = (TestObjectsNotFoundMessage) iter.next();
        System.out.println("ChannelID: " + message.getChannelID().toLong());
        assertEquals(message.getChannelID().toLong(), i);

    }

    assertEquals(objectRequestManager.getTotalRequestedObjects(), 0);
    assertEquals(objectRequestManager.getObjectRequestCacheClientSize(), 0);

}

From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testKilledDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);

    syncBarrier.await();
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}

From source file:org.apache.hadoop.util.TestStringUtils.java

@Test
// Multithreaded test of StringUtils.getFormattedTimeWithDiff()
public void testGetFormattedTimeWithDiff() throws InterruptedException {
    ExecutorService executorService = Executors.newFixedThreadPool(16);
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(10);
    for (int i = 0; i < 10; i++) {

        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    cyclicBarrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    //Ignored
                }
                final long end = System.currentTimeMillis();
                final long start = end - 30000;
                String formattedTime1 = StringUtils.getFormattedTimeWithDiff(FAST_DATE_FORMAT, start, end);
                String formattedTime2 = StringUtils.getFormattedTimeWithDiff(FAST_DATE_FORMAT, start, end);
                assertTrue("Method returned inconsistent results indicative of" + " a race condition",
                        formattedTime1.equals(formattedTime2));

            }
        });
    }

    executorService.shutdown();
    executorService.awaitTermination(50, TimeUnit.SECONDS);
}

From source file:es.udc.gii.common.eaf.algorithm.parallel.evaluation.DistributedEvaluation.java

protected void master(EvolutionaryAlgorithm algorithm, List<Individual> individuals,
        List<ObjectiveFunction> functions, List<Constraint> constraints) {

    /* Initialize the global state. */
    this.functions = functions;
    this.constraints = constraints;
    this.popSize = individuals.size();

    this.individualsToEvaluate = individuals;
    this.firtstInd = 0;
    this.evaluatedIndividuals = 0;

    if (this.barrier == null) {
        this.barrier = new CyclicBarrier(2);
    }

    boolean setChunkSizeToCero = false;
    if (getChunkSize() == 0) {
        setChunkSizeToCero = true;

        int size = individuals.size();
        int tSize = getTopology().getSize();

        if (size < tSize) {
            setChunkSize(1);
        } else {
            setChunkSize(size / tSize);
        }
    }

    notifyObservers(CURRENT_EVALUATION_STARTED, this);

    /* Initialize the communication thread. */
    if (communicationThread == null) {
        communicationThread = new Thread(new CommunicationThread(), "CommThread");
        communicationThread.setPriority(Thread.MAX_PRIORITY);
        Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
        commThreadMustWait = true;
        communicationThread.start();
    } else {
        synchronized (this.communicationThread) {
            commThreadMustWait = false;
            this.communicationThread.notify();
        }
    }

    /* Run the evaluation thread (the current thread; no special thread is created). */
    evaluationThread(algorithm);

    if (setChunkSizeToCero) {
        setChunkSize(0);
    }

    notifyObservers(CURRENT_EVALUATION_ENDED, this);
    setState(CURRENT_EVALUATION_NOT_STARTED);
}

From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // add a special task event handler to put the task back to running in case
    // of task rescheduling/killing
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {
        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);

    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);

            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);

    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }

    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper
    // this should not trigger rescheduling
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);

    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);

    dispatcher.stop();
    commitHandler.stop();
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestDataNodeHotSwapVolumes.java

/**
 * Test the case that remove a data volume on a particular DataNode when the
 * volume is actively being written.
 * @param dataNodeIdx the index of the DataNode to remove a volume.
 */
private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx) throws IOException,
        ReconfigurationException, TimeoutException, InterruptedException, BrokenBarrierException {
    // Starts DFS cluster with 3 DataNodes to form a pipeline.
    startDFSCluster(1, 3);

    final short REPLICATION = 3;
    final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
    final FileSystem fs = cluster.getFileSystem();
    final Path testFile = new Path("/test");
    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();

    FSDataOutputStream out = fs.create(testFile, REPLICATION);

    Random rb = new Random(0);
    byte[] writeBuf = new byte[BLOCK_SIZE / 2]; // half of the block.
    rb.nextBytes(writeBuf);
    out.write(writeBuf);
    out.hflush();

    // Make FsDatasetSpi#finalizeBlock a time-consuming operation, so that if the
    // BlockReceiver releases the volume reference before finalizeBlock(), the blocks
    // on the volume will be removed and finalizeBlock() will throw an IOException.
    final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
    dn.data = Mockito.spy(data);
    doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation) throws IOException, InterruptedException {
            Thread.sleep(1000);
            // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
            // the block is not removed, since the volume reference should not
            // be released at this point.
            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
            return null;
        }
    }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));

    final CyclicBarrier barrier = new CyclicBarrier(2);

    List<String> oldDirs = getDataDirs(dn);
    final String newDirs = oldDirs.get(1); // keep only the second volume, i.e. remove the first
    final List<Exception> exceptions = new ArrayList<>();
    Thread reconfigThread = new Thread() {
        public void run() {
            try {
                barrier.await();
                dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
            } catch (ReconfigurationException | InterruptedException | BrokenBarrierException e) {
                exceptions.add(e);
            }
        }
    };
    reconfigThread.start();

    barrier.await();
    rb.nextBytes(writeBuf);
    out.write(writeBuf);
    out.hflush();
    out.close();

    reconfigThread.join();

    // Verify the file has sufficient replications.
    DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
    // Read the content back
    byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
    assertEquals(BLOCK_SIZE, content.length);

    // If an IOException is thrown from BlockReceiver#run, it triggers
    // DataNode#checkDiskError(). So we can test whether checkDiskError() was called,
    // to see whether an IOException occurred in BlockReceiver#run().
    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());

    if (!exceptions.isEmpty()) {
        throw new IOException(exceptions.get(0).getCause());
    }
}

From source file:rpc.TestRPC.java

@Test(timeout = 30000)
public void testRPCInterrupted() throws IOException, InterruptedException {
    final Configuration conf = new Configuration();
    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl())
            .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(null)
            .build();

    server.start();

    int numConcurrentRPC = 200;
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
    final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
    final AtomicBoolean leaderRunning = new AtomicBoolean(true);
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    Thread leaderThread = null;

    for (int i = 0; i < numConcurrentRPC; i++) {
        final int num = i;
        final TestProtocol proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
        Thread rpcThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    barrier.await();
                    while (num == 0 || leaderRunning.get()) {
                        proxy.slowPing(false);
                    }

                    proxy.slowPing(false);
                } catch (Exception e) {
                    if (num == 0) {
                        leaderRunning.set(false);
                    } else {
                        error.set(e);
                    }

                    LOG.error(e);
                } finally {
                    latch.countDown();
                }
            }
        });
        rpcThread.start();

        if (leaderThread == null) {
            leaderThread = rpcThread;
        }
    }
    // let threads get past the barrier
    Thread.sleep(1000);
    // stop a single thread
    while (leaderRunning.get()) {
        leaderThread.interrupt();
    }

    latch.await();

    // should not cause any other thread to get an error
    assertTrue("rpc got exception " + error.get(), error.get() == null);
    server.stop();
}