Example usage for java.util.concurrent CyclicBarrier await

Introduction

This page collects usage examples for java.util.concurrent.CyclicBarrier.await().

Prototype

public int await() throws InterruptedException, BrokenBarrierException 

Document

Waits until all parties (as reported by getParties()) have invoked await() on this barrier.
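
For orientation, here is a minimal, self-contained sketch of the call above (the class and worker names are illustrative, not taken from any of the sources below): three threads meet at a shared barrier, and an optional barrier action runs once per trip after the last party arrives.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class CyclicBarrierAwaitDemo {
    public static void main(String[] args) {
        final int parties = 3;
        // The optional barrier action runs once per trip, after the last party arrives.
        final CyclicBarrier barrier = new CyclicBarrier(parties,
                () -> System.out.println("all " + parties + " parties arrived"));

        for (int i = 0; i < parties; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("worker " + id + " waiting at the barrier");
                    // Blocks until all parties have invoked await();
                    // returns this thread's arrival index.
                    int arrivalIndex = barrier.await();
                    System.out.println("worker " + id + " released, arrival index " + arrivalIndex);
                } catch (InterruptedException | BrokenBarrierException e) {
                    // If any waiting party is interrupted, the barrier breaks and
                    // the remaining waiters get BrokenBarrierException.
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
    }
}

Unlike a CountDownLatch, a CyclicBarrier resets automatically after each trip, so the same instance can synchronize repeated rounds; some of the examples below rely on this by calling await() on the same barrier twice.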

Usage

From source file:org.apache.hadoop.mapreduce.v2.app.launcher.TestContainerLauncherImpl.java
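
Two two-party barriers pace a mocked container launch here: the test thread queues the launch event and meets the stub container manager (ContainerManagerForTest, not shown, which evidently awaits the same barriers) at startLaunchBarrier, queues the cleanup event, meets it again at completeLaunchBarrier, and then verifies that a TA_CONTAINER_CLEANED event was dispatched.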

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
    LOG.info("STARTING testContainerCleaned");

    CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
    CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);

    AppContext mockContext = mock(AppContext.class);

    EventHandler mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);

    ContainerManagementProtocolClient mockCM = new ContainerManagerForTest(startLaunchBarrier,
            completeLaunchBarrier);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);

    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0L, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);

        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);

        startLaunchBarrier.await();

        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);

        completeLaunchBarrier.await();

        ut.waitForPoolToIdle();

        ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
        verify(mockEventHandler, atLeast(2)).handle(arg.capture());
        boolean containerCleaned = false;

        for (int i = 0; i < arg.getAllValues().size(); i++) {
            LOG.info(arg.getAllValues().get(i).toString());
            Event currentEvent = arg.getAllValues().get(i);
            if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
                containerCleaned = true;
            }
        }
        assert (containerCleaned);

    } finally {
        ut.stop();
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.TestClientRMService.java
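
The injected event handler parks the first application's submission between startBarrier and endBarrier, letting the test prove that a second submitApplication call completes while the first is still blocked.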

@Test(timeout = 4000)
public void testConcurrentAppSubmit() throws Exception {
    YarnScheduler yarnScheduler = mockYarnScheduler();
    RMContext rmContext = mock(RMContext.class);
    mockRMContext(yarnScheduler, rmContext);
    RMStateStore stateStore = mock(RMStateStore.class);
    when(rmContext.getStateStore()).thenReturn(stateStore);
    RMAppManager appManager = new RMAppManager(rmContext, yarnScheduler, null,
            mock(ApplicationACLsManager.class), new Configuration());

    final ApplicationId appId1 = getApplicationId(100);
    final ApplicationId appId2 = getApplicationId(101);
    final SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null);
    final SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, null, null);

    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);

    @SuppressWarnings("rawtypes")
    EventHandler eventHandler = new EventHandler() {
        @Override
        public void handle(Event rawEvent) {
            if (rawEvent instanceof RMAppEvent) {
                RMAppEvent event = (RMAppEvent) rawEvent;
                if (event.getApplicationId().equals(appId1)) {
                    try {
                        startBarrier.await();
                        endBarrier.await();
                    } catch (BrokenBarrierException e) {
                        LOG.warn("Broken Barrier", e);
                    } catch (InterruptedException e) {
                        LOG.warn("Interrupted while awaiting barriers", e);
                    }
                }
            }
        }
    };

    when(rmContext.getDispatcher().getEventHandler()).thenReturn(eventHandler);

    final ClientRMService rmService = new ClientRMService(rmContext, yarnScheduler, appManager, null, null,
            null);
    rmService.serviceInit(new Configuration());

    // submit an app and wait for it to block while in app submission
    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                rmService.submitApplication(submitRequest1);
            } catch (YarnException e) {
                // ignored: this thread only needs to drive the blocking submission
            }
        }
    };
    t.start();

    // submit another app, so go through while the first app is blocked
    startBarrier.await();
    rmService.submitApplication(submitRequest2);
    endBarrier.await();
    t.join();
}

From source file:lockstep.LockstepServer.java

/**
 * Implements the handshake protocol server side, setting up the UDP 
 * connection, queues and threads for a specific client.
 * To be run in parallel threads, one for each client, as they need
 * to synchronize to correctly setup the lockstep protocol.
 * It signals success through a latch or failure through interruption to the
 * server thread.
 * 
 * @param tcpSocket Connection with the client, to be used in handshake only
 * @param firstFrameNumber Frame number to initialize the lockstep protocol
 * @param barrier Used for synchronization with concurrent handshake sessions
 * @param latch Used to signal the successful completion of the handshake session.
 * @param server Used to signal failure of the handshake sessions, via interruption.
 */
private void serverHandshakeProtocol(Socket tcpSocket, int firstFrameNumber, CyclicBarrier barrier,
        CountDownLatch latch, LockstepServer server) {
    try (ObjectOutputStream oout = new ObjectOutputStream(tcpSocket.getOutputStream())) {
        oout.flush();
        try (ObjectInputStream oin = new ObjectInputStream(tcpSocket.getInputStream())) {
            //Receive hello message from client and reply
            LOG.info("Waiting an hello from " + tcpSocket.getInetAddress().getHostAddress());
            oout.flush();
            ClientHello hello = (ClientHello) oin.readObject();
            LOG.info("Received an hello from " + tcpSocket.getInetAddress().getHostAddress());
            DatagramSocket udpSocket = new DatagramSocket();
            openSockets.add(udpSocket);
            InetSocketAddress clientUDPAddress = new InetSocketAddress(
                    tcpSocket.getInetAddress().getHostAddress(), hello.clientUDPPort);
            udpSocket.connect(clientUDPAddress);

            int assignedClientID;
            do {
                assignedClientID = (new Random()).nextInt(100000) + 10000;
            } while (!this.clientIDs.add(assignedClientID));

            LOG.info("Assigned hostID " + assignedClientID + " to "
                    + tcpSocket.getInetAddress().getHostAddress() + ", sending helloReply");
            ServerHelloReply helloReply = new ServerHelloReply(udpSocket.getLocalPort(), assignedClientID,
                    clientsNumber, firstFrameNumber);
            oout.writeObject(helloReply);

            ConcurrentHashMap<Integer, TransmissionQueue> clientTransmissionFrameQueues = new ConcurrentHashMap<>();
            this.transmissionFrameQueueTree.put(assignedClientID, clientTransmissionFrameQueues);

            ACKSet clientAckQueue = new ACKSet();
            ackQueues.put(assignedClientID, clientAckQueue);

            clientReceiveSetup(assignedClientID, udpSocket, firstFrameNumber, clientTransmissionFrameQueues);

            barrier.await();

            //Send second reply
            ClientsAnnouncement announcement = new ClientsAnnouncement();
            announcement.clientIDs = ArrayUtils.toPrimitive(this.clientIDs.toArray(new Integer[0]));
            oout.writeObject(announcement);

            clientTransmissionSetup(assignedClientID, firstFrameNumber, udpSocket,
                    clientTransmissionFrameQueues);

            //Wait for other handshakes to reach final step
            barrier.await();
            oout.writeObject(new SimulationStart());

            //Continue with execution
            latch.countDown();
        }
    } catch (IOException | ClassNotFoundException ioEx) {
        LOG.fatal("Exception at handshake with client");
        LOG.fatal(ioEx);
        server.interrupt();
    } catch (InterruptedException | BrokenBarrierException inEx) {
        //Interruptions come from failure in parallel handshake sessions, and signal termination
    }
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java
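
The barrier releases two executor tasks at the same instant so that both call asyncOpenCursor for the cursor "c1" concurrently; the test then asserts that both callers were handed the same ManagedCursor instance.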

@Test
public void testConcurrentOpenCursor() throws Exception {
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testConcurrentOpenCursor");

    final AtomicReference<ManagedCursor> cursor1 = new AtomicReference<>(null);
    final AtomicReference<ManagedCursor> cursor2 = new AtomicReference<>(null);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final CountDownLatch latch = new CountDownLatch(2);

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
            // ignored: the await() only lines the two tasks up
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {

            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor1.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
            // ignored: the await() only lines the two tasks up
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {

            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor2.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    latch.await();
    assertNotNull(cursor1.get());
    assertNotNull(cursor2.get());
    assertEquals(cursor1.get(), cursor2.get());

    ledger.close();
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestResourceLocalizationService.java
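
A stubbed setPermission blocks on the barrier until the test has queued two localization requests for the same public resource, then throws; the test verifies that both requests end in a ContainerResourceFailedEvent.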

@Test(timeout = 20000)
@SuppressWarnings("unchecked") // mocked generics
public void testFailedPublicResource() throws Exception {
    List<Path> localDirs = new ArrayList<Path>();
    String[] sDirs = new String[4];
    for (int i = 0; i < 4; ++i) {
        localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
        sDirs[i] = localDirs.get(i).toString();
    }
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);

    DrainDispatcher dispatcher = new DrainDispatcher();
    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
    dispatcher.register(ApplicationEventType.class, applicationBus);
    EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
    dispatcher.register(ContainerEventType.class, containerBus);

    ContainerExecutor exec = mock(ContainerExecutor.class);
    DeletionService delService = mock(DeletionService.class);
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);

    dispatcher.init(conf);
    dispatcher.start();

    try {
        ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService,
                dirsHandler, nmContext);
        ResourceLocalizationService spyService = spy(rawService);
        doReturn(mockServer).when(spyService).createServer();
        doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));

        spyService.init(conf);
        spyService.start();

        final String user = "user0";
        final String userFolder = "user0Folder";
        // init application
        final Application app = mock(Application.class);
        final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
        when(app.getUser()).thenReturn(user);
        when(app.getUserFolder()).thenReturn(userFolder);
        when(app.getAppId()).thenReturn(appId);
        spyService.handle(
                new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
        dispatcher.await();

        // init container.
        final Container c = getMockContainer(appId, 42, user, userFolder);

        // init resources
        Random r = new Random();
        long seed = r.nextLong();
        System.out.println("SEED: " + seed);
        r.setSeed(seed);

        // cause chmod to fail after a delay
        final CyclicBarrier barrier = new CyclicBarrier(2);
        doAnswer(new Answer<Void>() {
            public Void answer(InvocationOnMock invocation) throws IOException {
                try {
                    barrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    // ignored: the forced failure below is thrown either way
                }
                throw new IOException("forced failure");
            }
        }).when(spylfs).setPermission(isA(Path.class), isA(FsPermission.class));

        // Queue up two localization requests for the same public resource
        final LocalResource pubResource = getPublicMockedResource(r);
        final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource);

        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq));

        Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>();
        pubRsrcs.add(pubReq);

        spyService.handle(new ContainerLocalizationRequestEvent(c, req));
        spyService.handle(new ContainerLocalizationRequestEvent(c, req));
        dispatcher.await();

        // allow the chmod to fail now that both requests have been queued
        barrier.await();
        verify(containerBus, timeout(5000).times(2)).handle(isA(ContainerResourceFailedEvent.class));
    } finally {
        dispatcher.stop();
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestLeafQueue.java
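
The barrier releases the reader thread (collectSchedulerApplications) exactly when the writer thread switches from submitting to finishing application attempts, so reads run concurrently with removals; any ConcurrentModificationException is collected and fails the test.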

@Test
public void testConcurrentAccess() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    RMStorageFactory.setConfiguration(conf);
    YarnAPIStorageFactory.setConfiguration(conf);
    DBUtility.InitializeDB();
    MockRM rm = new MockRM();
    rm.init(conf);
    rm.start();

    final String queue = "default";
    final String user = "user";
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    final LeafQueue defaultQueue = (LeafQueue) cs.getQueue(queue);

    final List<FiCaSchedulerApp> listOfApps = createListOfApps(10000, user, defaultQueue);

    final CyclicBarrier cb = new CyclicBarrier(2);
    final List<ConcurrentModificationException> conException = new ArrayList<ConcurrentModificationException>();

    Thread submitAndRemove = new Thread(new Runnable() {

        @Override
        public void run() {

            for (FiCaSchedulerApp fiCaSchedulerApp : listOfApps) {
                defaultQueue.submitApplicationAttempt(fiCaSchedulerApp, user);
            }
            try {
                cb.await();
            } catch (Exception e) {
                // Ignore
            }
            for (FiCaSchedulerApp fiCaSchedulerApp : listOfApps) {
                defaultQueue.finishApplicationAttempt(fiCaSchedulerApp, queue);
            }
        }
    }, "SubmitAndRemoveApplicationAttempt Thread");

    Thread getAppsInQueue = new Thread(new Runnable() {
        List<ApplicationAttemptId> apps = new ArrayList<ApplicationAttemptId>();

        @Override
        public void run() {
            try {
                try {
                    cb.await();
                } catch (Exception e) {
                    // Ignore
                }
                defaultQueue.collectSchedulerApplications(apps);
            } catch (ConcurrentModificationException e) {
                conException.add(e);
            }
        }

    }, "GetAppsInQueue Thread");

    submitAndRemove.start();
    getAppsInQueue.start();

    submitAndRemove.join();
    getAppsInQueue.join();

    assertTrue("ConcurrentModificationException is thrown", conException.isEmpty());
    rm.stop();

}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler.java
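
A second thread takes the scheduler's monitor and parks between two await() calls; the main thread issues an allocate call between the same two rendezvous points, proving that allocate does not block on the scheduler lock.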

@Test(timeout = 30000)
public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
    final YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MyContainerManager containerManager = new MyContainerManager();
    final MockRMWithAMS rm = new MockRMWithAMS(conf, containerManager);
    rm.start();

    MockNM nm1 = rm.registerNode("localhost:1234", 5120);

    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(2);
    acls.put(ApplicationAccessType.VIEW_APP, "*");
    RMApp app = rm.submitApp(1024, "appname", "appuser", acls);

    nm1.nodeHeartbeat(true);

    RMAppAttempt attempt = app.getCurrentAppAttempt();
    ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
    int msecToWait = 10000;
    int msecToSleep = 100;
    while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED && msecToWait > 0) {
        LOG.info("Waiting for AppAttempt to reach LAUNCHED state. " + "Current state is "
                + attempt.getAppAttemptState());
        Thread.sleep(msecToSleep);
        msecToWait -= msecToSleep;
    }
    Assert.assertEquals(RMAppAttemptState.LAUNCHED, attempt.getAppAttemptState());

    // Create a client to the RM.
    final YarnRPC rpc = YarnRPC.create(conf);

    UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
    Credentials credentials = containerManager.getContainerCredentials();
    final InetSocketAddress rmBindAddress = rm.getApplicationMasterService().getBindAddress();
    Token<? extends TokenIdentifier> amRMToken = MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
            credentials.getAllTokens());
    currentUser.addToken(amRMToken);
    ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
            return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rmBindAddress,
                    conf);
        }
    });

    RegisterApplicationMasterRequest request = RegisterApplicationMasterRequest.newInstance("localhost", 12345,
            "");
    client.registerApplicationMaster(request);

    // Allocate a container
    List<ResourceRequest> asks = Collections.singletonList(
            ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(2 * GB), 1));
    AllocateRequest allocateRequest = AllocateRequest.newInstance(0, 0.0f, asks, null, null);
    client.allocate(allocateRequest);

    // Make sure the container is allocated in RM
    nm1.nodeHeartbeat(true);
    ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2);
    Assert.assertTrue(rm.waitForState(nm1, containerId2, RMContainerState.ALLOCATED, 10 * 1000));

    // Acquire the container
    allocateRequest = AllocateRequest.newInstance(1, 0.0f, null, null, null);
    client.allocate(allocateRequest);

    // Launch the container
    final CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    RMContainer rmContainer = cs.getRMContainer(containerId2);
    rmContainer.handle(new RMContainerEvent(containerId2, RMContainerEventType.LAUNCHED));

    // grab the scheduler lock from another thread
    // and verify an allocate call in this thread doesn't block on it
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Thread otherThread = new Thread(new Runnable() {
        @Override
        public void run() {
            synchronized (cs) {
                try {
                    barrier.await();
                    barrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    e.printStackTrace();
                }
            }
        }
    });
    otherThread.start();
    barrier.await();
    List<ContainerId> release = Collections.singletonList(containerId2);
    allocateRequest = AllocateRequest.newInstance(2, 0.0f, null, release, null);
    client.allocate(allocateRequest);
    barrier.await();
    otherThread.join();

    rm.stop();
}

From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java
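
The barrier is shared with a TestingOutputCommitter that evidently blocks the commit until the test calls syncBarrier.await(), which lets the test drive node-unhealthy transitions and mapper rescheduling first, then release the committer and assert that the job reaches SUCCEEDED.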

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // add a special task event handler to put the task back to running in case
    // of task rescheduling/killing
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {
        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);

    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);

            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);

    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }

    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper
    // this should not trigger rescheduling
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);

    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);

    dispatcher.stop();
    commitHandler.stop();
}