Example usage for java.util.concurrent ExecutorService execute

Introduction

On this page you can find example usages of java.util.concurrent.ExecutorService#execute.

Prototype

void execute(Runnable command);

Document

Executes the given command at some time in the future.
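
Before the real-world examples below, here is a minimal, self-contained sketch of the typical execute pattern: create a pool, queue Runnables, then shut the pool down and wait for it to drain. All names in this sketch are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // execute(Runnable) queues the task to run at some time in the future
            pool.execute(() -> System.out.println(
                    "task " + taskId + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();                              // stop accepting new tasks
        pool.awaitTermination(30, TimeUnit.SECONDS);  // wait for queued tasks to finish
    }
}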

Usage

From source file:org.wso2.appserver.integration.lazy.loading.artifacts.WebApplicationGhostDeploymentTestCase.java

@Test(groups = "wso2.as.lazy.loading", description = "Send concurrent requests when tenant context is loaded"
        + " but the Web-App is in ghost form. All requests should get the expected output", dependsOnMethods = "testConcurrentWebAPPInvocationsWhenTenantContextNotLoadedInGhostDeployment", enabled = false)
public void testConcurrentWebAPPInvocationsWhenTenantContextLoadedInGhostDeployment() throws Exception {
    // This test method is disabled because of CARBON-15270
    serverManager.restartGracefully();
    responseDataList.clear();
    responseDetailedInfoList.clear();
    assertFalse(getTenantStatus(tenantDomain1).isTenantContextLoaded(),
            "Tenant context is loaded before access. Tenant name: " + tenantDomain1);

    HttpResponse httpResponseApp2 = HttpURLConnectionClient.sendGetRequest(tenant1WebApp2URL, null);
    assertTrue(httpResponseApp2.getData().contains(WEB_APP2_RESPONSE),
            "Invocation of Web-App failed: " + tenant1WebApp2URL);
    assertTrue(getTenantStatus(tenantDomain1).isTenantContextLoaded(),
            "Tenant context is not loaded after access. Tenant name: " + tenantDomain1);

    WebAppStatusBean webAppStatusTenant1WebApp2 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME2);
    assertTrue(webAppStatusTenant1WebApp2.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME2 + " is not started in Tenant:" + tenantDomain1);
    assertFalse(webAppStatusTenant1WebApp2.isWebAppGhost(),
            "Web-App: " + WEB_APP_FILE_NAME2 + " is in ghost mode after invoking in Tenant:" + tenantDomain1);

    WebAppStatusBean webAppStatusTenant1WebApp1 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is not started in Tenant:" + tenantDomain1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppGhost(), "Web-App: " + WEB_APP_FILE_NAME1
            + " is not in ghost mode before invoking in Tenant:" + tenantDomain1);

    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_THREAD_COUNT);
    log.info("Concurrent invocation Start");
    log.info("Expected Response Data:" + WEB_APP1_RESPONSE);
    for (int i = 0; i < CONCURRENT_THREAD_COUNT; i++) {
        final int requestId = i;
        executorService.execute(new Runnable() {

            public void run() {
                HttpResponse httpResponse = null;
                try {
                    httpResponse = HttpURLConnectionClient.sendGetRequest(tenant1WebApp1URL, null);
                } catch (IOException e) {
                    log.error("Error  when sending a  get request  for :" + tenant1WebApp1URL, e);
                }
                // synchronize on the shared list: synchronizing on "this" would lock
                // each Runnable's own monitor and provide no mutual exclusion
                synchronized (responseDataList) {
                    String responseDetailedInfo;
                    String responseData;
                    if (httpResponse != null) {
                        responseDetailedInfo = "Request ID " + requestId + "Response Data :"
                                + httpResponse.getData() + "\tResponse Code:" + httpResponse.getResponseCode();
                        responseData = httpResponse.getData();
                    } else {
                        responseDetailedInfo = "Request ID " + requestId
                                + "Response Data : NULL Object return from " + "HttpURLConnectionClient";
                        responseData = "NULL Object return";
                    }
                    responseDataList.add(responseData);
                    log.info(responseDetailedInfo);
                    responseDetailedInfoList.add(responseDetailedInfo);
                }
            }

        });
    }
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.MINUTES);
    log.info("Concurrent invocation End");
    int correctResponseCount = 0;
    for (String responseData : responseDataList) {
        if (WEB_APP1_RESPONSE.equals(responseData)) {
            correctResponseCount += 1;
        }
    }
    StringBuilder allDetailResponseStringBuffer = new StringBuilder();
    allDetailResponseStringBuffer.append("\n");
    for (String responseInfo : responseDetailedInfoList) {
        allDetailResponseStringBuffer.append(responseInfo);
        allDetailResponseStringBuffer.append("\n");
    }
    String allDetailResponse = allDetailResponseStringBuffer.toString();
    webAppStatusTenant1WebApp1 = getWebAppStatus(tenantDomain1, WEB_APP_FILE_NAME1);
    assertTrue(webAppStatusTenant1WebApp1.getTenantStatus().isTenantContextLoaded(),
            "Tenant context is not loaded. Tenant:" + tenantDomain1);
    assertTrue(webAppStatusTenant1WebApp1.isWebAppStarted(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is not started in Tenant:" + tenantDomain1);
    assertFalse(webAppStatusTenant1WebApp1.isWebAppGhost(),
            "Web-App: " + WEB_APP_FILE_NAME1 + " is in ghost mode after invoking in Tenant:" + tenantDomain1);
    assertEquals(correctResponseCount, CONCURRENT_THREAD_COUNT,
            allDetailResponse + "Not all concurrent requests got the correct response.");

}
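
The test above serializes access to its shared result lists with an explicit lock. An alternative sketch of the same fan-out-and-collect pattern (invented names, not WSO2 code) uses a synchronized list so each task can append without manual locking:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FanOutCollect {
    public static void main(String[] args) throws InterruptedException {
        // synchronizedList makes each add() atomic without an explicit lock
        final List<String> responses = Collections.synchronizedList(new ArrayList<>());
        ExecutorService pool = Executors.newFixedThreadPool(8);
        for (int i = 0; i < 8; i++) {
            final int requestId = i;
            pool.execute(() -> responses.add("response-" + requestId));
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.MINUTES);
        System.out.println(responses.size() + " responses collected");
    }
}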

From source file:org.metaeffekt.dcc.controller.commands.AbstractUnitBasedCommand.java

@Override
protected void doExecute(final boolean force, final boolean parallel, final Id<UnitId> limitToUnitId) {
    LOG.info("Executing command [{}] ...", getCommandVerb());
    final List<ConfigurationUnit> units = getExecutionContext().getProfile().getUnits(false);

    boolean[] unitFound = new boolean[1];
    unitFound[0] = false;

    final Map<Id<?>, Throwable> exceptions = new ConcurrentHashMap<>();
    final UnitDependencies unitDependencies = getExecutionContext().getProfile().getUnitDependencies();

    final List<List<ConfigurationUnit>> groupLists = unitDependencies.evaluateDependencyGroups(units);

    // some commands must be executed in reverse order (e.g. the stop command)
    if (isReversive()) {
        Collections.reverse(groupLists);
    }

    // process the resulting group list
    for (List<ConfigurationUnit> group : groupLists) {

        // we always run in an executor (mainly due to logging)
        final ExecutorService executor;
        if (parallel && group.size() > 1) {
            executor = Executors.newFixedThreadPool(Math.min(NUMBER_OF_THREADS, group.size()));
        } else {
            executor = Executors.newFixedThreadPool(1);
        }

        // execute commands (queue)
        for (final ConfigurationUnit unit : group) {
            executor.execute(() -> executeCommand(force, limitToUnitId, unitFound, unit, exceptions));
        }

        awaitTerminationOrCancelOnException(executor, exceptions);

        // execute update status sequentially
        group.stream().forEach(unit -> updateStatus(limitToUnitId, unit));

        // in case we have exceptions; we stop the remaining executions
        if (!exceptions.isEmpty()) {
            LOG.warn("Skipping execution of further commands due to previous error.");
            break;
        }
    }

    handleExceptions(exceptions);

    // handle the case where the unit id the execution was limited to was not reached / found.
    if (exceptions.isEmpty() && limitToUnitId != null && !unitFound[0]) {
        throw new IllegalArgumentException(String.format(
                "  Command [%s] not executable for unit [%s]. Either the unit does not exist or the command does not"
                        + " apply for the unit.",
                getCommandVerb(), limitToUnitId));
    }
}
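
The command above drains each dependency group through its own executor before the next group starts. A stripped-down sketch of that group-by-group pattern (names and pool size are invented for illustration):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GroupedExecution {
    static void runGroups(List<List<Runnable>> groups) throws InterruptedException {
        for (List<Runnable> group : groups) {
            ExecutorService executor = Executors.newFixedThreadPool(Math.min(4, Math.max(1, group.size())));
            for (Runnable task : group) {
                executor.execute(task); // queue every task in the current group
            }
            executor.shutdown();
            // block until the whole group has finished before starting the next one
            executor.awaitTermination(1, TimeUnit.HOURS);
        }
    }
}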

From source file:org.apache.hive.hcatalog.templeton.tool.LaunchMapper.java

private void executeWatcher(ExecutorService pool, Configuration conf, JobID jobid, InputStream in,
        String statusdir, String name) throws IOException {
    Watcher w = new Watcher(conf, jobid, in, statusdir, name);
    pool.execute(w);
}

From source file:terrastore.metrics.PerformanceTest.java

@Test
public void writeOnly() throws Exception {
    final String bucket = UUID.randomUUID().toString();

    int warmup = 1000;
    int writes = 1000;

    final ExecutorService threadPool = Executors.newFixedThreadPool(CONCURRENCY);
    final CountDownLatch termination = new CountDownLatch(writes);

    final String payload = getPayload();
    warmUp(warmup, bucket, payload);

    System.err.println("Starting writeOnly performance test.");

    long start = System.currentTimeMillis();
    for (int i = warmup; i < warmup + writes; i++) {
        final int index = i;
        threadPool.execute(new Runnable() {

            public void run() {
                try {
                    PutMethod putValue = makePutMethod(NODE1_PORT, bucket + "/value" + index);
                    putValue.setRequestEntity(new StringRequestEntity(payload, "application/json", null));
                    HTTP_CLIENT.executeMethod(putValue);
                    assertEquals(HttpStatus.SC_NO_CONTENT, putValue.getStatusCode());
                    putValue.releaseConnection();
                    termination.countDown();
                } catch (Exception ex) {
                    throw new RuntimeException(ex);
                }
            }
        });
    }

    threadPool.shutdown();
    termination.await(Integer.MAX_VALUE, TimeUnit.SECONDS);

    long elapsed = System.currentTimeMillis() - start;

    System.err.println("Elapsed time in millis: " + elapsed);
}
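
This benchmark tracks completion with a CountDownLatch instead of relying on shutdown() alone. A minimal sketch of that coordination pattern (illustrative names; note the countDown() in a finally block so a failing task cannot hang await()):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchExample {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 16;
        final CountDownLatch done = new CountDownLatch(tasks);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < tasks; i++) {
            pool.execute(() -> {
                try {
                    // ... do the actual work here ...
                } finally {
                    done.countDown(); // count down even if the work throws
                }
            });
        }
        done.await();   // block until every task has counted down
        pool.shutdown();
        System.out.println("all " + tasks + " tasks finished");
    }
}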

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.disable(..) method
 * which must never trigger @OnDisabled, regardless of how many threads may
 * have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateDisabledServiceCantBeDisabled() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations of a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(0, ts.disableInvocationCount());
}

From source file:gridool.db.partitioning.phihash.csv.normal.CsvPartitioningTask.java

private final void invokeShuffle(@Nonnull final ExecutorService shuffleExecPool,
        @Nonnull final ArrayQueue<String> queue) {
    assert (kernel != null);
    final String[] lines = queue.toArray(String.class);
    final String fileName = csvFileName;
    if (isFirstShuffle) {
        PartitioningJobConf conf = new PartitioningJobConf(lines, fileName, true, primaryForeignKeys, jobConf);
        runShuffleJob(kernel, conf, assignMap, deploymentGroup);
        this.isFirstShuffle = false;
    } else {
        shuffleExecPool.execute(new Runnable() {
            public void run() {
                PartitioningJobConf conf = new PartitioningJobConf(lines, fileName, false, primaryForeignKeys,
                        jobConf);
                runShuffleJob(kernel, conf, assignMap, deploymentGroup);
            }
        });
    }
}
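
invokeShuffle runs the first batch inline on the calling thread and offloads later batches to the pool. A minimal sketch of that first-call-synchronous pattern (invented names; it assumes a single calling thread, as the original does):

import java.util.concurrent.ExecutorService;

public class FirstSyncThenAsync {
    private boolean first = true; // safe only if submit() is called from one thread

    void submit(ExecutorService pool, Runnable work) {
        if (first) {
            work.run();         // run the first batch inline on the caller's thread
            first = false;
        } else {
            pool.execute(work); // later batches run asynchronously in the pool
        }
    }
}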

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.enable() method
 * which must only trigger @OnEnabled once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateServiceEnablementLogicHappensOnlyOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    assertFalse(serviceNode.isActive());
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.enableControllerService(serviceNode);
                    assertTrue(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations of a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.enableInvocationCount());
}

From source file:org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates the atomic nature of ControllerServiceNode.disable() method
 * which must only trigger @OnDisabled once, regardless of how many threads
 * may have a reference to the underlying ProcessScheduler and
 * ControllerServiceNode.
 */
@Test
public void validateEnabledServiceCanOnlyBeDisabledOnce() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ControllerServiceNode serviceNode = provider.createControllerService(
            SimpleTestService.class.getName(), "1", systemBundle.getBundleDetails().getCoordinate(), null,
            false);
    final SimpleTestService ts = (SimpleTestService) serviceNode.getControllerServiceImplementation();
    scheduler.enableControllerService(serviceNode);
    assertTrue(serviceNode.isActive());
    final ExecutorService executor = Executors.newCachedThreadPool();

    final AtomicBoolean asyncFailed = new AtomicBoolean();
    for (int i = 0; i < 1000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    scheduler.disableControllerService(serviceNode);
                    assertFalse(serviceNode.isActive());
                } catch (final Exception e) {
                    e.printStackTrace();
                    asyncFailed.set(true);
                }
            }
        });
    }
    // need to sleep a while since we are emulating async invocations of a
    // method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    assertFalse(asyncFailed.get());
    assertEquals(1, ts.disableInvocationCount());
}
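
The three NiFi tests above sleep for a fixed 500 ms before asserting, which can be timing-sensitive. A more deterministic wait (a sketch under invented names, not the NiFi code itself) is to shut the pool down and block on awaitTermination:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class DeterministicWait {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger invocations = new AtomicInteger();
        ExecutorService executor = Executors.newCachedThreadPool();
        for (int i = 0; i < 1000; i++) {
            executor.execute(invocations::incrementAndGet);
        }
        executor.shutdown();
        // block until all queued tasks finish instead of sleeping a fixed interval
        if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
            throw new IllegalStateException("tasks did not finish in time");
        }
        System.out.println(invocations.get()); // prints 1000
    }
}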

From source file:org.wso2.carbon.bps.perf.rest.RestClientTestDep.java

public void execute() throws Exception {

    instanceCount = Integer.parseInt(config.getProperty("instances"));
    serverURL = config.getProperty("serverURL");
    numTreads = Integer.parseInt(config.getProperty("threads"));
    outPath = config.getProperty("results");
    File outFolder = new File(outPath);
    if (!outFolder.exists()) {
        log.info("Results folder " + outFolder.getAbsolutePath() + " does not exist. Creating a new folder...");
        outFolder.mkdirs();
    }
    File testReportFile = new File(outFolder, "summary.csv");
    StringBuffer summaryBuffer = new StringBuffer();
    summaryBuffer.append("Server URL," + serverURL + "\n");
    summaryBuffer.append("Number of process instances," + instanceCount + "\n");
    summaryBuffer.append("Number of threads," + numTreads + "\n\n\n");
    summaryBuffer.append("Process ID,Total time,TPS,Average execution time\n\n");
    FileUtils.write(testReportFile, summaryBuffer.toString());

    List<ProcessConfig> processConfigs = new ArrayList<>();
    boolean processFound = true;
    String processRef = "process";
    int processNum = 1;
    while (processFound) {
        String processProp = config.getProperty(processRef + processNum);
        if (processProp == null) {
            break;
        }
        ProcessConfig processConfig = new ProcessConfig(processProp, null);
        String[] processParts = processProp.split("\\|");
        processConfig.setKey(processParts[0].trim());
        if (processParts.length > 1) {
            String[] varParts = processParts[1].split(";");
            for (String varPart : varParts) {
                String name = varPart.split(",")[0];
                String value = varPart.split(",")[1];
                processConfig.addVariable(name, value);
            }
        }
        processConfigs.add(processConfig);
        processNum++; // advance to the next "processN" property; without this the loop never terminates
    }

    for (ProcessConfig processConfig : processConfigs) {
        ActivitiRestClient client = new ActivitiRestClient(serverURL, numTreads);

        List<RestProcessExecutor> processExecutors = new ArrayList<>(instanceCount);
        ExecutorService executorService = Executors.newFixedThreadPool(numTreads);

        long stime = System.currentTimeMillis();
        for (int i = 0; i < instanceCount; i++) {
            RestProcessExecutor processExecutor = new RestProcessExecutor(null, processConfig.getId(),
                    processConfig.getStartupVariables(), client, i);
            processExecutors.add(processExecutor);
            executorService.execute(processExecutor);
        }

        executorService.shutdown();
        try {
            executorService.awaitTermination(1, TimeUnit.HOURS);
        } catch (InterruptedException e) {
            String msg = "Error occurred while waiting for executors to terminate.";
            log.error(msg, e);
        }

        long etime = System.currentTimeMillis();

        StringBuffer buf = new StringBuffer();
        double externalTPS = (double) instanceCount * 1000 / (double) (etime - stime);
        double totalDuration = 0;
        buf.append("Instance durations\n");
        for (RestProcessExecutor processExecutor : processExecutors) {
            buf.append(processExecutor.getExternalDuration() + "\n");
            totalDuration += processExecutor.getExternalDuration();
        }
        double avgExeTimeEngine = totalDuration / instanceCount;

        log.info("Process " + processConfig.getId() + " completed with duration: " + (etime - stime)
                + " ms | TPS: " + externalTPS + " | Average execution time: " + avgExeTimeEngine);
        String processRecord = processConfig.getId() + "," + (etime - stime) + "," + externalTPS + ","
                + avgExeTimeEngine + "\n";
        FileWriter fileWriter = new FileWriter(testReportFile, true);
        fileWriter.write(processRecord);
        fileWriter.close();

        buf.append("\n\nTPS," + externalTPS + "\n\n");

        File processReportFile = new File(outFolder, processConfig.getId() + ".csv");
        FileUtils.write(processReportFile, buf.toString());
        client.close();
    }

    //        Map<String, Object> vars = new HashMap<String, Object>();
    //        vars.put("testCount", new Long(1));
    //
    //        List<RestProcessExecutor> processExecutors = new ArrayList<>(instanceCount);
    //        ExecutorService executorService = Executors.newFixedThreadPool(numTreads);
    //
    //        long stime = System.currentTimeMillis();
    //        for (int i = 0; i < instanceCount; i++) {
    //            RestProcessExecutor processExecutor = new RestProcessExecutor(processKey, processId, vars, client, i);
    //            processExecutors.add(processExecutor);
    //            executorService.execute(processExecutor);
    //        }
    //
    //        executorService.shutdown();
    //        try {
    //            executorService.awaitTermination(1, TimeUnit.HOURS);
    //        } catch (InterruptedException e) {
    //            String msg = "Error occurred while waiting for executors to terminate.";
    //            log.error(msg, e);
    //        }
    //
    //        long etime = System.currentTimeMillis();
    //
    //        List<Long> startTimes = new ArrayList<Long>();
    //        List<Long> endTimes = new ArrayList<Long>();

    //        HistoryService h = engine.getHistoryService();
    //        for (HistoricProcessInstance hp : h.createHistoricProcessInstanceQuery().list()) {
    //            long startTime = hp.getStartTime().getTime();
    //            long endTime = hp.getStartTime().getTime();
    //            startTimes.add(startTime);
    //            endTimes.add(endTime);
    //            long duration = endTime - startTime;
    //            System.out.println("Duration: " + duration + " ms");
    //        }
    //        Collections.sort(startTimes);
    //        Collections.sort(endTimes);
    //
    //        long testStartTime = startTimes.get(0);
    //        long testEndTime = endTimes.get(endTimes.size() - 1);
    //        System.out.println("Test duration: " + (testEndTime - testStartTime));
    //        double throughput = (double) instanceCount * 1000 / (double) (testEndTime - testStartTime);
    //        System.out.println("TPS: " + throughput);

    //        StringBuffer buf = new StringBuffer();
    //        log.info("External duration: " + (etime - stime));
    //        double externalTPS = (double) instanceCount * 1000 / (double) (etime - stime);
    //        log.info("External TPS: " + externalTPS);
    //        buf.append("TPS," + externalTPS + "\n\n");
    //
    //        double totalDuration = 0;
    //
    //        buf.append("Instance duration\n");
    //        for (RestProcessExecutor processExecutor : processExecutors) {
    //            buf.append(processExecutor.getExternalDuration() + "\n");
    //            totalDuration += processExecutor.getExternalDuration();
    //        }
    //        log.info("Total duration: " + totalDuration);
    //        double avgExeTimeEngine = totalDuration / instanceCount;
    //        log.info("Average execution time (External): " + avgExeTimeEngine);
    //
    //        FileUtils.write(new File(outPath), buf.toString());
    //        client.close();

}

From source file:com.idocbox.flame.Helios.java

/**
 * fire them!
 * @param jobConfig job configuration carrying the data source, data source
 *                  spliter, mapper/reducer workers, mapper, reducer and keeper.
 * @return collector holding the merged map/reduce result, or null on failure.
 */
public Collector<Map<K, V>> fire(JobConfig<K, V, T> jobConfig) {

    long start = System.currentTimeMillis();

    Collector<Map<K, V>> resultCollector = null;

    // data source.
    DataSource<T> dataSource = jobConfig.getDataSource();
    // data source spliter.
    DataSourceSpliter<T> dataSourceSpliter = jobConfig.getDataSourceSpliter();
    // mapper worker. root mapper worker.
    MapperWorker<K, V, T> mapperWorker = jobConfig.getMapperWorker();
    // reducer worker. root reducer worker.
    ReducerWorker<K, V> reducerWorker = jobConfig.getReducerWorker();
    // mapper.
    Mapper<K, V, T> mapper = jobConfig.getMapper();
    // reducer.
    Reducer<K, V> reducer = jobConfig.getReducer();
    // keeper.
    Keeper<Collector<Map<K, V>>> keeper = jobConfig.getKeeper();

    // spliting phase.

    // split the data source into several data sources.
    log.info("spliting datasource ...");
    Map<String, DataSource<T>> dsMap = dataSourceSpliter.split(dataSource);

    long m1 = System.currentTimeMillis();
    long cost1 = m1 - start;
    double seconds1 = cost1 / 1000.0; // avoid integer division truncation
    log.info("spliting datasource: cost " + seconds1 + " s");

    // abort if splitting produced no data sources
    if (null == dsMap || dsMap.isEmpty()) {
        log.info("Split data source is empty! Exiting flame!");
        return null;
    }

    // mapping phase.

    // generate mapper worker.
    log.info("mapping && reducing ...");
    Set<String> dsKeys = dsMap.keySet();
    //mapper thread size.
    int mapperThreadSize = Math.min(dsKeys.size(), jobConfig.getMaxMapperWorker());
    //create mapper worker thread pool.
    ExecutorService mapperWorkerThreadPool = Executors.newFixedThreadPool(mapperThreadSize);
    int dataSourceSize = 0;
    for (String key : dsKeys) {
        //create mapper worker baby.
        MapperWorker<K, V, T> mapperWorkerBaby = mapperWorker.create(key);

        //assign data source and run the worker.
        DataSource<T> dsUnit = dsMap.get(key);
        if (null != dsUnit) {
            //execute mapper work in thread pool.
            mapperWorkerThreadPool
                    .execute(new MapperWorkerRunable<K, V, T>(mapperWorkerBaby, dsUnit, mapper, keeper));

            dataSourceSize++;
        }
    }
    //shutdown executor service.
    mapperWorkerThreadPool.shutdown();

    // reduce phase.

    //generate reducer worker, assign mapper worker's compute result
    // to reducer worker.

    //mapper thread size.
    //create reducer worker thread pool.
    ExecutorService reducerWorkerThreadPool = Executors.newFixedThreadPool(jobConfig.getMaxReducerWorker());

    //get 2 collector, merge them into one, then passed to reducer.
    Set<ReducerWorker<K, V>> reducerWorkers = new HashSet<ReducerWorker<K, V>>();
    int j = 0;
    int expectedReducTime = dataSourceSize - 1;
    while (true) {//reduce while there is more than one element in set.
        if (mapperWorkerThreadPool.isTerminated()) {
            int count = keeper.count();
            if (count == 0) {//no mapped result.
                log.info("there is no result given by mapper. exit!");
                return null;
            }
        }
        if (j == expectedReducTime) {
            log.info("complete reduce. exit flame.");
            break;
        }

        Set<Collector<Map<K, V>>> collectors = new HashSet<Collector<Map<K, V>>>(2);
        collectors.add(keeper.take());
        collectors.add(keeper.take());

        // get an idle worker.
        ReducerWorker<K, V> reducerWorkerBaby = chooseIdle(reducerWorkers, reducerWorker);

        log.info("reducing, collector size = " + keeper.size());

        reducerWorkerThreadPool
                .execute(new ReducerWorkerRunnable<K, V>(reducerWorkerBaby, collectors, reducer, keeper));

        j++;
    }

    //shutdown reducer worker thread pool.
    reducerWorkerThreadPool.shutdown();

    // collect result phase.
    while (!reducerWorkerThreadPool.isTerminated()) {
        Thread.yield();
    }
    if (null != keeper && keeper.size() == 1) {
        resultCollector = keeper.poll();
    } else {// error occurred.
        int size = 0;
        if (null != keeper) {
            size = keeper.size();
        }
        log.info("after reduce, the result collector is not expected! collector size is " + size);
    }

    //return result collector.
    long end = System.currentTimeMillis();
    long cost = end - m1;
    double seconds = cost / 1000.0; // avoid integer division truncation
    log.info("mapping & reducing: cost " + seconds + " s");

    return resultCollector;
}