Example usage for java.util.concurrent ExecutorService submit

Introduction

This page collects example usages of java.util.concurrent ExecutorService.submit from open-source projects.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task. The Future's get method will return null upon successful completion.
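
Below is a minimal, self-contained sketch of both submit overloads; the class name SubmitExample and the tasks themselves are illustrative, not taken from the sources below. For a Runnable, get() returns null on success, so it is mainly useful for blocking until completion and surfacing exceptions; the submit(Callable<T>) overload, which several of the examples below rely on, returns a Future<T> carrying the task's result.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // submit(Runnable) returns a Future<?> that carries no value;
            // get() blocks until the task completes and then returns null.
            Future<?> done = executor.submit(() -> System.out.println("task ran"));
            done.get();

            // submit(Callable<T>) returns a Future<T> with the task's result.
            Future<Integer> answer = executor.submit(() -> 6 * 7);
            System.out.println("result: " + answer.get());
        } catch (ExecutionException e) {
            // An exception thrown inside a task surfaces here, wrapped.
            e.printStackTrace();
        } finally {
            executor.shutdown(); // let the worker thread exit
        }
    }
}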

Usage

From source file:com.twitter.hraven.etl.JobFileProcessor.java

/**
 * Run the jobs and wait for all of them to complete.
 *
 * @param threadCount
 *          up to how many jobs to run in parallel
 * @param jobRunners
 *          the list of jobs to run.
 * @return whether all jobs completed successfully or not.
 * @throws InterruptedException
 *           when interrupted while running jobs.
 * @throws ExecutionException
 *           when at least one of the jobs could not be scheduled.
 */
private boolean runJobs(int threadCount, List<JobRunner> jobRunners)
        throws InterruptedException, ExecutionException {
    if ((jobRunners == null) || jobRunners.isEmpty()) {
        return true;
    }

    // Create the thread pool only once we know there is work to run,
    // so the early return above cannot leak its threads.
    ExecutorService execSvc = Executors.newFixedThreadPool(threadCount);

    boolean success = true;
    try {
        List<Future<Boolean>> jobFutures = new LinkedList<Future<Boolean>>();
        for (JobRunner jobRunner : jobRunners) {
            Future<Boolean> jobFuture = execSvc.submit(jobRunner);
            jobFutures.add(jobFuture);
        }

        // Wait for all jobs to complete.
        for (Future<Boolean> jobFuture : jobFutures) {
            success = jobFuture.get();
            if (!success) {
                // Stop the presses as soon as we see an error. Note that several
                // other jobs may have already been scheduled. Others will never be
                // scheduled.
                break;
            }
        }
    } finally {
        // Shut down the executor so that the JVM can exit.
        List<Runnable> neverRan = execSvc.shutdownNow();
        if (!neverRan.isEmpty()) {
            System.err.println("Interrupted run. Currently running Hadoop jobs will continue unless cancelled. "
                    + neverRan + " jobs never scheduled.");
        }
    }
    return success;
}

From source file:com.netflix.zeno.diff.TypeDiffOperation.java

@SuppressWarnings("unchecked")
public TypeDiff<T> performDiff(DiffSerializationFramework framework, Iterable<T> fromState, Iterable<T> toState,
        int numThreads) {
    Map<Object, T> fromStateObjects = new HashMap<Object, T>();

    for (T obj : fromState) {
        fromStateObjects.put(instruction.getKey(obj), obj);
    }

    ArrayList<List<T>> perProcessorWorkList = new ArrayList<List<T>>(numThreads); // each entry is a job
    for (int i = 0; i < numThreads; ++i) {
        perProcessorWorkList.add(new ArrayList<T>());
    }

    Map<Object, Object> toStateKeys = new ConcurrentHashMap<Object, Object>();

    int toIncrCount = 0;
    for (T toObject : toState) {
        perProcessorWorkList.get(toIncrCount % numThreads).add(toObject);
        toIncrCount++;
    }

    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            final Thread thread = new Thread(r, "TypeDiff_" + instruction.getTypeIdentifier());
            thread.setDaemon(true);
            return thread;
        }
    });

    try {
        ArrayList<Future<TypeDiff<T>>> workResultList = new ArrayList<Future<TypeDiff<T>>>(
                perProcessorWorkList.size());
        for (final List<T> workList : perProcessorWorkList) {
            if (workList != null && !workList.isEmpty()) {
                workResultList.add(executor.submit(new TypeDiffCallable<T>(framework, instruction,
                        fromStateObjects, toStateKeys, workList)));
            }
        }

        TypeDiff<T> mergedDiff = new TypeDiff<T>(instruction.getTypeIdentifier());
        for (final Future<TypeDiff<T>> future : workResultList) {
            try {
                TypeDiff<T> typeDiff = future.get();
                mergeTypeDiff(mergedDiff, typeDiff);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        for (Map.Entry<Object, T> entry : fromStateObjects.entrySet()) {
            mergedDiff.incrementFrom();
            if (!toStateKeys.containsKey(entry.getKey()))
                mergedDiff.addExtraInFrom(entry.getValue());
        }

        return mergedDiff;

    } finally {
        executor.shutdownNow();
    }
}

From source file:com.palantir.stash.stashbot.managers.JenkinsManager.java

public void updateAllJobs() {

    ExecutorService es = Executors.newCachedThreadPool();
    List<Future<Void>> futures = new LinkedList<Future<Void>>();

    PageRequest pageReq = new PageRequestImpl(0, 500);
    Page<? extends Repository> p = repositoryService.findAll(pageReq);
    while (true) {
        for (Repository r : p.getValues()) {
            Future<Void> f = es.submit(new UpdateAllRepositoryVisitor(jenkinsClientManager, jtm, cpm, r, lf));
            futures.add(f);
        }
        if (p.getIsLastPage())
            break;
        pageReq = p.getNextPageRequest();
        p = repositoryService.findAll(pageReq);
    }
    for (Future<Void> f : futures) {
        try {
            f.get(); // don't care about return, just catch exceptions
        } catch (ExecutionException e) {
            log.error("Exception while attempting to update jobs for a repo: ", e);
        } catch (InterruptedException e) {
            log.error("Interrupted: this shouldn't happen", e);
        }
    }
    // Allow the pool's threads to exit once all submitted work is done.
    es.shutdown();
}

From source file:com.palantir.stash.stashbot.managers.JenkinsManager.java

public void createMissingJobs() {

    ExecutorService es = Executors.newCachedThreadPool();
    List<Future<Void>> futures = new LinkedList<Future<Void>>();

    PageRequest pageReq = new PageRequestImpl(0, 500);
    Page<? extends Repository> p = repositoryService.findAll(pageReq);
    while (true) {
        for (Repository r : p.getValues()) {
            Future<Void> f = es
                    .submit(new CreateMissingRepositoryVisitor(jenkinsClientManager, jtm, cpm, r, lf));
            futures.add(f);
        }
        if (p.getIsLastPage())
            break;
        pageReq = p.getNextPageRequest();
        p = repositoryService.findAll(pageReq);
    }
    for (Future<Void> f : futures) {
        try {
            f.get(); // don't care about return, just catch exceptions
        } catch (ExecutionException e) {
            log.error("Exception while attempting to create missing jobs for a repo: ", e);
        } catch (InterruptedException e) {
            log.error("Interrupted: this shouldn't happen", e);
        }
    }
    // Allow the pool's threads to exit once all submitted work is done.
    es.shutdown();
}

From source file:com.emc.ecs.sync.storage.CasStorageTest.java

private void delete(FPPool pool, List<String> clipIds) throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Deleting clips");

    for (String clipId : clipIds) {
        service.submit(new ClipDeleter(pool, clipId));
    }

    // Stop accepting new tasks, wait for the queued deletes to finish,
    // then cancel anything still left after the timeout.
    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    System.out.println();
}

From source file:com.oneops.ops.dao.PerfDataAccessor.java

/**
 * Initializes the DAOs/connections.
 */
public void init() {
    logger.info("PerfDataAccessor: " + clusterName + ":" + keyspaceName);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<String> future = executor.submit(this::connectToCluster);

    try {
        logger.info("Started connecting.. with timeOut " + TIMEOUT_IN_SECONDS);
        logger.info(future.get(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS));
        logger.info("Finished connecting!");

    } catch (TimeoutException e) {
        logger.error("no cassandra hosts available - shutting down");
        throw new HectorException("Timeout occurred while getting the cassandra connection");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }

    executor.shutdownNow();
    initCluster();
}

From source file:com.jivesoftware.os.amza.service.AmzaService.java

public void compactAllTombstones() throws Exception {
    LOG.info("Manual compaction of all tombstones requested.");

    ExecutorService compactorPool = BoundedExecutor.newBoundedExecutor(numberOfStripes, "compactor");

    try {
        List<Future<?>> runnables = Lists.newArrayList();
        for (int i = 0; i < numberOfStripes; i++) {
            int stripe = i;
            runnables.add(compactorPool.submit(() -> {
                partitionTombstoneCompactor.compactTombstone(true, stripe);
                return null;
            }));
        }
        for (Future<?> runnable : runnables) {
            runnable.get();
        }
    } finally {
        compactorPool.shutdown();
    }
}

From source file:com.emc.ecs.sync.storage.CasStorageTest.java

private String summarize(FPPool pool, List<String> clipIds) throws Exception {
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());

    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Summarizing clips");

    for (String clipId : clipIds) {
        service.submit(new ClipReader(pool, clipId, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    System.out.println();

    Collections.sort(summaries);
    StringBuilder out = new StringBuilder();
    for (String summary : summaries) {
        out.append(summary);
    }
    return out.toString();
}

From source file:com.shonshampain.streamrecorder.util.StreamProxy.java

private void processRequest(HttpRequest request, Socket client) throws IllegalStateException, IOException {
    if (request == null) {
        return;
    }
    String url = request.getRequestLine().getUri();
    int mi = 0;
    Logger.d(DBG, TAG, "processing: " + url);
    Logger.d(DBG, TAG, "request: " + request.getRequestLine());
    for (Header h : request.getAllHeaders()) {
        Logger.d(DBG, TAG, "header: [" + h.getName() + "] = [" + h.getValue() + "]");
    }
    HttpResponse realResponse = download(url);
    if (realResponse == null) {
        startOver(null, client, FailType.CantDownload, null);
        return;
    }

    Logger.d(DBG, TAG, "downloading...");

    final InputStream data = realResponse.getEntity().getContent();
    StatusLine line = realResponse.getStatusLine();
    HttpResponse response = new BasicHttpResponse(line);
    response.setHeaders(realResponse.getAllHeaders());

    Logger.d(DBG, TAG, "reading headers");
    StringBuilder httpString = new StringBuilder();
    httpString.append(response.getStatusLine().toString());

    httpString.append("\n");
    for (Header h : response.getAllHeaders()) {
        if (h.getName().equalsIgnoreCase("Transfer-Encoding")) {
            /* The KCRW stream specifies chunked encoding in their headers,
             * however, when we read their stream, the data gets "unchunked".
             * Therefore, we cannot advertise a chunked encoding unless
             * we actually re-chunk the data; which we are not.
             */
            httpString.append("Accept-Ranges: bytes\r\n");
            httpString.append("Content-Length: 9999999999\r\n");
        } else {
            if (h.getName().equalsIgnoreCase("icy-metaint")) {
                mi = Integer.parseInt(h.getValue());
                Logger.d(DBG, TAG, "Creating new meta data extractor with interval: " + mi);
                mde = new MetaDataExtractor(mi);
            }
            httpString.append(h.getName()).append(": ").append(h.getValue()).append("\r\n");
        }
    }
    httpString.append("\n");
    Logger.d(DBG, TAG, "headers done: [" + httpString + "]");

    // Single worker thread used to impose a timeout on blocking stream reads.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        byte[] buffer = httpString.toString().getBytes();
        int readBytes;
        Logger.d(DBG_WRITES, TAG, "writing headers to client");
        client.getOutputStream().write(buffer, 0, buffer.length);

        // Start streaming content.
        final byte[] buff = new byte[1024 * 50];
        boolean endOfStream = false;

        while (isRunning && !endOfStream) {
            Callable<Integer> readTask = new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    return data.read(buff, 0, buff.length);
                }
            };
            Future<Integer> future = executor.submit(readTask);
            try {
                readBytes = future.get(STREAM_STALLED_TIMEOUT, TimeUnit.MILLISECONDS);
            } catch (TimeoutException to) {
                startOver(data, client, FailType.Stall, null);
                return;
            } catch (InterruptedException ie) {
                Logger.e(TAG, "The read operation was interrupted");
                continue;
            }
            endOfStream = readBytes == -1;
            if (!endOfStream) {
                Logger.d(DBG_READS, TAG, "Raw read: " + readBytes + " bytes");
                if (mi > 0) {
                    readBytes = mde.processBuffer(buff, readBytes);
                    Logger.d(DBG_META, TAG,
                            "Status: " + mde.getStatus() + ", running count: " + mde.getRunningCount());
                }
                Logger.d(DBG_WRITES, TAG, "writing " + readBytes + " bytes of content to client");
                client.getOutputStream().write(buff, 0, readBytes);
                if (fileHelper != null) {
                    Logger.d(DBG, TAG, "writing " + readBytes + " bytes of content to file");
                    //                        NotificationHelper.build(context, "StreamRecorder Rip Control", "writing " + readBytes + " bytes", notificationId);
                    fileHelper.write(fos, buff, readBytes);
                }
            }
        }
    } catch (Exception e) {
        startOver(data, client, FailType.Unexpected, e);
    } finally {
        // Ensure the read worker's thread does not outlive this request.
        executor.shutdownNow();
    }
}

From source file:com.netsteadfast.greenstep.bsc.command.ScoreCalculationCommand.java

private void processKpisScore(List<VisionVO> visions) throws Exception {
    //long beg = System.currentTimeMillis();
    for (VisionVO vision : visions) {
        for (PerspectiveVO perspective : vision.getPerspectives()) {
            for (ObjectiveVO objective : perspective.getObjectives()) {
                // 2015-04-11 add
                ExecutorService kpiCalculationPool = Executors
                        .newFixedThreadPool(SimpleUtils.getAvailableProcessors(objective.getKpis().size()));
                for (KpiVO kpi : objective.getKpis()) {
                    /* 2015-04-11 rem
                    float score = this.calculationMeasureData(kpi);
                    kpi.setScore(score);
                    kpi.setBgColor( BscScoreColorUtils.getBackgroundColor(score) );
                    kpi.setFontColor( BscScoreColorUtils.getFontColor(score) );
                    */

                    // 2015-04-11 add
                    ScoreCalculationCallableData data = new ScoreCalculationCallableData();
                    data.setDefaultMode(true);
                    data.setKpi(kpi);
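                    // Note: calling get() right after submit() blocks until the task
                    // completes, so these KPI calculations still run one at a time.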
                    data = kpiCalculationPool.submit(new ScoreCalculationCallable(data)).get();

                }
                kpiCalculationPool.shutdown();
            }
        }
    }
    //long end = System.currentTimeMillis();
    //System.out.println( this.getClass().getName() + " use time(MS) = " + (end-beg) );      
}