Example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor

Introduction

On this page you can find example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
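
Before the real-world examples, here is a minimal, self-contained sketch of the basic pattern: wrap an Executor in an ExecutorCompletionService, submit tasks, and take() results in completion order. The pool size and the squaring tasks are illustrative assumptions, not taken from any project below.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4); // illustrative pool size
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);
        int tasks = 8;
        for (int i = 0; i < tasks; i++) {
            final int n = i;
            // Each Callable's result is queued internally as it completes.
            completionService.submit(() -> n * n);
        }
        try {
            for (int i = 0; i < tasks; i++) {
                // take() blocks until some task finishes; results arrive in completion
                // order, not submission order.
                System.out.println("completed: " + completionService.take().get());
            }
        } finally {
            executor.shutdown();
        }
    }
}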

Usage

From source file: com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java

@Override
public void produce(Map<String, String> lastOffsets, int maxBatchSize) throws StageException {
    int batchSize = Math.min(maxBatchSize, commonSourceConfigBean.maxBatchSize);
    handleLastOffset(new HashMap<>(lastOffsets));
    try {
        executorService = new SafeScheduledExecutorService(numberOfThreads,
                TableJdbcRunnable.TABLE_JDBC_THREAD_PREFIX);

        ExecutorCompletionService<Future> completionService = new ExecutorCompletionService<>(executorService);

        final RateLimiter queryRateLimiter = commonSourceConfigBean.creatQueryRateLimiter();

        List<Future> allFutures = new LinkedList<>();
        IntStream.range(0, numberOfThreads).forEach(threadNumber -> {
            JdbcBaseRunnable runnable = new JdbcRunnableBuilder().context(getContext())
                    .threadNumber(threadNumber).batchSize(batchSize).connectionManager(connectionManager)
                    .offsets(offsets).tableProvider(tableOrderProvider)
                    .tableReadContextCache(getTableReadContextCache(connectionManager, offsets))
                    .commonSourceConfigBean(commonSourceConfigBean).tableJdbcConfigBean(tableJdbcConfigBean)
                    .queryRateLimiter(commonSourceConfigBean.creatQueryRateLimiter()).isReconnect(isReconnect)
                    .build();

            toBeInvalidatedThreadCaches.add(runnable.getTableReadContextCache());
            allFutures.add(completionService.submit(runnable, null));
        });

        if (commonSourceConfigBean.allowLateTable) {
            TableSpooler tableSpooler = new TableSpooler();
            executorServiceForTableSpooler = new SafeScheduledExecutorService(1,
                    JdbcBaseRunnable.TABLE_JDBC_THREAD_PREFIX);
            executorServiceForTableSpooler.scheduleWithFixedDelay(tableSpooler, 0,
                    commonSourceConfigBean.newTableQueryInterval, TimeUnit.SECONDS);
        }

        while (!getContext().isStopped()) {
            checkWorkerStatus(completionService);
            final boolean shouldGenerate = tableOrderProvider.shouldGenerateNoMoreDataEvent();
            if (shouldGenerate) {
                final int delay = commonSourceConfigBean.noMoreDataEventDelay;
                if (delay > 0) {
                    Executors.newSingleThreadScheduledExecutor().schedule(new Runnable() {
                        @Override
                        public void run() {
                            jdbcUtil.generateNoMoreDataEvent(getContext());
                        }
                    }, delay, TimeUnit.SECONDS);
                } else {
                    jdbcUtil.generateNoMoreDataEvent(getContext());
                }
            }

            // This loop only checks isStopped(); running it as fast as possible leads to high CPU
            // usage even when no data is passing through, so we hard-code a sleep of a few milliseconds.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                LOG.debug("Interrupted wait");
            }
        }

        for (Future future : allFutures) {
            try {
                future.get();
            } catch (ExecutionException e) {
                LOG.error(
                        "ExecutionException when attempting to wait for all table JDBC runnables to complete, after context was"
                                + " stopped: {}",
                        e.getMessage(), e);
            } catch (InterruptedException e) {
                LOG.error(
                        "InterruptedException when attempting to wait for all table JDBC runnables to complete, after context "
                                + "was stopped: {}",
                        e.getMessage(), e);
                Thread.currentThread().interrupt();
            }
        }
    } finally {
        if (shutdownExecutorIfNeeded()) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file: org.apache.hama.bsp.TestBSPTaskFaults.java

public void testPing() {
    conf.setInt(TEST_POINT, 0);

    CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(
            this.testBSPTaskService);
    TestBSPProcessRunner runner = new TestBSPProcessRunner(0, workerServer.getListenerAddress().getPort());

    Future<Integer> future = completionService.submit(runner);

    try {
        future.get(20000, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        LOG.error("Interrupted Exception.", e1);
    } catch (ExecutionException e1) {
        LOG.error("ExecutionException Exception.", e1);
    } catch (TimeoutException e) {
        LOG.error("TimeoutException Exception.", e);
    }

    checkIfPingTestPassed();
    groom.setPingCount(0);
    this.testBSPTaskService.shutdownNow();
    runner.destroyProcess();
}

From source file: com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java

public void parallelMutate(List<MutateWorker> workers) throws BackendException {
    CompletionService<Void> completion = new ExecutorCompletionService<>(clientThreadPool);
    List<Future<Void>> futures = Lists.newLinkedList();
    for (MutateWorker worker : workers) {
        futures.add(completion.submit(worker));
    }

    //block on all the futures getting or throwing instead of using a latch, as we need to check future status anyway
    boolean interrupted = false;
    try {
        for (int i = 0; i < workers.size(); i++) {
            try {
                completion.take().get(); //Void
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because janusgraph does not poll this thread for interrupted anywhere
                throw new BackendRuntimeException("was interrupted during parallelMutate");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, MUTATE_ITEM);
            }
        }
    } finally {
        for (Future<Void> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }
        if (interrupted) {
            // set interrupted on this thread
            Thread.currentThread().interrupt();
        }
    }
}

From source file: com.rapid7.diskstorage.dynamodb.DynamoDBDelegate.java

public List<QueryResultWrapper> parallelQuery(List<QueryWorker> queryWorkers) throws BackendException {
    CompletionService<QueryResultWrapper> completionService = new ExecutorCompletionService<>(clientThreadPool);

    List<Future<QueryResultWrapper>> futures = Lists.newLinkedList();
    for (QueryWorker worker : queryWorkers) {
        futures.add(completionService.submit(worker));
    }

    boolean interrupted = false;
    List<QueryResultWrapper> results = Lists.newLinkedList();
    try {
        for (int i = 0; i < queryWorkers.size(); i++) {
            try {
                QueryResultWrapper result = completionService.take().get();
                results.add(result);
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because titan does not poll this thread for interrupted anywhere
                throw new BackendRuntimeException("was interrupted during parallelQuery");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, QUERY);
            }
        }
    } finally {
        for (Future<QueryResultWrapper> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }

        if (interrupted) {
            // set interrupted on this thread and fail out
            Thread.currentThread().interrupt();
        }
    }
    return results;
}
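
The two DynamoDB delegate examples above (and the JanusGraph variant further down) share a drain-and-cancel shape: take one result per submitted worker, cancel any unfinished futures in a finally block, and restore the interrupt flag if the wait was interrupted. A generic sketch of that shape, assuming plain RuntimeExceptions in place of the delegates' BackendException hierarchy:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public final class DrainAndCancel {
    // Drain one result per submitted task; on interrupt, cancel stragglers and re-set the flag.
    public static <T> List<T> invokeAll(ExecutorService pool, List<Callable<T>> tasks) {
        CompletionService<T> completion = new ExecutorCompletionService<>(pool);
        List<Future<T>> futures = new ArrayList<>();
        for (Callable<T> task : tasks) {
            futures.add(completion.submit(task));
        }
        boolean interrupted = false;
        List<T> results = new ArrayList<>();
        try {
            for (int i = 0; i < tasks.size(); i++) {
                try {
                    results.add(completion.take().get());
                } catch (InterruptedException e) {
                    interrupted = true;
                    throw new RuntimeException("interrupted while draining results", e);
                } catch (ExecutionException e) {
                    throw new RuntimeException(e.getCause());
                }
            }
        } finally {
            for (Future<T> future : futures) {
                if (!future.isDone()) {
                    future.cancel(interrupted /* mayInterruptIfRunning */);
                }
            }
            if (interrupted) {
                // set interrupted on this thread so callers can observe it
                Thread.currentThread().interrupt();
            }
        }
        return results;
    }
}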

From source file: org.geowebcache.sqlite.MbtilesBlobStore.java

@Override
public boolean delete(TileRange tileRange) throws StorageException {
    // getting the files associated with this tile range
    Map<File, List<long[]>> files = fileManager.getFiles(tileRange);
    if (files.isEmpty()) {
        // no files so nothing to do
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Nothing to do.");
        }
        return false;
    }
    // let's delete the tiles
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executorService);
    int tasks = 0;
    for (Map.Entry<File, List<long[]>> entry : files.entrySet()) {
        // FIXME: should we notify the listeners?
        File file = entry.getKey();
        if (!file.exists()) {
            // this database file doesn't exist, so there is nothing to do
            continue;
        }
        if (eagerDelete) {
            // we delete the whole file, avoiding fragmentation of the database
            completionService.submit(() -> connectionManager.delete(file), true);
        } else {
            // we need to delete all tiles that belong to the tile range and are stored in the current file
            for (long[] range : entry.getValue()) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(String.format(
                            "Deleting tiles range [minx=%d, miny=%d, maxx=%d, maxxy=%d, zoom=%d] in file '%s'.",
                            range[0], range[1], range[2], range[3], range[4], file));
                }
                completionService.submit(() -> connectionManager.executeSql(file,
                        "DELETE FROM tiles WHERE zoom_level = ? AND tile_column BETWEEN ? AND ? AND tile_row BETWEEN ? AND ?;",
                        range[4], range[0], range[2], range[1], range[3]), true);
            }
        }
        tasks++;
    }
    // let's wait for the tasks to finish
    for (int i = 0; i < tasks; i++) {
        try {
            completionService.take().get();
        } catch (Exception exception) {
            throw Utils.exception(exception, "Something bad happened when deleting the tile range.");
        }
    }
    return true;
}
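
The MBTiles example uses the two-argument submit(Runnable task, V result) overload: the Runnable does the work, and the fixed result (true above) is what take().get() returns once the task completes. A minimal sketch of that overload, with an illustrative task body:

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2); // illustrative pool size
        CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executor);
        // The Runnable returns nothing; the second argument is the value reported on completion.
        completionService.submit(() -> System.out.println("side-effecting work"), Boolean.TRUE);
        System.out.println("result: " + completionService.take().get()); // prints "result: true"
        executor.shutdown();
    }
}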

From source file: net.sourceforge.seqware.pipeline.plugins.PluginRunnerIT.java

public void testLatestWorkflowsInternal(List<Integer> accessions) throws IOException {
    String output = ITUtility.runSeqWareJar(
            "-p net.sourceforge.seqware.pipeline.plugins.BundleManager -- --list-installed",
            ReturnValue.SUCCESS);
    Assert.assertTrue("output should include installed workflows", output.contains("INSTALLED WORKFLOWS"));
    Map<String, WorkflowInfo> latestWorkflows = new HashMap<String, WorkflowInfo>();
    String[] lines = output.split(System.getProperty("line.separator"));
    for (String line : lines) {
        String[] lineParts = line.split("\t");
        try {
            int workflow_accession = Integer.valueOf(lineParts[3]);
            String workflowName = lineParts[0];
            String path = lineParts[4];
            if (path.equals("null")) {
                continue;
            }
            WorkflowInfo wi = new WorkflowInfo(workflow_accession, path, workflowName, lineParts[1]);

            //TODO: check that the permanent workflow actually exists, if not warn and skip
            File fileAtPath = new File(path);
            if (!fileAtPath.exists()) {
                Log.warn("Skipping " + workflowName + ":" + workflow_accession
                        + " , bundle path does not exist at " + path);
                continue;
            }

            if (!latestWorkflows.containsKey(workflowName)) {
                latestWorkflows.put(workflowName, wi);
            } else {
                // contained
                int old = latestWorkflows.get(workflowName).sw_accession;
                if (workflow_accession > old) {
                    latestWorkflows.put(workflowName, wi);
                }
            }
        } catch (Exception e) {
            /**
             * do nothing and skip this line of the BundleManager output
             */
        }
    }
    // setup thread pool
    ExecutorService threadPool = Executors.newFixedThreadPool(latestWorkflows.size());
    CompletionService<String> pool = new ExecutorCompletionService<String>(threadPool);
    for (Entry<String, WorkflowInfo> e : latestWorkflows.entrySet()) {
        System.out.println("Testing " + e.getKey() + " " + e.getValue().sw_accession);

        // if we have an accession list, skip accessions that are not in it
        if (accessions.size() > 0) {
            Integer acc = e.getValue().sw_accession;
            if (!accessions.contains(acc)) {
                System.out.println(
                        "Skipping " + e.getKey() + " " + e.getValue().sw_accession + " due to accession list");
                continue;
            }
        }

        StringBuilder params = new StringBuilder();
        params.append("--bundle ").append(e.getValue().path).append(" ");
        params.append("--version ").append(e.getValue().version).append(" ");
        params.append("--test ");
        File tempFile = File.createTempFile(e.getValue().name, ".out");
        pool.submit(new TestingThread(params.toString(), tempFile));
    }
    for (Entry<String, WorkflowInfo> e : latestWorkflows.entrySet()) {
        try {
            pool.take().get();
        } catch (InterruptedException ex) {
            Log.error(ex);
        } catch (ExecutionException ex) {
            Log.error(ex);
        }
    }
    threadPool.shutdown();
}

From source file: org.apache.hama.bsp.TestBSPTaskFaults.java

public void testPingOnTaskSetupFailure() {

    LOG.info("Testing ping failure case - 1");
    conf.setInt(TEST_POINT, 1);

    CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(
            this.testBSPTaskService);
    TestBSPProcessRunner runner = new TestBSPProcessRunner(1, workerServer.getListenerAddress().getPort());
    Future<Integer> future = completionService.submit(runner);

    try {
        future.get(20000, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        LOG.error("Interrupted Exception.", e1);
    } catch (ExecutionException e1) {
        LOG.error("ExecutionException Exception.", e1);
    } catch (TimeoutException e) {
        LOG.error("TimeoutException Exception.", e);
    }

    checkIfPingTestPassed();
    groom.setPingCount(0);
    this.testBSPTaskService.shutdownNow();
    runner.destroyProcess();
}

From source file: org.apache.hadoop.hbase.regionserver.Store.java

/**
 * Creates an unsorted list of StoreFiles loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    ArrayList<StoreFile> results = new ArrayList<StoreFile>();
    FileStatus files[] = getStoreFiles();

    if (files == null || files.length == 0) {
        return results;
    }
    // initialize the thread pool for opening store files in parallel.
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.family.getNameAsString());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (int i = 0; i < files.length; i++) {
        // Skip directories.
        if (files[i].isDir()) {
            continue;
        }
        final Path p = files[i].getPath();
        // Check for empty hfile. Should never be the case but can happen
        // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
        // NOTE: the HFileLink is just a name, so it's an empty file.
        if (!HFileLink.isHFileLink(p) && this.fs.getFileStatus(p).getLen() <= 0) {
            LOG.warn("Skipping " + p + " because its empty. HBASE-646 DATA LOSS?");
            continue;
        }

        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            public StoreFile call() throws IOException {
                StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf, family.getBloomFilterType(),
                        dataBlockEncoder, isAssistant());
                passSchemaMetricsTo(storeFile);
                storeFile.createReader();
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            Future<StoreFile> future = completionService.take();
            StoreFile storeFile = future.get();
            long length = storeFile.getReader().length();
            this.storeSize += length;
            this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
            if (LOG.isDebugEnabled()) {
                LOG.debug("loaded " + storeFile.toStringDetailed());
            }
            results.add(storeFile);
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }

    return results;
}

From source file: com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java

public List<QueryResultWrapper> parallelQuery(List<QueryWorker> queryWorkers) throws BackendException {
    CompletionService<QueryResultWrapper> completionService = new ExecutorCompletionService<>(clientThreadPool);

    List<Future<QueryResultWrapper>> futures = Lists.newLinkedList();
    for (QueryWorker worker : queryWorkers) {
        futures.add(completionService.submit(worker));
    }

    boolean interrupted = false;
    List<QueryResultWrapper> results = Lists.newLinkedList();
    try {
        for (int i = 0; i < queryWorkers.size(); i++) {
            try {
                QueryResultWrapper result = completionService.take().get();
                results.add(result);
            } catch (InterruptedException e) {
                interrupted = true;
                // fail out because janusgraph does not poll this thread for interrupted anywhere
                throw new BackendRuntimeException("was interrupted during parallelQuery");
            } catch (ExecutionException e) {
                throw unwrapExecutionException(e, QUERY);
            }
        }
    } finally {
        for (Future<QueryResultWrapper> future : futures) {
            if (!future.isDone()) {
                future.cancel(interrupted /* mayInterruptIfRunning */);
            }
        }

        if (interrupted) {
            // set interrupted on this thread and fail out
            Thread.currentThread().interrupt();
        }
    }
    return results;
}

From source file: org.apache.hama.bsp.TestBSPTaskFaults.java

public void testPingOnTaskExecFailure() {

    LOG.info("Testing ping failure case - 2");
    conf.setInt(TEST_POINT, 2);
    CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(
            this.testBSPTaskService);
    TestBSPProcessRunner runner = new TestBSPProcessRunner(2, workerServer.getListenerAddress().getPort());
    Future<Integer> future = completionService.submit(runner);

    try {
        future.get(20000, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        LOG.error("Interrupted Exception.", e1);
    } catch (ExecutionException e1) {
        LOG.error("ExecutionException Exception.", e1);
    } catch (TimeoutException e) {
        LOG.error("TimeoutException Exception.", e);
    }

    checkIfPingTestPassed();
    groom.setPingCount(0);
    this.testBSPTaskService.shutdownNow();
    runner.destroyProcess();
}