Example usage for java.util.concurrent ThreadPoolExecutor awaitTermination

List of usage examples for java.util.concurrent ThreadPoolExecutor awaitTermination

Introduction

On this page you can find example usage for java.util.concurrent ThreadPoolExecutor awaitTermination.

Prototype

public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException 
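
Before the examples, here is a minimal sketch of the usual pattern (all names are illustrative): call shutdown() first, since awaitTermination never stops the pool by itself, then check the boolean result, which is false if the timeout elapsed before the pool terminated.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());

        for (int i = 0; i < 10; i++) {
            final int n = i;
            pool.execute(() -> System.out.println("task " + n));
        }

        pool.shutdown(); // stop accepting new tasks; queued tasks still run

        // Blocks until all tasks finish, the timeout elapses, or this thread is
        // interrupted; returns false if the timeout elapsed first.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt running tasks and drain the queue
        }
    }
}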

Usage

From source file:org.esigate.test.cases.PerformanceTestCase.java

/**
 * Executes the task with several threads.
 *
 * @param request the request to run
 * @param numberOfRequests the total number of requests to submit
 * @param threads the number of concurrent threads
 * @return the execution time in milliseconds
 * @throws Exception
 */
private long execute(HttpGetRequestRunnable request, int numberOfRequests, int threads) throws Exception {
    connectionManager = new PoolingHttpClientConnectionManager();
    httpClient = HttpClientBuilder.create().setConnectionManager(connectionManager).setMaxConnTotal(threads)
            .setMaxConnPerRoute(threads).setDefaultRequestConfig(
                    RequestConfig.custom().setConnectTimeout(10000).setSocketTimeout(10000).build())
            .build();
    // Warm up
    request.run();

    BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
    ThreadPoolExecutor threadPool = new ThreadPoolExecutor(threads, threads, 5, TimeUnit.SECONDS, queue);

    long start = System.currentTimeMillis();
    threadPool.prestartAllCoreThreads();
    for (int i = 0; i < numberOfRequests; i++) {
        threadPool.submit(request);
    }
    threadPool.shutdown();

    // wait a maximum of 200 s
    threadPool.awaitTermination(200, TimeUnit.SECONDS);
    connectionManager.shutdown();

    if (request.exception != null) {
        throw new AssertionFailedError(
                "Exception for request " + request.url + " after " + request.count + " requests",
                request.exception);
    }
    if (threadPool.getCompletedTaskCount() < threadPool.getTaskCount()) {
        // Not all tasks were executed
        String msg = request.url + " : Only " + threadPool.getCompletedTaskCount() + "/"
                + threadPool.getTaskCount() + " have been rendered => maybe a performance issue";
        threadPool.shutdownNow();
        fail(msg);
    }

    long end = System.currentTimeMillis();
    long execTime = end - start;
    LOG.debug("Executed request " + request.url + " " + numberOfRequests + " times with " + threads
            + " threads in " + execTime + "ms");
    return execTime;

}
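
The test above records failures in a field on the runnable because a task submitted with submit() never lets its exception reach the pool thread; the exception is captured in the returned Future. A hedged alternative sketch (illustrative names) keeps the futures and rethrows after awaitTermination:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SubmitAndCheck {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            futures.add(pool.submit(() -> {
                // do the work here; any RuntimeException is captured in the Future
            }));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        for (Future<?> f : futures) {
            f.get(); // rethrows a failed task's exception wrapped in ExecutionException
        }
    }
}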

From source file:dk.dma.ais.lib.FileConvert.java

/** {@inheritDoc} */
@Override
protected void run(Injector injector) throws Exception {
    configureFileEnding();

    final EConsumer<String> consumer = new EConsumer<String>() {

        @Override
        public void accept(String s) throws IllegalArgumentException, IllegalAccessException,
                NoSuchFieldException, SecurityException, IOException, InterruptedException {
            Path path = Paths.get(s);
            LOG.debug("Started processing file " + path);

            Path endPath;
            if (keepFileStructure) {
                Path relative;
                relative = path;
                endPath = Paths.get(Paths.get(convertTo).toString(), relative.toString());
                new File(endPath.toString()).mkdirs();
            } else {
                endPath = Paths.get("");
            }

            String filename = path.getFileName().toString();
            if (!filename.endsWith(fileEnding))
                filename = FilenameUtils.removeExtension(filename) + fileEnding;
            Path filePath = Paths.get(endPath.toString(), filename);

            LOG.debug("Output File: " + filePath.toString());

            final OutputStream fos = new FileOutputStream(filePath.toString());

            final boolean createSituationFolder = !StringUtils.isBlank(kmzSnapshotAt);
            final long snapshotAtEpochMillis = createSituationFolder
                    ? LocalDateTime.parse(kmzSnapshotAt, formatter).toInstant(ZoneOffset.UTC).toEpochMilli()
                    : -1;

            OutputStreamSink<AisPacket> sink;
            if ("kmz".equals(outputSinkFormat)) {
                //AisPacketKMZOutputSink(filter, createSituationFolder, createMovementsFolder, createTracksFolder, isPrimaryTarget, isSecondaryTarget, triggerSnapshot, snapshotDescriptionSupplier, movementInterpolationStep, supplyTitle, supplyDescription, iconHrefSupplier);
                sink = AisPacketOutputSinks.newKmzSink(e -> true, // this.filter = e -> true;
                        createSituationFolder, // this.createSituationFolder = true;
                        true, // createMovementsFolder = true;
                        true, // this.createTracksFolder = true;
                        e -> kmzPrimaryMmsi <= 0 ? false : e.tryGetAisMessage().getUserId() == kmzPrimaryMmsi, // this.isPrimaryTarget = e -> false;
                        e -> kmzSecondaryMmsi <= 0 ? false
                                : e.tryGetAisMessage().getUserId() == kmzSecondaryMmsi, // this.isSecondaryTarget = e -> false;
                        e -> e.getBestTimestamp() >= snapshotAtEpochMillis, // this.triggerSnapshot = e -> false;
                        () -> "Situation at " + kmzSnapshotAt, // this.snapshotDescriptionSupplier = null;
                        () -> 10, // this.title = defaultTitleSupplier;
                        () -> "description", // this.description = defaultDescriptionSupplier;
                        () -> "10", //this.movementInterpolationStep = defaultMovementInterpolationStepSupplier;
                        (shipTypeCargo, navigationalStatus) -> "" // this.iconHrefSupplier = defaultIconHrefSupplier;
                );

            } else
                sink = AisPacketOutputSinks.getOutputSink(outputSinkFormat, columns);

            sink.closeWhenFooterWritten();

            AisPacketReader apis = AisPacketReader.createFromFile(path, false);

            apis.writeTo(fos, sink);
            apis.close();
            fos.close();
        }
    };

    /*
     * Creates a pool of 4 executor threads. Each thread opens a file using an AisPacketReader. Up to 10000
     * files can be queued; once the queue is full, the calling thread executes the job itself.
     */
    ThreadPoolExecutor threadpoolexecutor = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS,
            new ArrayBlockingQueue<>(10000), new ThreadPoolExecutor.CallerRunsPolicy());
    for (final String s : sources) {
        threadpoolexecutor.execute(() -> {
            try {
                consumer.accept(s);
            } catch (Exception e) {
                e.printStackTrace();
            }

        });
    }

    threadpoolexecutor.shutdown();
    threadpoolexecutor.awaitTermination(999, TimeUnit.DAYS);
}
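
The bounded ArrayBlockingQueue combined with CallerRunsPolicy above gives natural backpressure: when all workers are busy and the queue is full, execute() runs the task on the submitting thread instead of rejecting it. A small self-contained sketch of that behavior (sizes are illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CallerRunsSketch {
    public static void main(String[] args) throws InterruptedException {
        // 2 worker threads and a 2-slot queue; overflow tasks run on the caller.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 1, TimeUnit.SECONDS,
                new ArrayBlockingQueue<>(2), new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 8; i++) {
            pool.execute(() -> {
                // some of these lines will name the main thread, not a pool worker
                System.out.println("running on " + Thread.currentThread().getName());
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}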

From source file:io.anserini.index.IndexCollection.java

public void run() throws IOException, InterruptedException {
    final long start = System.nanoTime();
    LOG.info("Starting indexer...");

    int numThreads = args.threads;

    final Directory dir = FSDirectory.open(indexPath);
    final EnglishAnalyzer analyzer = args.keepStopwords ? new EnglishAnalyzer(CharArraySet.EMPTY_SET)
            : new EnglishAnalyzer();
    final IndexWriterConfig config = new IndexWriterConfig(analyzer);
    config.setSimilarity(new BM25Similarity());
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    config.setRAMBufferSizeMB(args.memorybufferSize);
    config.setUseCompoundFile(false);
    config.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, config);

    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
    final List<Path> segmentPaths = collection.getFileSegmentPaths();

    final int segmentCnt = segmentPaths.size();
    LOG.info(segmentCnt + " files found in " + collectionPath.toString());
    for (int i = 0; i < segmentCnt; i++) {
        executor.execute(new IndexerThread(writer, collection, segmentPaths.get(i)));
    }

    executor.shutdown();

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            LOG.info(String.format("%.2f percent completed",
                    (double) executor.getCompletedTaskCount() / segmentCnt * 100.0d));
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    if (segmentCnt != executor.getCompletedTaskCount()) {
        throw new RuntimeException("totalFiles = " + segmentCnt + " is not equal to completedTaskCount = "
                + executor.getCompletedTaskCount());
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (args.optimize)
            writer.forceMerge(1);
    } finally {
        try {
            writer.close();
        } catch (IOException e) {
            // It is possible that this happens... but nothing much we can do at this point,
            // so just log the error and move on.
            LOG.error(e);
        }
    }

    LOG.info("Indexed documents: " + counters.indexedDocuments.get());
    LOG.info("Empty documents: " + counters.emptyDocuments.get());
    LOG.info("Errors: " + counters.errors.get());

    final long durationMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    LOG.info("Total " + numIndexed + " documents indexed in "
            + DurationFormatUtils.formatDuration(durationMillis, "HH:mm:ss"));
}
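
The interrupt handling above mirrors the two-phase shutdown idiom from the ExecutorService javadoc. A reusable sketch of that idiom, lightly adapted:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class PoolShutdown {
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // disable submission of new tasks
        try {
            // wait a while for existing tasks to terminate
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // cancel currently executing tasks
                // wait a while for tasks to respond to being cancelled
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (re-)cancel if the current thread is also interrupted
            pool.shutdownNow();
            // preserve the interrupt status
            Thread.currentThread().interrupt();
        }
    }
}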

From source file:Main.java

public void runTest() throws Exception {
    ThreadPoolExecutor tp = new ThreadPoolExecutor(1, 1, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>());
    tp.setRejectedExecutionHandler(
            (Runnable r, ThreadPoolExecutor executor) -> System.out.println("Task rejected: " + r));
    Semaphore oneTaskDone = new Semaphore(0);
    tp.execute(() -> {
        System.out.println("Sleeping");
        try {
            Thread.sleep(300);
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.out.println("Done sleeping");
        oneTaskDone.release();
    });
    tp.execute(new Runnable() {
        @Override
        public void run() {
            System.out.println("Never happens");
        }

        @Override
        public String toString() {
            return "Rejected Runnable";
        }
    });
    oneTaskDone.acquire();
    tp.execute(() -> System.out.println("Running"));
    tp.shutdown();
    tp.awaitTermination(100, TimeUnit.MILLISECONDS);
    System.out.println("Finished");
}

From source file:com.espertech.esper.filter.TestIndexTreeBuilderMultithreaded.java

private void performMultithreadedTest(FilterHandleSetNode topNode, int numberOfThreads, int numberOfRunnables,
        int numberOfSecondsSleep) throws Exception {
    log.info(".performMultithreadedTest Loading thread pool work queue,numberOfRunnables=" + numberOfRunnables);

    ThreadPoolExecutor pool = new ThreadPoolExecutor(0, numberOfThreads, 99999, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    for (int i = 0; i < numberOfRunnables; i++) {
        IndexTreeBuilderRunnable runnable = new IndexTreeBuilderRunnable(eventType, topNode, testFilterSpecs,
                matchedEvents, unmatchedEvents);

        pool.execute(runnable);
    }

    log.info(".performMultithreadedTest Starting thread pool, threads=" + numberOfThreads);
    pool.setCorePoolSize(numberOfThreads);

    // Sleep X seconds
    sleep(numberOfSecondsSleep);

    log.info(".performMultithreadedTest Completed, numberOfRunnables=" + numberOfRunnables
            + "  numberOfThreads=" + numberOfThreads + "  completed=" + pool.getCompletedTaskCount());

    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.SECONDS);

    assertTrue(pool.getCompletedTaskCount() == numberOfRunnables);
}
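
Note that awaitTermination returns false when the timeout elapses before the pool has terminated, so asserting on getCompletedTaskCount() after a fixed sleep plus a one-second wait can be flaky. A hedged adjustment to the tail of the method above (assuming JUnit-style fail and assertEquals are available in this test) would check the return value:

    pool.shutdown();
    if (!pool.awaitTermination(numberOfSecondsSleep + 5, TimeUnit.SECONDS)) {
        pool.shutdownNow();
        fail("runnables did not complete in time");
    }
    assertEquals(numberOfRunnables, pool.getCompletedTaskCount());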

From source file:io.anserini.index.IndexWebCollection.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    LOG.info("Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(new EnglishAnalyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(512);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
    final String suffix = Collection.GOV2.equals(collection) ? ".gz" : ".warc.gz";
    final Deque<Path> warcFiles = discoverWarcFiles(docDir, suffix);

    if (doclimit > 0 && warcFiles.size() < doclimit)
        for (int i = doclimit; i < warcFiles.size(); i++)
            warcFiles.removeFirst();

    long totalWarcFiles = warcFiles.size();
    LOG.info(totalWarcFiles + " " + suffix + " files found under the docs path: " + docDir.toString());

    for (int i = 0; i < 2000; i++) {
        if (!warcFiles.isEmpty())
            executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
        else {
            if (!executor.isShutdown()) {
                Thread.sleep(30000);
                executor.shutdown();
            }
            break;
        }
    }

    long first = 0;
    // add some delay to let the scheduler spawn threads
    Thread.sleep(30000);

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {

            final long completedTaskCount = executor.getCompletedTaskCount();

            LOG.info(String.format("%.2f percent completed",
                    (double) completedTaskCount / totalWarcFiles * 100.0d));

            if (!warcFiles.isEmpty())
                for (long i = first; i < completedTaskCount; i++) {
                    if (!warcFiles.isEmpty())
                        executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
                    else {
                        if (!executor.isShutdown())
                            executor.shutdown();
                    }
                }

            first = completedTaskCount;
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    if (totalWarcFiles != executor.getCompletedTaskCount())
        throw new RuntimeException("totalWarcFiles = " + totalWarcFiles
                + " is not equal to completedTaskCount = " + executor.getCompletedTaskCount());

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}

From source file:com.emc.ecs.smart.SmartUploader.java

/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions.  The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records.  If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}.  Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {

            System.out.printf("starting remote MD5...\n");

            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);

            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }

            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }

            System.out.printf("\nObject verification passed!\n");
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}

From source file:knowledgeMiner.preprocessing.KnowledgeMinerPreprocessor.java

/**
 * Loops through all Wikipedia/Ontology indices.
 *
 * @param taskType
 *            The type of task to process.
 * @param heuristics
 *            The heuristics to process with.
 * @param reverseOrder whether to iterate through the indices in reverse ID order
 */
private void precomputeAll(PrecomputationTaskType taskType, Collection<? extends WeightedHeuristic> heuristics,
        boolean reverseOrder) {
    boolean loopOntology = taskType == PrecomputationTaskType.CYC_TO_WIKI;

    // Set up the iterator
    OntologySocket ontology = (loopOntology) ? ResourceAccess.requestOntologySocket() : null;
    WMISocket wmi = (loopOntology) ? null : ResourceAccess.requestWMISocket();

    // Set up an executor and add all concepts to the execution queue.
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors
            .newFixedThreadPool(Math.max(1, KnowledgeMiner.getNumThreads()));
    int id = (reverseOrder) ? 35000000 : 0;

    while (true) {
        try {
            // Get next thing
            int nextID = 0;
            if (loopOntology) {
                if (reverseOrder)
                    nextID = ontology.getPrevNode(id);
                else
                    nextID = ontology.getNextNode(id);
            } else {
                if (reverseOrder)
                    nextID = wmi.getPrevArticle(id);
                else
                    nextID = wmi.getNextArticle(id);
            }
            if (nextID < 0)
                break;

            id = nextID;
            if (loopOntology) {
                // If the concept is not a predicate or ephemeral, process
                // it.
                String constant = ontology.findConceptByID(id);
                if (constant != null
                        && ontology.getProperty(id, true, DirectedAcyclicGraph.EPHEMERAL_MARK) == null
                        && !ontology.evaluate(null, CommonConcepts.ISA.getID(), constant,
                                CommonConcepts.PREDICATE.getID())) {
                    PrecomputationTask preTask = new PrecomputationTask(
                            new ConceptModule(new OntologyConcept(constant, id)), heuristics, taskType, this);
                    executor.execute(preTask);
                }
            } else {
                String type = wmi.getPageType(nextID);
                // If it's an article or disambiguation, process it.
                if (type.equals(WMISocket.TYPE_ARTICLE) || type.equals(WMISocket.TYPE_DISAMBIGUATION)) {
                    PrecomputationTask preTask = new PrecomputationTask(new ConceptModule(id), heuristics,
                            taskType, this);
                    executor.execute(preTask);
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Wait for completion
    executor.shutdown();
    try {
        if (executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS))
            return;
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    System.err.println("Error precomputing tasks.");
}

From source file:net.pms.dlna.DLNAResource.java

/**
 * First thing it does it searches for an item matching the given objectID.
 * If children is false, then it returns the found object as the only object in the list.
 * TODO: (botijo) This function does a lot more than this!
 * @param objectId ID to search for.
 * @param returnChildren State if you want all the children in the returned list.
 * @param start
 * @param count
 * @param renderer Renderer for which to do the actions.
 * @return List of DLNAResource items.
 * @throws IOException
 */
public synchronized List<DLNAResource> getDLNAResources(String objectId, boolean returnChildren, int start,
        int count, RendererConfiguration renderer) throws IOException {
    ArrayList<DLNAResource> resources = new ArrayList<DLNAResource>();
    DLNAResource dlna = search(objectId, count, renderer);

    if (dlna != null) {
        String systemName = dlna.getSystemName();
        dlna.setDefaultRenderer(renderer);

        if (!returnChildren) {
            resources.add(dlna);
            dlna.refreshChildrenIfNeeded();
        } else {
            dlna.discoverWithRenderer(renderer, count, true);

            if (count == 0) {
                count = dlna.getChildren().size();
            }

            if (count > 0) {
                ArrayBlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(count);

                int nParallelThreads = 3;
                if (dlna instanceof DVDISOFile) {
                    nParallelThreads = 1; // Some DVD drives die with 3 parallel threads
                }

                ThreadPoolExecutor tpe = new ThreadPoolExecutor(Math.min(count, nParallelThreads), count, 20,
                        TimeUnit.SECONDS, queue);

                for (int i = start; i < start + count; i++) {
                    if (i < dlna.getChildren().size()) {
                        final DLNAResource child = dlna.getChildren().get(i);

                        if (child != null) {
                            tpe.execute(child);
                            resources.add(child);
                        } else {
                            logger.warn("null child at index {} in {}", i, systemName);
                        }
                    }
                }

                try {
                    tpe.shutdown();
                    tpe.awaitTermination(20, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    logger.error("error while shutting down thread pool executor for " + systemName, e);
                }

                logger.trace("End of analysis for {}", systemName);
            }
        }
    }

    return resources;
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}