Example usage for java.util.concurrent ExecutorService shutdown

Introduction

This page presents usage examples for the java.util.concurrent ExecutorService.shutdown() method, collected from open source projects.

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
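
Because shutdown() only stops new submissions, callers that need to know when the submitted work has actually finished pair it with awaitTermination(), as most of the examples below do. A minimal sketch of that idiom (the 60-second timeout and the ShutdownExample class name are illustrative choices; the escalation to shutdownNow() follows the pattern suggested in the ExecutorService Javadoc):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class ShutdownExample {

    // Stop accepting new tasks, wait for queued work to finish, then cancel stragglers.
    static void shutdownAndAwait(ExecutorService pool) {
        pool.shutdown(); // previously submitted tasks keep running; new submissions are rejected
        try {
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // interrupt tasks that are still running
            }
        } catch (InterruptedException e) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the caller's interrupt status
        }
    }
}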

Usage

From source file:MSUmpire.LCMSPeakStructure.LCMSPeakDIAMS2.java

private void PrepareMGF_UnfragmentIon() throws IOException {
    String mgffile4 = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ3Name() + ".mgf.temp";
    final BufferedWriter mgfWriter4 = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q3, mgffile4);

    final BufferedWriter mapwriter3 = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q3,
            FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                    + ".ScanClusterMapping_Q3");

    ArrayList<PseudoMSMSProcessing> ScanList = new ArrayList<>();
    ExecutorService executorPool = Executors.newFixedThreadPool(NoCPUs);
    for (PeakCluster ms2cluster : PeakClusters) {
        ArrayList<PrecursorFragmentPairEdge> frags = UnFragIonClu2Cur.get(ms2cluster.Index);
        if (frags != null && DIA_MZ_Range.getX() <= ms2cluster.TargetMz()
                && DIA_MZ_Range.getY() >= ms2cluster.TargetMz()) {
            ms2cluster.GroupedFragmentPeaks.addAll(frags);
            PseudoMSMSProcessing mSMSProcessing = new PseudoMSMSProcessing(ms2cluster, parameter);
            executorPool.execute(mSMSProcessing);
            ScanList.add(mSMSProcessing);
        }
    }
    executorPool.shutdown();
    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status
        Logger.getRootLogger().info("interrupted..");
    }

    for (PseudoMSMSProcessing mSMSProcessing : ScanList) {
        if (MatchedFragmentMap.size() > 0) {
            mSMSProcessing.RemoveMatchedFrag(MatchedFragmentMap);
        }
        XYPointCollection Scan = mSMSProcessing.GetScan();

        if (Scan != null && Scan.PointCount() > parameter.MinFrag) {
            parentDIA.Q3Scan++;

            mgfWriter4.append("BEGIN IONS\n")
                    .append("PEPMASS=" + mSMSProcessing.Precursorcluster.TargetMz() + "\n")
                    .append("CHARGE=" + mSMSProcessing.Precursorcluster.Charge + "+\n")
                    .append("RTINSECONDS=" + mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f + "\n")
                    .append("TITLE=").append(GetQ3Name()).append(".").append(Integer.toString(parentDIA.Q3Scan))
                    .append(".").append(Integer.toString(parentDIA.Q3Scan)).append(".")
                    .append(Integer.toString(mSMSProcessing.Precursorcluster.Charge)).append("\n");
            //mgfWriter4.append("TITLE=" + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");
            //mgfWriter4.append("TITLE=" GetQ3Name() + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");

            for (int i = 0; i < Scan.PointCount(); i++) {
                mgfWriter4.append(Float.toString(Scan.Data.get(i).getX())).append(" ")
                        .append(Float.toString(Scan.Data.get(i).getY())).append("\n");
            }
            mgfWriter4.append("END IONS\n\n");

            mapwriter3.write(
                    parentDIA.Q3Scan + ";" + WindowID + ";" + mSMSProcessing.Precursorcluster.Index + "\n");
        }
        mSMSProcessing.Precursorcluster.GroupedFragmentPeaks.clear();
    }
    // mgfWriter4 and mapwriter3 are shared writers from DIAPack.get_file and are presumably closed elsewhere
}

From source file:com.easarrive.image.thumbor.executer.service.impl.SQSNotificationHandlerForThumbor.java

private List<NotificationHandleResult<Message, Boolean>> ergodicS3EventMessageRecord(final Message message,
        S3EventMessageContent messageContent) throws AWSPluginException {
    List<S3EventMessageRecord> recordList = messageContent.getRecords();
    if (recordList == null) {
        throw new AWSPluginException("The S3EventMessageRecord list is null");
    }
    if (recordList.size() < 1) {
        throw new AWSPluginException("The S3EventMessageRecord list is empty");
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    List<Future<NotificationHandleResult<Message, Boolean>>> futureList = new ArrayList<Future<NotificationHandleResult<Message, Boolean>>>();
    for (final S3EventMessageRecord s3EventMessageRecord : recordList) {
        Future<NotificationHandleResult<Message, Boolean>> future = executorService.submit(
                new SQSNotificationHandlerForThumborCallable(message, s3EventMessageRecord, generatePicture));
        futureList.add(future);
    }

    List<NotificationHandleResult<Message, Boolean>> resultList = new ArrayList<NotificationHandleResult<Message, Boolean>>();
    // collect the result of each record handler
    for (Future<NotificationHandleResult<Message, Boolean>> fs : futureList) {
        try {
            NotificationHandleResult<Message, Boolean> result = fs.get();
            if (logger.isInfoEnabled()) {
                logger.info("The SQS message record ({}) result to handle is {}", result.getMessageId(),
                        result.getData());
            }
            resultList.add(result);
            if (!result.getData()) {
                break;
            }
        } catch (Exception e) {
            if (logger.isErrorEnabled()) {
                logger.error(e.getMessage(), e);
            }
        }
    }
    // shut down the executor once every result has been collected
    executorService.shutdown();
    return resultList;
}

From source file:com.bittorrent.mpetazzoni.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(Torrent.PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / Torrent.PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / Torrent.PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:com.o2d.pkayjava.editor.proxy.ProjectManager.java

public void importImagesIntoProject(final Array<FileHandle> files, ProgressHandler progressHandler) {
    if (files == null) {
        return;
    }
    handler = progressHandler;
    currentPercent = 0;
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(() -> {
        copyImageFilesForAllResolutionsIntoProject(files, true);
        ResolutionManager resolutionManager = facade.retrieveProxy(ResolutionManager.NAME);
        resolutionManager.rePackProjectImagesForAllResolutions();
    });
    executor.execute(() -> {
        changePercentBy(100 - currentPercent);
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        handler.progressComplete();
    });
    executor.shutdown();
}

From source file:edu.lternet.pasta.dml.util.DocumentDownloadUtil.java

public InputStream downloadDocument(final String id, final EcogridEndPointInterface endpoint) throws Exception {

    init();

    log.debug("starting the download");

    ExecutorService service = Executors.newSingleThreadExecutor();
    service.execute(new Runnable() {
        public void run() {
            long startTime = System.currentTimeMillis();

            try {
                if (endpoint instanceof AuthenticatedEcogridEndPointInterface) {
                    AuthenticatedQueryServiceGetToStreamClient authenticatedEcogridClient = new AuthenticatedQueryServiceGetToStreamClient(
                            new URL(((AuthenticatedEcogridEndPointInterface) endpoint)
                                    .getMetacatAuthenticatedEcogridEndPoint()));
                    authenticatedEcogridClient.get(id,
                            ((AuthenticatedEcogridEndPointInterface) endpoint).getSessionId(), outputStream);
                } else {
                    QueryServiceGetToStreamClient ecogridClient = new QueryServiceGetToStreamClient(
                            new URL(endpoint.getMetacatEcogridEndPoint()));
                    ecogridClient.get(id, outputStream);
                }
                outputStream.close();

                long endTime = System.currentTimeMillis();
                log.debug((endTime - startTime) + " ms to download document data");

                log.debug("Done downloading id=" + id);

            } catch (Exception e) {
                log.error("Error getting document from ecogrid: " + e.getMessage());
                e.printStackTrace();
            }

        }
    });

    // initiate an orderly shutdown; note that awaitTermination(0, ...) returns
    // immediately, so this does not actually block until the download completes
    service.shutdown();
    service.awaitTermination(0, TimeUnit.SECONDS);

    log.debug("done with the download");

    return inputStream;

}

From source file:com.google.api.ads.adwords.awreporting.processors.onfile.ReportProcessorOnFile.java

private <R extends Report> void processFiles(String mccAccountId, Class<R> reportBeanClass,
        Collection<File> localFiles, ReportDefinitionDateRangeType dateRangeType, String dateStart,
        String dateEnd) {

    final CountDownLatch latch = new CountDownLatch(localFiles.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    // Processing Report Local Files
    LOGGER.info(" Processing reports...");

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (File file : localFiles) {
        LOGGER.trace(".");
        try {

            // We need to create a csvToBean and mappingStrategy for each thread
            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            LOGGER.debug("Parsing file: " + file.getAbsolutePath());
            RunnableProcessorOnFile<R> runnableProcessor = new RunnableProcessorOnFile<R>(file, csvToBean,
                    mappingStrategy, dateRangeType, dateStart, dateEnd, mccAccountId, persister,
                    reportRowsSetSize);
            runnableProcessor.setLatch(latch);
            executorService.execute(runnableProcessor);

        } catch (Exception e) {
            LOGGER.error("Ignoring file (Error when processing): " + file.getAbsolutePath());
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }
    executorService.shutdown();
    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in " + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000)
            + " seconds ***\n");
}

From source file:com.blacklocus.qs.worker.util.log.SamplingQSLogServiceTest.java

@Test
public void testSampledLifeCycle() throws InterruptedException {

    // With these parameters, by far most logger interactions should be filtered out, very few sampled in.
    final int numThreads = 64, iterations = 200, processingJitterMaxMs = 16, noSoonerThanMs = 100;

    final Set<String> sampledTaskIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    final QSLogService logService = Mockito.mock(QSLogService.class);
    // track which logging interactions were allowed through (sampled in)
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null;
        }
    }).when(logService).startedTask(Matchers.any(QSTaskModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSLogModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).log(Matchers.any(QSLogModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).completedTask(Matchers.any(QSTaskModel.class));

    Predicate<QSTaskModel> taskPredicate = SamplingPredicates.noSoonerThan(noSoonerThanMs,
            TimeUnit.MILLISECONDS);
    final QSLogService sampledLogService = new SamplingQSLogService(logService, taskPredicate);

    long startNs = System.nanoTime();
    ExecutorService threads = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        threads.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                LOG.debug("Thread start {}", Thread.currentThread().getName());
                for (int i = 0; i < iterations; i++) {

                    String taskId = UUID.randomUUID().toString();

                    // simulate task processing, some have logs, some don't, processing time varies between each step
                    QSTaskModel task = new QSTaskModel();
                    task.taskId = taskId;
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    sampledLogService.startedTask(task);
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    // random number of associated logs: 0 or 1 (RandomUtils.nextInt(2) excludes the bound)
                    for (int j = RandomUtils.nextInt(2); j > 0; j--) {
                        QSLogModel log = new QSLogModel();
                        log.taskId = taskId;
                        sampledLogService.log(log);
                        Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));
                    }

                    sampledLogService.completedTask(task);
                }
                LOG.debug("Thread end {}", Thread.currentThread().getName());
                return null;

            }
        });
    }
    threads.shutdown();
    threads.awaitTermination(1, TimeUnit.MINUTES);
    long endNs = System.nanoTime();

    // Theoretical maximum number of sampled in task logging
    long durationMs = TimeUnit.NANOSECONDS.toMillis(endNs - startNs);
    long expectedMax = durationMs / noSoonerThanMs + 1; // +1 for time@0: sampled in
    LOG.debug("Run duration: {}ms  no sooner than: {}ms", durationMs, noSoonerThanMs);
    LOG.debug("Expected max sampled in: {}  Actually sampled: {}", expectedMax, sampledTaskIds.size());
    Assert.assertTrue(expectedMax >= sampledTaskIds.size());
}

From source file:ga.rugal.jpt.common.tracker.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLength) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);
    List<Future<String>> results = new LinkedList<>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        LOG.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(), threads,
                (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    LOG.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLength));
    LOG.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:org.eclipse.rdf4j.http.server.ProtocolTest.java

/**
 * Test for SES-1861.
 * 
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20,
            new ThreadFactoryBuilder().setNameFormat("rdf4j-protocoltest-%d").build());

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {

            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}