Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

On this page you can find example usage of the java.util.concurrent.ThreadPoolExecutor constructor whose prototype is shown below.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters, the default thread factory and the default rejected execution handler.
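
As a point of reference before the examples below, here is a minimal, self-contained sketch of this constructor; the pool sizes, timeout, and tasks are illustrative, not taken from any of the sources.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) throws InterruptedException {
        // Core size 2, maximum size 4; idle threads above the core size are
        // reclaimed after 30 seconds.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 8; i++) {
            final int taskId = i;
            executor.execute(() -> System.out
                    .println("task " + taskId + " ran on " + Thread.currentThread().getName()));
        }
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}

Note that with an unbounded queue such as LinkedBlockingQueue the pool never actually grows past corePoolSize: threads beyond the core are only created when the queue rejects an offered task, which an unbounded queue never does. Several of the examples below rely on this.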

Usage

From source file:com.android.camera.one.v2.OneCameraZslImpl.java

/**
 * Instantiates a new camera based on Camera 2 API.
 *
 * @param device The underlying Camera 2 device.
 * @param characteristics The device's characteristics.
 * @param pictureSize the size of the final image to be taken.
 */
OneCameraZslImpl(CameraDevice device, CameraCharacteristics characteristics, Size pictureSize) {
    Log.v(TAG, "Creating new OneCameraZslImpl");

    mDevice = device;
    mCharacteristics = characteristics;
    mLensRange = LensRangeCalculator.getDiopterToRatioCalculator(characteristics);
    mDirection = new CameraDirectionProvider(mCharacteristics);
    mFullSizeAspectRatio = calculateFullSizeAspectRatio(characteristics);

    mCameraThread = new HandlerThread("OneCamera2");
    // If this thread stalls, it will delay viewfinder frames.
    mCameraThread.setPriority(Thread.MAX_PRIORITY);
    mCameraThread.start();
    mCameraHandler = new Handler(mCameraThread.getLooper());

    mCameraListenerThread = new HandlerThread("OneCamera2-Listener");
    mCameraListenerThread.start();
    mCameraListenerHandler = new Handler(mCameraListenerThread.getLooper());

    // TODO: Encoding on multiple cores results in preview jank due to
    // excessive GC.
    int numEncodingCores = CameraUtil.getNumCpuCores();
    mImageSaverThreadPool = new ThreadPoolExecutor(numEncodingCores, numEncodingCores, 10, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    mCaptureManager = new ImageCaptureManager(MAX_CAPTURE_IMAGES, mCameraListenerHandler,
            mImageSaverThreadPool);
    mCaptureManager.setCaptureReadyListener(new ImageCaptureManager.CaptureReadyListener() {
        @Override
        public void onReadyStateChange(boolean capturePossible) {
            mReadyStateManager.setInput(ReadyStateRequirement.CAPTURE_MANAGER_READY, capturePossible);
        }
    });

    // Listen for changes to auto focus state and dispatch to
    // mFocusStateListener.
    mCaptureManager.addMetadataChangeListener(CaptureResult.CONTROL_AF_STATE,
            new ImageCaptureManager.MetadataChangeListener() {
                @Override
                public void onImageMetadataChange(Key<?> key, Object oldValue, Object newValue,
                        CaptureResult result) {
                    FocusStateListener listener = mFocusStateListener;
                    if (listener != null) {
                        listener.onFocusStatusUpdate(AutoFocusHelper.stateFromCamera2State(
                                result.get(CaptureResult.CONTROL_AF_STATE)), result.getFrameNumber());
                    }
                }
            });

    // Allocate the image reader to store all images received from the
    // camera.
    if (pictureSize == null) {
        // TODO The default should be selected by the caller, and
        // pictureSize should never be null.
        pictureSize = getDefaultPictureSize();
    }
    mCaptureImageReader = ImageReader.newInstance(pictureSize.getWidth(), pictureSize.getHeight(),
            sCaptureImageFormat, MAX_CAPTURE_IMAGES);

    mCaptureImageReader.setOnImageAvailableListener(mCaptureManager, mCameraHandler);
    mMediaActionSound.load(MediaActionSound.SHUTTER_CLICK);
}

From source file:edu.cornell.med.icb.goby.modes.SortMode.java

/**
 * Sort the alignment.
 *
 * @throws java.io.IOException error reading / writing
 */
@Override
public void execute() throws IOException {
    final String threadId = String.format("%02d", Thread.currentThread().getId());
    if (splitSize <= 0) {
        final long allocatedHeapSize = Runtime.getRuntime().totalMemory();
        final long freeInHeap = Runtime.getRuntime().freeMemory();
        final long maxHeapSize = Runtime.getRuntime().maxMemory();
        final long freeMemory = maxHeapSize - allocatedHeapSize + freeInHeap;//Util.availableMemory();

        splitSize = (long) (freeMemory * memoryPercentageForWork)
                / (long) ((numThreads > 0 ? numThreads : 1) * splitSizeScalingFactor);
        LOG.info(String.format("Maximum memory is %s. Using a split-size of %s",
                ICBStringUtils.humanMemorySize(freeMemory), ICBStringUtils.humanMemorySize(splitSize)));
    }

    final File entriesFile = new File(basename + ".entries");
    if (!entriesFile.exists()) {
        System.err.println("Could not locate alignment .entries file " + entriesFile.toString());
        return;
    }

    final long fileSize = entriesFile.length();

    // Reduce the number of processors by one, as one thread is used by this running program
    // and it will be utilized since we've chosen CallerRunsPolicy
    LOG.debug(String.format("sort-large will run with %d threads (0 == no thread pool)", numThreads));
    final AlignmentReader reader = new AlignmentReaderImpl(basename);
    try {
        reader.readHeader();
        if (reader.isSorted()) {
            LOG.warn("Warning: The input alignment is already sorted.");
        }
    } finally {
        reader.close();
    }
    if (numThreads > 0) {
        executorService = new ThreadPoolExecutor(numThreads, // core thread pool size
                numThreads, // maximum thread pool size
                10, // time to wait before resizing pool
                TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>());
        //new ArrayBlockingQueue<Runnable>(additionalThreads, true));
        //new ThreadPoolExecutor.CallerRunsPolicy()*/);
    }

    // Setup splits and start first pass sort
    LOG.debug("Splitting file and sorting all splits");
    long numberOfSplits = 0;
    long splitStart = 0;
    boolean lastSplit = false;
    boolean firstSort = true;

    progressSplitSort = new ProgressLogger(LOG, "split-sorts");
    progressSplitSort.displayFreeMemory = true;

    int count = 0;
    ObjectArrayList<Runnable> splits = new ObjectArrayList<Runnable>();
    while (!lastSplit) {
        long splitEnd = splitStart + splitSize;
        if (splitEnd >= fileSize - 1) {
            splitEnd = fileSize - 1;
            lastSplit = true;
        }
        final SortMergeSplit split = new SortMergeSplit(splitStart, splitEnd);
        numberOfSplits++;
        splits.add(sortSplit(split, firstSort));
        firstSort = false;
        splitStart = splitEnd;

    }
    LOG.info(String.format("[%s] Split file into %d pieces", threadId, numberOfSplits));

    progressSplitSort.expectedUpdates = numberOfSplits;
    progressSplitSort.start();
    for (Runnable toRun : splits) {
        if (executorService != null) {
            executorService.submit(toRun);
        } else {
            toRun.run();
        }
    }
    while (numSplitsCompleted.get() != numberOfSplits) {
        // Wait a bit for tasks to finish before finding more to submit
        if (!exceptions.isEmpty()) {
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    progressSplitSort.done();

    progressMergeSort = new ProgressLogger(LOG, "merges");
    progressMergeSort.displayFreeMemory = true;
    progressMergeSort.expectedUpdates = numberOfSplits;
    progressMergeSort.start();
    // Subsequent sorts
    boolean lastMerge = false;
    boolean done = false;
    while (!done) {

        if (!exceptions.isEmpty()) {
            break;
        }

        // Move any completed sorts back into the splitsToMerge queue
        while (true) {
            final SortMergeSplit sortedSplit = sortedSplits.poll();
            if (sortedSplit != null) {
                splitsToMerge.add(sortedSplit);
                numSortMergesRunning.decrementAndGet();
                splitsToMergeSize.incrementAndGet();
            } else {
                break;
            }
        }
        int splitsToMergeSizeLocal = splitsToMergeSize.get();
        if (lastMerge && splitsToMergeSizeLocal == 1) {
            // We're done
            break;
        }

        final int numSplitsForMerge;
        if (splitsToMergeSizeLocal == numberOfSplits && numberOfSplits <= filesPerMerge) {
            numSplitsForMerge = (int) numberOfSplits;
            lastMerge = true;
        } else if (splitsToMergeSizeLocal == 0) {
            // Nothing to sort this iteration
            numSplitsForMerge = 0;
        } else if (splitsToMergeSizeLocal == 1) {
            // Only one split remains, but it's not the final merge yet. Wait.
            numSplitsForMerge = 0;
        } else if (splitsToMergeSizeLocal > filesPerMerge) {
            numSplitsForMerge = filesPerMerge;
        } else {
            // Equal to or less than filesPerMerge. Perhaps the last merge?
            final List<SortMergeSplitFileRange> ranges = mergeMultiSplitRangeLists(splitsToMerge);
            if (ranges.size() == 1 && ranges.get(0).isRange(0, fileSize - 1)) {
                // Last merge.
                lastMerge = true;
                numSplitsForMerge = splitsToMergeSizeLocal;
            } else if (splitsToMergeSizeLocal == filesPerMerge) {
                // We have enough to merge, but it's not the last merge
                numSplitsForMerge = splitsToMergeSizeLocal;
            } else {
                // We don't have enough to merge and it's not the last merge
                numSplitsForMerge = 0;
            }
        }

        if (numSplitsForMerge > 0) {
            final List<SortMergeSplit> toMerge = new ArrayList<SortMergeSplit>(numSplitsForMerge);
            for (int i = 0; i < numSplitsForMerge; i++) {
                splitsToMergeSizeLocal = splitsToMergeSize.decrementAndGet();
                toMerge.add(splitsToMerge.poll());
            }
            LOG.debug(String.format("[%s] %d items in queue to sort after removing %d for sorting", threadId,
                    splitsToMergeSizeLocal, numSplitsForMerge));
            mergeSplits(toMerge, lastMerge);

        } else {
            // Wait a bit for tasks to finish before finding more to submit
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }

    if (executorService != null) {
        LOG.debug(String.format("[%s] Waiting for threads to finish.", threadId));
        // Accept no new tasks, but wait for all of the executor threads to finish:
        executorService.shutdown();
        try {
            while (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    progressMergeSort.stop();

    if (!filesToDelete.isEmpty()) {
        // These files weren't deleted after merge for some reason. We'll try again one more time.
        while (true) {
            final File cleanupFile = filesToDelete.poll();
            if (cleanupFile == null) {
                break;
            } else {
                deleteFile(cleanupFile, false);
            }
        }
    }

    if (exceptions.isEmpty()) {

        System.err.println("Sort completed");

        final SortMergeSplit fullFile = splitsToMerge.poll();
        LOG.info(String.format("%s made up from %d splits", fullFile, fullFile.numFiles));
        LOG.info(String.format("Took %d secondary sort/merges", numMergesExecuted.get()));
    } else {
        LOG.error("Potentially multiple exceptions follow");
        for (final Throwable t : exceptions) {
            LOG.error(t);
        }
    }

}
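
A note on the example above: the comments mention CallerRunsPolicy, but the constructor actually used pairs an unbounded LinkedBlockingQueue with the default AbortPolicy, so nothing is ever rejected. A sketch of the bounded-queue variant the commented-out lines hint at (the queue capacity is illustrative, not from the original source):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

int numThreads = 4; // illustrative
// When the bounded queue fills up, CallerRunsPolicy executes the rejected task
// on the submitting thread, throttling the producer instead of throwing
// RejectedExecutionException.
ThreadPoolExecutor executorService = new ThreadPoolExecutor(numThreads, numThreads, 10, TimeUnit.MINUTES,
        new ArrayBlockingQueue<Runnable>(numThreads * 2, true),
        new ThreadPoolExecutor.CallerRunsPolicy());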

From source file:de.tudarmstadt.lt.seg.app.Segmenter.java

private void run_parallel() throws Exception {

    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    final PrintWriter[] w = new PrintWriter[_parallelism];
    // init writers
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // don't try to submit new threads, wait until the thread queue has some capacity again
        while (queue.remainingCapacity() == 0)
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignored; the loop re-checks the remaining capacity
            }
        es.submit(() -> {
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);

        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    //      AtomicLong lc = new AtomicLong(0);
    //      ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    //      forkJoinPool.submit(() -> 
    //         liter.parallel().forEach((line) -> {
    //            final long docid = lc.incrementAndGet();
    //            if(docid % 1000 == 0)
    //               System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //   
    //            String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //            split_and_tokenize(
    //                  new StringReader(l),
    //                  String.format("%s:%d", _filename_in, docid),
    //                  sentenceSplitter.get(), 
    //                  tokenizer.get(), 
    //                  _level_filter,
    //                  _level_normalize,
    //                  _merge_types,
    //                  _merge_tokens,
    //                  _separator_sentence,
    //                  _separator_token,
    //                  _separator_desc,
    //                  w);
    //      })).get();

}
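
The loop above applies backpressure by sleeping until queue.remainingCapacity() is nonzero. An alternative sketch (not from the original source; process(...) is a hypothetical stand-in for split_and_tokenize(...)) blocks the producer on a Semaphore instead of polling:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

static void runWithBackpressure(List<String> lines, int parallelism) throws InterruptedException {
    ExecutorService es = Executors.newFixedThreadPool(parallelism);
    // The permit count caps the number of tasks that are queued or running;
    // acquire() blocks the producer instead of spin-sleeping.
    Semaphore inFlight = new Semaphore(parallelism * 2);
    for (String line : lines) {
        inFlight.acquire();
        es.submit(() -> {
            try {
                process(line); // hypothetical stand-in for split_and_tokenize(...)
            } finally {
                inFlight.release();
            }
        });
    }
    es.shutdown();
}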

From source file:com.taobao.android.builder.tools.sign.LocalSignHelper.java

private static ApkCreatorFactory createFactory() {
    ZFileOptions options = new ZFileOptions();
    options.setNoTimestamps(true);
    options.setCoverEmptySpaceUsingExtraField(true);
    ThreadPoolExecutor compressionExecutor = new ThreadPoolExecutor(0, /* Number of always alive threads */
            2, 100, TimeUnit.MILLISECONDS, new LinkedBlockingDeque<>());
    options.setCompressor(
            new BestAndDefaultDeflateExecutorCompressor(compressionExecutor, options.getTracker(), 1.0));
    options.setAutoSortFiles(true);
    return new ApkZFileCreatorFactory(options);

}
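
One detail worth noting in this example: with a corePoolSize of 0, every worker thread is subject to the 100 ms keep-alive, so the compression pool shrinks back to zero threads between bursts of work. A sketch of the equivalent behavior with a nonzero core size, using the JDK's allowCoreThreadTimeOut (sizes illustrative):

import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

ThreadPoolExecutor compressionExecutor = new ThreadPoolExecutor(2, 2, 100, TimeUnit.MILLISECONDS,
        new LinkedBlockingDeque<>());
// Allow even core threads to terminate after the keep-alive when idle,
// so the pool still shrinks to zero between bursts.
compressionExecutor.allowCoreThreadTimeOut(true);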

From source file:org.nuxeo.ecm.automation.server.jaxrs.batch.BatchManagerFixture.java

@Test
public void testFileConcurrency() throws Exception {

    // Initialize a batch
    BatchManager bm = Framework.getService(BatchManager.class);
    String batchId = bm.initBatch();

    // Add files concurrently
    int nbFiles = 100;
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(5, 5, 500L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(nbFiles + 1));

    for (int i = 0; i < nbFiles; i++) {
        final String fileIndex = String.valueOf(i);
        tpe.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    bm.addStream(batchId, fileIndex,
                            new ByteArrayInputStream(
                                    ("SomeContent_" + fileIndex).getBytes(StandardCharsets.UTF_8)),
                            fileIndex + ".txt", "text/plain");
                } catch (IOException e) {
                    fail(e.getMessage());
                }
            }
        });
    }

    tpe.shutdown();
    boolean finish = tpe.awaitTermination(20, TimeUnit.SECONDS);
    assertTrue("timeout", finish);

    // Check blobs
    List<Blob> blobs = bm.getBlobs(batchId);
    assertEquals(nbFiles, blobs.size());
    // Test indexes 0, 9, 99, ..., nbFiles - 1
    int nbDigits = (int) (Math.log10(nbFiles) + 1);
    int divisor = nbFiles;
    for (int i = 0; i < nbDigits; i++) {
        int fileIndex = nbFiles / divisor - 1;
        assertEquals(fileIndex + ".txt", blobs.get(fileIndex).getFilename());
        assertEquals("SomeContent_" + fileIndex, blobs.get(fileIndex).getString());
        divisor = divisor / 10;
    }

    // Check storage size
    TransientStore ts = bm.getTransientStore();
    assertTrue(((AbstractTransientStore) ts).getStorageSize() > 12 * nbFiles);

    // Clean batch
    bm.clean(batchId);
    assertEquals(0, ts.getStorageSizeMB());
}

From source file:com.emc.ecs.smart.SmartUploader.java

/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions.  The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records.  If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}.  Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {

            System.out.printf("starting remote MD5...\n");

            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);

            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }

            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }

            System.out.printf("\nObject verification passed!\n");
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}

From source file:org.nuxeo.ecm.automation.server.jaxrs.batch.BatchManagerFixture.java

@Test
public void testChunkConcurrency() throws Exception {

    // Initialize a batch
    BatchManager bm = Framework.getService(BatchManager.class);
    String batchId = bm.initBatch();

    // Add chunks concurrently
    int nbChunks = 100;
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(5, 5, 500L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(nbChunks + 1));

    for (int i = 0; i < nbChunks; i++) {
        final int chunkIndex = i;
        tpe.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    bm.addStream(batchId, "0",
                            new ByteArrayInputStream(
                                    ("SomeChunkContent_" + chunkIndex + " ").getBytes(StandardCharsets.UTF_8)),
                            nbChunks, chunkIndex, "MyChunkedFile.txt", "text/plain", 0);
                } catch (IOException e) {
                    fail(e.getMessage());
                }
            }
        });
    }

    tpe.shutdown();
    boolean finish = tpe.awaitTermination(20, TimeUnit.SECONDS);
    assertTrue("timeout", finish);

    // Check chunked file
    Blob blob = bm.getBlob(batchId, "0");
    assertNotNull(blob);
    int nbOccurrences = 0;
    Pattern p = Pattern.compile("SomeChunkContent_");
    Matcher m = p.matcher(blob.getString());
    while (m.find()) {
        nbOccurrences++;
    }
    assertEquals(nbChunks, nbOccurrences);

    // Check storage size
    TransientStore ts = bm.getTransientStore();
    assertTrue(((AbstractTransientStore) ts).getStorageSize() > 17 * nbChunks);

    // Clean batch
    bm.clean(batchId);
    assertEquals(0, ts.getStorageSizeMB());
}

From source file:com.vmware.photon.controller.core.Main.java

/**
 * Creates a new Deployer Service Group.
 *
 * @param deployerConfig
 * @param apiFeServerSet
 * @param cloudStoreServerSet
 * @param httpClient
 * @return
 */
private static DeployerServiceGroup createDeployerServiceGroup(PhotonControllerConfig photonControllerConfig,
        DeployerConfig deployerConfig, ServerSet apiFeServerSet, ServerSet cloudStoreServerSet,
        CloseableHttpAsyncClient httpClient) {

    logger.info("Creating Deployer Service Group");

    // Set containers config to deployer config
    try {
        deployerConfig.setContainersConfig(new ServiceConfigurator()
                .generateContainersConfig(deployerConfig.getDeployerContext().getConfigDirectory()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final ApiClientFactory apiClientFactory = new ApiClientFactory(apiFeServerSet, httpClient,
            deployerConfig.getDeployerContext().getSharedSecret(),
            deployerConfig.getDeployerContext().isAuthEnabled());

    /**
     * The blocking queue associated with the thread pool executor service
     * controls the rejection policy for new work items: a bounded queue, such as
     * an ArrayBlockingQueue, will cause new work items to be rejected (and thus
     * failed) when the queue length is reached. A LinkedBlockingQueue, which is
     * unbounded, is used here in order to enable the submission of an arbitrary
     * number of work items since this is the pattern expected for the deployer
     * (a large number of work items arrive all at once, and then no more).
     */
    final BlockingQueue<Runnable> blockingQueue = new LinkedBlockingDeque<>();
    final ListeningExecutorService listeningExecutorService = MoreExecutors
            .listeningDecorator(new ThreadPoolExecutor(deployerConfig.getDeployerContext().getCorePoolSize(),
                    deployerConfig.getDeployerContext().getMaximumPoolSize(),
                    deployerConfig.getDeployerContext().getKeepAliveTime(), TimeUnit.SECONDS, blockingQueue));

    final HttpFileServiceClientFactory httpFileServiceClientFactory = new com.vmware.photon.controller.core.Main.HttpFileServiceClientFactoryImpl();
    final AuthHelperFactory authHelperFactory = new com.vmware.photon.controller.core.Main.AuthHelperFactoryImpl();
    final HealthCheckHelperFactory healthCheckHelperFactory = new com.vmware.photon.controller.core.Main.HealthCheckHelperFactoryImpl();
    final ServiceConfiguratorFactory serviceConfiguratorFactory = new com.vmware.photon.controller.core.Main.ServiceConfiguratorFactoryImpl();
    final ZookeeperClientFactory zookeeperServerSetBuilderFactory = new com.vmware.photon.controller.core.Main.ZookeeperClientFactoryImpl();
    final HostManagementVmAddressValidatorFactory hostManagementVmAddressValidatorFactory = new com.vmware.photon.controller.core.Main.HostManagementVmAddressValidatorFactoryImpl();

    final ClusterManagerFactory clusterManagerFactory = new ClusterManagerFactory(listeningExecutorService,
            httpClient, cloudStoreServerSet,
            Paths.get(deployerConfig.getDeployerContext().getScriptDirectory(), CLUSTER_SCRIPTS_DIRECTORY)
                    .toString());

    return new DeployerServiceGroup(deployerConfig.getDeployerContext(), apiClientFactory,
            deployerConfig.getContainersConfig(), listeningExecutorService, httpFileServiceClientFactory,
            authHelperFactory, healthCheckHelperFactory, serviceConfiguratorFactory,
            zookeeperServerSetBuilderFactory, hostManagementVmAddressValidatorFactory, clusterManagerFactory);
}
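
The Javadoc comment in this example summarizes the queue trade-off well; a small standalone sketch of the two behaviors it describes (sizes illustrative):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Bounded queue: once the single thread is busy and two tasks are queued, the
// default AbortPolicy makes execute() throw RejectedExecutionException.
ThreadPoolExecutor bounded = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue<Runnable>(2));

// Unbounded queue: execute() always enqueues, so an arbitrary burst of work
// items is accepted, at the cost of unbounded memory growth.
ThreadPoolExecutor unbounded = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<Runnable>());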

From source file:de.tu_dortmund.ub.data.dswarm.TaskProcessingUnit.java

private static void executeIngests(final String[] files, final String dataModelID, final String resourceID,
        final String projectName, final String serviceName, final Integer engineThreads,
        final Properties config) throws Exception {

    // create job list
    final LinkedList<Callable<String>> filesToPush = new LinkedList<>();

    int cnt = 0;
    for (final String file : files) {

        cnt++;
        filesToPush.add(new Ingest(config, file, dataModelID, resourceID, projectName, cnt));
    }

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    execute(serviceName, filesToPush, pool);
}
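
The execute(serviceName, filesToPush, pool) helper is not shown in this excerpt. A hypothetical sketch of what such a helper might look like, using ExecutorService.invokeAll to run the Callable jobs and wait for their results:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;

// Hypothetical sketch only; the real helper in this source is not shown here.
static void execute(String serviceName, List<Callable<String>> jobs, ThreadPoolExecutor pool)
        throws InterruptedException, ExecutionException {
    final List<Future<String>> results = pool.invokeAll(jobs); // blocks until all jobs finish
    pool.shutdown();
    for (final Future<String> result : results) {
        System.out.println(serviceName + ": " + result.get());
    }
}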

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private ThreadPoolExecutor postProcess() {
    while (!this.samplingFinished) {
        try {
            Thread.sleep(100);
        } catch (final InterruptedException e) {
        }
    }

    final LinkedBlockingQueue<Runnable> processingQueue = new LinkedBlockingQueue<Runnable>(
            this.profilingQueue);

    final int nThreads = Runtime.getRuntime().availableProcessors();
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
            processingQueue);
    executor.prestartAllCoreThreads();

    return executor;
}
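
The interesting detail here is that the executor is constructed over a queue that already contains work (copied from this.profilingQueue), and prestartAllCoreThreads() forces the core threads to start draining it immediately; without that call, worker threads would only be created lazily on subsequent execute() submissions. A minimal sketch of the same pattern (work items illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

LinkedBlockingQueue<Runnable> preloaded = new LinkedBlockingQueue<Runnable>();
for (int i = 0; i < 100; i++) {
    final int n = i;
    preloaded.add(() -> System.out.println("draining item " + n));
}
int nThreads = Runtime.getRuntime().availableProcessors();
ThreadPoolExecutor executor = new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, preloaded);
// Without this call no worker threads exist yet, so nothing drains the queue.
executor.prestartAllCoreThreads();
executor.shutdown(); // already-queued tasks still complete after shutdown()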