Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

Introduction

On this page you can find example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
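
For orientation, here is a minimal, self-contained sketch (not taken from any of the source files below) of a producer and a consumer sharing a fixed-capacity queue. put blocks while the queue is full and take blocks while it is empty; the class and item names are illustrative only.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ArrayBlockingQueueExample {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 10 with the default (non-fair) access policy
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(10);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 100; i++) {
                    queue.put("item-" + i); // blocks while the queue is full
                }
                queue.put("EOF"); // sentinel that tells the consumer to stop
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Consume on the main thread until the sentinel arrives
        String item;
        while (!"EOF".equals(item = queue.take())) { // blocks while the queue is empty
            System.out.println(item);
        }
        producer.join();
    }
}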

Usage

From source file:com.alibaba.otter.node.etl.load.loader.db.FileLoadAction.java

public void afterPropertiesSet() throws Exception {
    executor = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(poolSize * 4), new NamedThreadFactory(WORKER_NAME),
            new ThreadPoolExecutor.CallerRunsPolicy());
}

From source file:com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java

protected void runTestLoop(Callable<Object> testMethod, boolean useMultipleThreads) throws Exception {
    // Clean up the Kafka topic
    // TODO jfim: Re-enable this once PINOT-2598 is fixed
    // purgeKafkaTopicAndResetRealtimeTable();

    List<Pair<File, File>> enabledRealtimeSegments = new ArrayList<>();

    // Sort the realtime segments based on their segment name so they get added from earliest to latest
    TreeMap<File, File> sortedRealtimeSegments = new TreeMap<File, File>(new Comparator<File>() {
        @Override
        public int compare(File o1, File o2) {
            return _realtimeAvroToSegmentMap.get(o1).getName()
                    .compareTo(_realtimeAvroToSegmentMap.get(o2).getName());
        }
    });
    sortedRealtimeSegments.putAll(_realtimeAvroToSegmentMap);

    for (File avroFile : sortedRealtimeSegments.keySet()) {
        enabledRealtimeSegments.add(Pair.of(avroFile, sortedRealtimeSegments.get(avroFile)));

        if (useMultipleThreads) {
            _queryExecutor = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
        }

        // Push avro for the new segment
        LOGGER.info("Pushing Avro file {} into Kafka", avroFile);
        pushAvroIntoKafka(Collections.singletonList(avroFile), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
                KAFKA_TOPIC);

        // Configure the scan based comparator to use the distinct union of the offline and realtime segments
        configureScanBasedComparator(enabledRealtimeSegments);

        QueryResponse queryResponse = _scanBasedQueryProcessor.processQuery("select count(*) from mytable");

        int expectedRecordCount = queryResponse.getNumDocsScanned();
        waitForRecordCountToStabilizeToExpectedCount(expectedRecordCount,
                System.currentTimeMillis() + getStabilizationTimeMs());

        // Run the actual tests
        LOGGER.info("Running queries");
        testMethod.call();

        if (useMultipleThreads) {
            if (_nQueriesRead == -1) {
                _queryExecutor.shutdown();
                _queryExecutor.awaitTermination(5, TimeUnit.MINUTES);
            } else {
                int totalQueries = _failedQueries.get() + _successfulQueries.get();
                while (totalQueries < _nQueriesRead) {
                    LOGGER.info("Completed " + totalQueries + " out of " + _nQueriesRead + " - waiting");
                    Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
                    totalQueries = _failedQueries.get() + _successfulQueries.get();
                }
                if (totalQueries > _nQueriesRead) {
                    throw new RuntimeException("Executed " + totalQueries + " more than " + _nQueriesRead);
                }
                _queryExecutor.shutdown();
            }
        }
        int totalQueries = _failedQueries.get() + _successfulQueries.get();
        doDisplayStatus(totalQueries);

        // Release resources
        _scanBasedQueryProcessor.close();
        _compareStatusFileWriter.write("Status after push of " + avroFile + ":" + System.currentTimeMillis()
                + ":Executed " + _nQueriesRead + " queries, " + _failedQueries + " failures,"
                + _emptyResults.get() + " empty results\n");
    }
}

From source file:com.sittinglittleduck.DirBuster.Manager.java

private void createTheThreads() {
    // workers = new Worker[workerCount];

    workers.removeAllElements();
    parseWorkers.removeAllElements();

    for (int i = 0; i < workerCount; i++) {
        workers.addElement(new Worker(i, this));
        // workers[i] = new Worker(this, i);
        // tpes.execute(workers[i]);
    }

    // create the htmlparse threads
    for (int i = 0; i < workerCount; i++) {
        parseWorkers.addElement(new HTMLparse(this));
    }
    // work queue
    workQueue = new ArrayBlockingQueue<WorkUnit>(workerCount * 3);

    // dir to be processed
    dirQueue = new ArrayBlockingQueue<DirToCheck>(100000);

    // queue to hold a list of items to parsed
    parseQueue = new ArrayBlockingQueue<HTMLparseWorkUnit>(200000);

    timer = new Timer();

    // add the first string onto the queue
    try {
        Vector tempext = extToUse;
        // extToUse.clone().
        dirQueue.put(new DirToCheck(startPoint, tempext));
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.solr.handler.dataimport.XPathEntityProcessor.java

private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // nothing atomic about it. I just needed a StrongReference
    final AtomicReference<Exception> exp = new AtomicReference<>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<>(blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, (record, xpath) -> {
                    if (isEnd.get()) {
                        throwExp.set(false);
                        // To end the streaming. Otherwise the parsing will go on forever
                        // even though the consumer has gone away
                        throw new RuntimeException("BREAK");
                    }
                    Map<String, Object> row;
                    try {
                        row = readRow(record, xpath);
                    } catch (Exception e) {
                        isEnd.set(true);
                        return;
                    }
                    offer(row);
                });
            } catch (Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records.  Perhaps buffer size should be increased.");
                }
            } catch (InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };

    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;

            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row.  Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);

            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /*no op*/
        }
    };

}

From source file:org.apache.solr.handler.dataimport.processor.XPathEntityProcessor.java

private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // nothing atomic about it. I just needed a StrongReference
    final AtomicReference<Exception> exp = new AtomicReference<Exception>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<Map<String, Object>>(
            blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
                    @Override
                    @SuppressWarnings("unchecked")
                    public void handle(Map<String, Object> record, String xpath) {
                        if (isEnd.get()) {
                            throwExp.set(false);
                            // To end the streaming. Otherwise the parsing will go on forever
                            // even though the consumer has gone away
                            throw new RuntimeException("BREAK");
                        }
                        Map<String, Object> row;
                        try {
                            row = readRow(record, xpath);
                        } catch (final Exception e) {
                            isEnd.set(true);
                            return;
                        }
                        offer(row);
                    }
                });
            } catch (final Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records.  Perhaps buffer size should be increased.");
                }
            } catch (final InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };

    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;

            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (final InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row.  Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);

            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /*no op*/
        }
    };

}

From source file:com.bosscs.spark.commons.utils.Utils.java

/**
 * Returns an instance of ThreadPoolExecutor using a bounded queue and blocking when the worker queue is full.
 * @param nThreads thread pool size
 * @param queueSize workers queue size
 * @return thread pool executor
 */
public static ExecutorService newBlockingFixedThreadPoolExecutor(int nThreads, int queueSize) {
    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(queueSize);
    RejectedExecutionHandler blockingRejectedExecutionHandler = new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
            try {
                executor.getQueue().put(task);
            } catch (InterruptedException e) {
            }
        }

    };

    return new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, blockingQueue,
            blockingRejectedExecutionHandler);
}
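
A hypothetical caller of the helper above (the Utils class name and method come from the source; the task body and sizes are assumptions) blocks inside submit() once the bounded queue fills, because the rejection handler re-inserts the rejected task with put():

ExecutorService pool = Utils.newBlockingFixedThreadPoolExecutor(4, 16);
for (int i = 0; i < 1000; i++) {
    final int taskId = i;
    // When all 4 threads are busy and the 16-slot queue is full, this call
    // blocks instead of throwing RejectedExecutionException
    pool.submit(() -> System.out.println("processed task " + taskId));
}
pool.shutdown();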

From source file:com.evolveum.midpoint.repo.common.task.AbstractSearchIterativeResultHandler.java

public void createWorkerThreads(Task coordinatorTask, OperationResult opResult) {
    Integer threadsCount = getWorkerThreadsCount(coordinatorTask);
    if (threadsCount == null || threadsCount == 0) {
        return; // nothing to do
    }

    int queueSize = threadsCount * 2; // actually, a queue of size threadsCount should be sufficient, but it doesn't hurt if the queue is larger
    requestQueue = new ArrayBlockingQueue<>(queueSize);

    workerSpecificResults = new ArrayList<>(threadsCount);

    for (int i = 0; i < threadsCount; i++) {
        // we intentionally do not put worker specific result under main operation result until the handler is done
        // (because of concurrency issues - adding subresults vs e.g. putting main result into the task)
        OperationResult workerSpecificResult = new OperationResult(
                taskOperationPrefix + ".handleAsynchronously");
        workerSpecificResult.addContext("subtaskIndex", i + 1);
        workerSpecificResults.add(workerSpecificResult);

        Task subtask = coordinatorTask.createSubtask(new WorkerHandler(workerSpecificResult));
        if (isEnableIterationStatistics()) {
            subtask.resetIterativeTaskInformation(null);
        }
        if (isEnableSynchronizationStatistics()) {
            subtask.resetSynchronizationInformation(null);
        }
        if (isEnableActionsExecutedStatistics()) {
            subtask.resetActionsExecutedInformation(null);
        }
        subtask.setCategory(coordinatorTask.getCategory());
        subtask.setResult(new OperationResult(taskOperationPrefix + ".executeWorker",
                OperationResultStatus.IN_PROGRESS, (String) null));
        subtask.setName("Worker thread " + (i + 1) + " of " + threadsCount);
        subtask.startLightweightHandler();
        LOGGER.trace("Worker subtask {} created", subtask);
    }
}

From source file:org.opencastproject.videosegmenter.impl.VideoSegmenterServiceImpl.java

/**
 * Returns the segments for the movie accessible through the frame grabbing control.
 *
 * @param video
 *          the mpeg-7 video representation
 * @param dsh
 *          the data source handler
 * @return the list of segments
 * @throws IOException
 *           if accessing a frame fails
 * @throws VideoSegmenterException
 *           if segmentation of the video fails
 */
protected List<Segment> segment(Video video, FrameGrabber dsh) throws IOException, VideoSegmenterException {
    List<Segment> segments = new ArrayList<Segment>();

    int t = 1;
    int lastStableImageTime = 0;
    long startOfSegment = 0;
    int currentSceneStabilityCount = 1;
    boolean sceneChangeImminent = true;
    boolean luckyPunchRecovery = false;
    int segmentCount = 1;
    BufferedImage previousImage = null;
    BufferedImage lastStableImage = null;
    BlockingQueue<Buffer> bufferQueue = new ArrayBlockingQueue<Buffer>(stabilityThreshold + 1);
    long durationInSeconds = video.getMediaTime().getMediaDuration().getDurationInMilliseconds() / 1000;
    Segment contentSegment = video.getTemporalDecomposition().createSegment("segment-" + segmentCount);
    ImageComparator icomp = new ImageComparator(changesThreshold);

    // icomp.setStatistics(true);
    // String imagesPath = PathSupport.concat(new String[] {
    // System.getProperty("java.io.tmpdir"),
    // "videosegments",
    // video.getMediaLocator().getMediaURI().toString().replaceAll("\\W", "-")
    // });
    // icomp.saveImagesTo(new File(imagesPath));

    Buffer buf = dsh.getBuffer();
    while (t < durationInSeconds && buf != null && !buf.isEOM()) {
        BufferedImage bufferedImage = ImageUtils.createImage(buf);
        if (bufferedImage == null)
            throw new VideoSegmenterException("Unable to extract image at time " + t);

        logger.trace("Analyzing video at {} s", t);

        // Compare the new image with our previous sample
        boolean differsFromPreviousImage = icomp.isDifferent(previousImage, bufferedImage, t);

        // We found an image that is different compared to the previous one. Let's see if this image remains stable
        // for some time (STABILITY_THRESHOLD) so we can declare a new scene
        if (differsFromPreviousImage) {
            logger.debug("Found differing image at {} seconds", t);

            // If this is the result of a lucky punch (looking ahead STABILITY_THRESHOLD seconds), then we should
            // really start over and make sure we get the correct beginning of the new scene
            if (!sceneChangeImminent && t - lastStableImageTime > 1) {
                luckyPunchRecovery = true;
                previousImage = lastStableImage;
                bufferQueue.add(buf);
                t = lastStableImageTime;
            } else {
                lastStableImageTime = t - 1;
                lastStableImage = previousImage;
                previousImage = bufferedImage;
                currentSceneStabilityCount = 1;
                t++;
            }
            sceneChangeImminent = true;
        }

        // We are looking ahead and everything seems to be fine.
        else if (!sceneChangeImminent) {
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            lastStableImageTime = t;
            t += stabilityThreshold;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
        }

        // Seems to be the same image. If we have just recently detected a new scene, let's see if we are able to
        // confirm that this scene is stable (>= STABILITY_THRESHOLD)
        else if (currentSceneStabilityCount < stabilityThreshold) {
            currentSceneStabilityCount++;
            previousImage = bufferedImage;
            t++;
        }

        // Did we find a new scene?
        else if (currentSceneStabilityCount == stabilityThreshold) {
            lastStableImageTime = t;

            long endOfSegment = t - stabilityThreshold - 1;
            long durationms = (endOfSegment - startOfSegment) * 1000L;

            // Create a new segment if this wasn't the first one
            if (endOfSegment > stabilityThreshold) {
                contentSegment.setMediaTime(new MediaRelTimeImpl(startOfSegment * 1000L, durationms));
                contentSegment = video.getTemporalDecomposition().createSegment("segment-" + ++segmentCount);
                segments.add(contentSegment);
                startOfSegment = endOfSegment;
            }

            // After finding a new segment, likelihood of a stable image is good, let's take a look ahead. Since
            // a processor can't seek, we need to store the buffers in between, in case we need to come back.
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            t += stabilityThreshold;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
            currentSceneStabilityCount++;
            sceneChangeImminent = false;
            logger.info("Found new scene at {} s", startOfSegment);
        }

        // Did we find a new scene by looking ahead?
        else if (sceneChangeImminent) {
            // We found a scene change by looking ahead. Now we want to get to the exact position
            lastStableImageTime = t;
            previousImage = bufferedImage;
            lastStableImage = bufferedImage;
            currentSceneStabilityCount++;
            t++;
        }

        // Nothing special, business as usual
        else {
            // If things look stable, then let's look ahead as much as possible without losing information (which is
            // equal to looking ahead STABILITY_THRESHOLD seconds).
            lastStableImageTime = t;
            fillLookAheadBuffer(bufferQueue, buf, dsh);
            t += stabilityThreshold;
            lastStableImage = bufferedImage;
            previousImage = bufferedImage;
        }

        if (luckyPunchRecovery) {
            buf = bufferQueue.poll();
            luckyPunchRecovery = !bufferQueue.isEmpty();
        } else
            buf = dsh.getBuffer();
    }

    // Finish off the last segment
    long startOfSegmentms = startOfSegment * 1000L;
    long durationms = ((long) durationInSeconds - startOfSegment) * 1000;
    contentSegment.setMediaTime(new MediaRelTimeImpl(startOfSegmentms, durationms));
    segments.add(contentSegment);

    // Print summary
    if (icomp.hasStatistics()) {
        NumberFormat nf = NumberFormat.getNumberInstance();
        nf.setMaximumFractionDigits(2);
        logger.info("Image comparison finished with an average change of {}% in {} comparisons",
                nf.format(icomp.getAvgChange()), icomp.getComparisons());
    }

    // Cleanup
    if (icomp.getSavedImagesDirectory() != null) {
        FileUtils.deleteQuietly(icomp.getSavedImagesDirectory());
    }

    return segments;
}

From source file:gdsc.core.clustering.optics.ProjectedMoleculeSpace.java

/**
 * Create random projections, project points and put points into sets of size
 * about minSplitSize/2.
 *
 * @param minSplitSize
 *            minimum size for which a point set is further
 *            partitioned (roughly corresponds to minPts in OPTICS)
 */
public void computeSets(int minSplitSize) {
    splitSets = new TurboList<Split>();

    // Edge cases
    if (minSplitSize < 2 || size <= 1)
        return;

    if (size == 2) {
        // No point performing projections and splits
        splitSets.add(new Split(0, new int[] { 0, 1 }));
        return;
    }

    final int dim = 2;

    // FastOPTICS paper states you can use c0*log(N) sets and c1*log(N) projections.
    // The ELKI framework increases this for the number of dimensions. However, I have stuck
    // with the original (as it is lower and so will be faster).
    // Note: In most computer science contexts log is in base 2.
    int nPointSetSplits, nProject1d;

    nPointSetSplits = getNumberOfSplitSets(nSplits, size);
    nProject1d = getNumberOfProjections(nProjections, size);

    // perform O(log N+log dim) splits of the entire point sets projections
    //nPointSetSplits = (int) (logOProjectionConst * log2(size * dim + 1));
    // perform O(log N+log dim) projections of the point set onto a random line
    //nProject1d = (int) (logOProjectionConst * log2(size * dim + 1));

    if (nPointSetSplits < 1 || nProject1d < 1)
        return; // Nothing to do

    // perform projections of points
    float[][] projectedPoints = new float[nProject1d][];

    long time = System.currentTimeMillis();
    setUpProgress(nProject1d);
    if (tracker != null) {
        tracker.log("Computing projections ...");
    }

    // Multi-thread this for speed
    int nThreads = Math.min(this.nThreads, nPointSetSplits);
    final TurboList<Thread> threads = new TurboList<Thread>(nThreads);

    final BlockingQueue<ProjectionJob> projectionJobs = new ArrayBlockingQueue<ProjectionJob>(nThreads * 2);
    final TurboList<ProjectionWorker> projectionWorkers = new TurboList<ProjectionWorker>(nThreads);
    for (int i = 0; i < nThreads; i++) {
        final ProjectionWorker worker = new ProjectionWorker(projectionJobs, projectedPoints);
        final Thread t = new Thread(worker);
        projectionWorkers.addf(worker);
        threads.addf(t);
        t.start();
    }

    // Create random unit vectors, or distribute them uniformly around a semi-circle
    RandomVectorGenerator vectorGen = (useRandomVectors) ? new UnitSphereRandomVectorGenerator(2, rand) : null;
    final double increment = Math.PI / nProject1d;
    for (int i = 0; i < nProject1d; i++) {
        // Create a random unit vector
        double[] currRp;
        if (useRandomVectors) {
            currRp = vectorGen.nextVector();
        } else {
            // For a 2D vector we can just uniformly distribute them around a semi-circle
            currRp = new double[dim];
            double a = i * increment;
            currRp[0] = Math.sin(a);
            currRp[1] = Math.cos(a);
        }
        put(projectionJobs, new ProjectionJob(i, currRp));
    }
    // Finish all the worker threads by passing in a null job
    for (int i = 0; i < nThreads; i++) {
        put(projectionJobs, new ProjectionJob(-1, null));
    }

    // Wait for all to finish
    for (int i = 0; i < nThreads; i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    threads.clear();

    if (tracker != null) {
        tracker.progress(1);
        long time2 = System.currentTimeMillis();
        tracker.log("Computed projections ... " + Utils.timeToString(time2 - time));
        time = time2;
        tracker.log("Splitting data ...");
    }

    // split entire point set, reuse projections by shuffling them
    int[] proind = Utils.newArray(nProject1d, 0, 1);
    setUpProgress(nPointSetSplits);

    // The splits do not have to be that random so we can use a pseudo random sequence.
    // The sets will be randomly sized between 1 and minSplitSize. Ensure we have enough 
    // numbers for all the splits.
    double expectedSetSize = (1 + minSplitSize) * 0.5;
    int expectedSets = (int) Math.round(size / expectedSetSize);
    pseudoRandom = new TurboRandomGenerator(Math.max(200, minSplitSize + 2 * expectedSets), rand);

    // Multi-thread this for speed
    final BlockingQueue<SplitJob> splitJobs = new ArrayBlockingQueue<SplitJob>(nThreads * 2);
    final TurboList<SplitWorker> splitWorkers = new TurboList<SplitWorker>(nThreads);
    for (int i = 0; i < nThreads; i++) {
        final SplitWorker worker = new SplitWorker(splitJobs, minSplitSize);
        final Thread t = new Thread(worker);
        splitWorkers.addf(worker);
        threads.addf(t);
        t.start();
    }

    for (int i = 0; i < nPointSetSplits; i++) {
        // shuffle projections
        float[][] shuffledProjectedPoints = new float[nProject1d][];
        pseudoRandom.shuffle(proind);
        for (int j = 0; j < nProject1d; j++) {
            shuffledProjectedPoints[j] = projectedPoints[proind[j]];
        }

        // New random generator
        TurboRandomGenerator rand = (TurboRandomGenerator) pseudoRandom.clone();
        rand.setSeed(i);

        put(splitJobs, new SplitJob(i, shuffledProjectedPoints, rand));
    }

    // Finish all the worker threads by passing in a null job
    for (int i = 0; i < nThreads; i++) {
        put(splitJobs, new SplitJob(-1, null, null));
    }

    // Wait for all to finish
    int total = 0;
    for (int i = 0; i < nThreads; i++) {
        try {
            threads.get(i).join();
            total += splitWorkers.get(i).splitSets.size();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    threads.clear();

    // Merge the split-sets
    splitSets = splitWorkers.get(0).splitSets;
    splitSets.ensureCapacity(total);
    for (int i = 1; i < nThreads; i++)
        splitSets.addAll(splitWorkers.get(i).splitSets);

    if (tracker != null) {
        time = System.currentTimeMillis() - time;
        tracker.log("Split data ... " + Utils.timeToString(time));
        tracker.progress(1);
    }
}

From source file:com.evolveum.midpoint.model.impl.util.AbstractSearchIterativeResultHandler.java

public void createWorkerThreads(Task coordinatorTask, OperationResult opResult) {
    Integer threadsCount = getWorkerThreadsCount(coordinatorTask);
    if (threadsCount == null || threadsCount == 0) {
        return; // nothing to do
    }

    int queueSize = threadsCount * 2; // actually, a queue of size threadsCount should be sufficient, but it doesn't hurt if the queue is larger
    requestQueue = new ArrayBlockingQueue<>(queueSize);

    workerSpecificResults = new ArrayList<>(threadsCount);

    for (int i = 0; i < threadsCount; i++) {
        // we intentionally do not put worker specific result under main operation result until the handler is done
        // (because of concurrency issues - adding subresults vs e.g. putting main result into the task)
        OperationResult workerSpecificResult = new OperationResult(
                taskOperationPrefix + ".handleAsynchronously");
        workerSpecificResult.addContext("subtask", i);
        workerSpecificResults.add(workerSpecificResult);

        Task subtask = coordinatorTask.createSubtask(new WorkerHandler(workerSpecificResult));
        if (isEnableIterationStatistics()) {
            subtask.resetIterativeTaskInformation(null);
        }
        if (isEnableSynchronizationStatistics()) {
            subtask.resetSynchronizationInformation(null);
        }
        if (isEnableActionsExecutedStatistics()) {
            subtask.resetActionsExecutedInformation(null);
        }
        subtask.setCategory(coordinatorTask.getCategory());
        subtask.setResult(new OperationResult(taskOperationPrefix + ".executeWorker",
                OperationResultStatus.IN_PROGRESS, null));
        subtask.setName("Worker thread " + (i + 1) + " of " + threadsCount);
        subtask.startLightweightHandler();
        LOGGER.trace("Worker subtask {} created", subtask);
    }
}