Example usage for java.util.concurrent LinkedBlockingQueue put

Introduction

This page presents example usages of java.util.concurrent LinkedBlockingQueue put, collected from open-source projects.

Prototype

public void put(E e) throws InterruptedException 

Document

Inserts the specified element at the tail of this queue, waiting if necessary for space to become available.
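
A minimal, self-contained sketch of this behavior (class and element names are illustrative): put blocks only when the queue was constructed with a capacity bound; on an unbounded LinkedBlockingQueue it inserts immediately.

import java.util.concurrent.LinkedBlockingQueue;

public class PutExample {
    public static void main(String[] args) throws InterruptedException {
        // Capacity-bounded queue: put() blocks once two elements are queued.
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(2);

        queue.put("first");  // returns immediately
        queue.put("second"); // returns immediately; the queue is now full

        // A consumer takes one element after a short delay, which
        // unblocks the producer's third put().
        Thread consumer = new Thread(() -> {
            try {
                Thread.sleep(100);
                System.out.println("took: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        queue.put("third"); // waits here until the consumer frees a slot
        System.out.println("all three elements enqueued");
        consumer.join();
    }
}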

Usage

From source file:au.org.ala.spatial.analysis.layers.LayerDistanceIndex.java

/**
 * @param threadcount    number of threads to run analysis.
 * @param onlyThesePairs array of distances to run as fieldId1 + " " +
 *                       fieldId2 where fieldId1.compareTo(fieldId2) &lt; 0, or null for all missing
 *                       distances.
 * @throws InterruptedException
 */
public void occurrencesUpdate(int threadcount, String[] onlyThesePairs) throws InterruptedException {

    //create distances file if it does not exist.
    File layerDistancesFile = new File(IntersectConfig.getAlaspatialOutputPath() + LAYER_DISTANCE_FILE);
    if (!layerDistancesFile.exists()) {
        FileWriter fw = null;
        try {
            fw = new FileWriter(layerDistancesFile);
            fw.flush();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            if (fw != null) {
                try {
                    fw.close();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            }
        }
    }

    Map<String, Double> map = loadDistances();

    LinkedBlockingQueue<String> todo = new LinkedBlockingQueue<String>();

    if (onlyThesePairs != null && onlyThesePairs.length > 0) {
        for (String s : onlyThesePairs) {
            todo.add(s);
        }
    } else {
        //find all environmental layer analysis files
        File root = new File(IntersectConfig.getAlaspatialOutputPath());
        File[] dirs = root.listFiles(new FileFilter() {

            @Override
            public boolean accept(File pathname) {
                return pathname != null && pathname.isDirectory();
            }
        });

        HashMap<String, String> domains = new HashMap<String, String>();
        for (File dir : dirs) {
            //iterate through files so we get everything
            File[] files = new File(dir.getPath()).listFiles(new FileFilter() {

                @Override
                public boolean accept(File pathname) {
                    return pathname.getName().endsWith(".grd") && pathname.getName().startsWith("el");
                }
            });

            for (int i = 0; i < files.length; i++) {
                for (int j = i + 1; j < files.length; j++) {
                    String file1 = files[i].getName().replace(".grd", "");
                    String file2 = files[j].getName().replace(".grd", "");

                    //only operate on file names that are valid fields
                    if (Client.getFieldDao().getFieldById(file1) != null
                            && Client.getFieldDao().getFieldById(file2) != null) {

                        String domain1 = domains.get(file1);
                        if (domain1 == null) {
                            String pid1 = Client.getFieldDao().getFieldById(file1).getSpid();
                            domain1 = Client.getLayerDao().getLayerById(Integer.parseInt(pid1)).getdomain();
                            domains.put(file1, domain1);
                        }
                        String domain2 = domains.get(file2);
                        if (domain2 == null) {
                            String pid2 = Client.getFieldDao().getFieldById(file2).getSpid();
                            domain2 = Client.getLayerDao().getLayerById(Integer.parseInt(pid2)).getdomain();
                            domains.put(file2, domain2);
                        }

                        String key = (file1.compareTo(file2) < 0) ? file1 + " " + file2 : file2 + " " + file1;

                        //domain test
                        if (isSameDomain(parseDomain(domain1), parseDomain(domain2))) {
                            if (!map.containsKey(key) && !todo.contains(key)) {
                                todo.put(key);
                            }
                        }
                    }
                }
            }
        }
    }

    LinkedBlockingQueue<String> toDisk = new LinkedBlockingQueue<String>();
    CountDownLatch cdl = new CountDownLatch(todo.size());
    CalcThread[] threads = new CalcThread[threadcount];
    for (int i = 0; i < threadcount; i++) {
        threads[i] = new CalcThread(cdl, todo, toDisk);
        threads[i].start();
    }

    ToDiskThread toDiskThread = new ToDiskThread(
            IntersectConfig.getAlaspatialOutputPath() + LAYER_DISTANCE_FILE, toDisk);
    toDiskThread.start();

    cdl.await();

    for (int i = 0; i < threadcount; i++) {
        threads[i].interrupt();
    }

    toDiskThread.interrupt();
}
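
The example above is a plain work-queue pattern: the field pairs are queued up front, a fixed pool of CalcThreads drains the queue, and a CountDownLatch sized to todo.size() lets the caller wait for completion before interrupting the now-idle workers. A minimal sketch of such a worker, assuming CalcThread follows this shape (the real class is not shown on this page):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;

class Worker extends Thread {
    private final CountDownLatch latch;
    private final LinkedBlockingQueue<String> todo;

    Worker(CountDownLatch latch, LinkedBlockingQueue<String> todo) {
        this.latch = latch;
        this.todo = todo;
    }

    @Override
    public void run() {
        try {
            while (true) {
                String pair = todo.take(); // blocks while the queue is empty
                process(pair);
                latch.countDown();
            }
        } catch (InterruptedException e) {
            // Expected: the caller interrupts each worker once the latch
            // reaches zero and all work is done.
        }
    }

    private void process(String pair) {
        System.out.println("computing distance for: " + pair);
    }

    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> todo = new LinkedBlockingQueue<String>();
        todo.put("el1 el2"); // put never blocks on an unbounded queue
        todo.put("el1 el3");
        CountDownLatch latch = new CountDownLatch(todo.size());
        Worker worker = new Worker(latch, todo);
        worker.start();
        latch.await();      // returns once both pairs are processed
        worker.interrupt(); // releases the worker blocked in take()
    }
}

Because the latch is created before the workers start and counts one per queued pair, await() returns exactly when the last pair has been processed; the subsequent interrupt() calls release any workers still blocked in take().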

From source file:org.commoncrawl.service.pagerank.slave.PageRankUtils.java

public static void distributeRank(final PRValueMap valueMap, final Path outlinksFile,
        final boolean outlinksIsRemote, File localOutputDir, String remoteOutputDir, int thisNodeIdx,
        int nodeCount, int iterationNumber, final ProgressAndCancelCheckCallback progressCallback)
        throws IOException {

    final Configuration conf = CrawlEnvironment.getHadoopConfig();

    Vector<PRValueOutputStream> outputStreamVector = new Vector<PRValueOutputStream>();

    // allocate a queue ... 
    final LinkedBlockingQueue<OutlinkItem> queue = new LinkedBlockingQueue<OutlinkItem>(20000);

    try {

        // start the loader thread ... 
        Thread loaderThread = new Thread(new Runnable() {

            final BytesWritable key = new BytesWritable();
            final BytesWritable value = new BytesWritable();

            final DataInputBuffer keyStream = new DataInputBuffer();
            final DataInputBuffer valueStream = new DataInputBuffer();

            @Override
            public void run() {
                LOG.info("Opening Outlinks File at:" + outlinksFile);
                SequenceFile.Reader reader = null;
                try {

                    FileSystem fsForOutlinksFile = null;
                    if (outlinksIsRemote) {
                        fsForOutlinksFile = CrawlEnvironment.getDefaultFileSystem();
                    } else {
                        fsForOutlinksFile = FileSystem.getLocal(conf);
                    }

                    FileStatus outlinksFileStatus = fsForOutlinksFile.getFileStatus(outlinksFile);
                    long bytesToReadTotal = (outlinksFileStatus != null) ? outlinksFileStatus.getLen() : 0;

                    reader = new SequenceFile.Reader(fsForOutlinksFile, outlinksFile, conf);
                    OutlinkItem item = new OutlinkItem();
                    int itemCount = 0;
                    boolean isCancelled = false;
                    while (!isCancelled && reader.next(key, value)) {

                        keyStream.reset(key.getBytes(), 0, key.getLength());
                        valueStream.reset(value.getBytes(), 0, value.getLength());

                        //populate item from data 
                        readURLFPFromStream(keyStream, item.targetFingerprint);
                        item.urlCount = readURLFPAndCountFromStream(valueStream, item.sourceFingerprint);

                        try {
                            // blocks whenever 20000 items are already queued
                            queue.put(item);
                        } catch (InterruptedException e) {
                        }
                        item = new OutlinkItem();

                        if (itemCount++ % 10000 == 0 && progressCallback != null) {

                            float percentComplete = (float) reader.getPosition() / (float) bytesToReadTotal;
                            if (progressCallback.updateProgress(percentComplete)) {
                                LOG.info("Cancel check callback returned true.Cancelling outlink item load");
                                isCancelled = true;
                            }
                        }
                    }
                    item.sourceFingerprint = null;
                    item.targetFingerprint = null;

                    // add empty item 
                    try {
                        if (!isCancelled) {
                            queue.put(item);
                        } else {
                            queue.put(new OutlinkItem(new IOException("Operation Cancelled")));
                        }
                    } catch (InterruptedException e) {
                    }

                } catch (IOException e) {
                    // add error item to queue.
                    try {
                        queue.put(new OutlinkItem(e));
                    } catch (InterruptedException e1) {
                    }
                } finally {
                    if (reader != null)
                        try {
                            reader.close();
                        } catch (IOException e) {
                        }
                }
            }

        });

        loaderThread.start();

        // first things first ... initialize output stream vector
        FileSystem fileSystem = buildDistributionOutputStreamVector(true,
                getOutlinksBaseName(thisNodeIdx, iterationNumber), localOutputDir, remoteOutputDir, thisNodeIdx,
                nodeCount, outputStreamVector);

        try {
            // open outlinks file .
            LOG.info("Iterating Items in Outlinks File and Writing Test Value");

            int itemCount = 0;
            int totalOutlinkCount = 0;
            int iterationOutlinkCount = 0;
            long iterationStart = System.currentTimeMillis();
            long timeStart = iterationStart;

            boolean done = false;

            ArrayList<OutlinkItem> items = new ArrayList<OutlinkItem>();
            // start iterating outlinks 
            while (!done) {

                // Drain everything currently queued. drainTo does not block;
                // an empty queue simply yields an empty batch.
                queue.drainTo(items);

                for (OutlinkItem item : items) {
                    if (item.error != null) {
                        LOG.info(
                                "Loader Thread Returned Error:" + CCStringUtils.stringifyException(item.error));
                        throw item.error;
                    } else if (item.sourceFingerprint == null) {
                        LOG.info("Loader Thread Indicated EOF via emtpy item");
                        done = true;
                    } else {
                        ++itemCount;

                        // now get pr value for fingerprint (random seek in memory here!!!)
                        float prValue = valueMap.getPRValue(item.sourceFingerprint)
                                / (float) Math.max(item.urlCount, 1);

                        // write value out 
                        int nodeIndex = (item.targetFingerprint.hashCode() & Integer.MAX_VALUE) % nodeCount;
                        outputStreamVector.get(nodeIndex).writePRValue(item.targetFingerprint,
                                item.sourceFingerprint, prValue);

                        if (itemCount % 10000 == 0) {

                            long timeEnd = System.currentTimeMillis();
                            int milliseconds = (int) (timeEnd - iterationStart);

                            LOG.info("Distribute PR for 10000 Items with:" + iterationOutlinkCount
                                    + " Outlinks Took:" + milliseconds + " Milliseconds" + " QueueCount:"
                                    + queue.size());

                            iterationStart = System.currentTimeMillis();
                            totalOutlinkCount += iterationOutlinkCount;
                            iterationOutlinkCount = 0;
                        }

                    }
                }
                items.clear();
            }

            totalOutlinkCount += iterationOutlinkCount;

            LOG.info("Distribute Finished for a total of:" + itemCount + " Items with:" + totalOutlinkCount
                    + " Outlinks Took:" + (System.currentTimeMillis() - timeStart) + " Milliseconds");

            LOG.info("Waiting for Loader Thread to Die");
            try {
                loaderThread.join();
            } catch (InterruptedException e) {
            }
            LOG.info("Loader Thread Died - Moving on...");
        } finally {

            for (PRValueOutputStream info : outputStreamVector) {

                if (info != null) {
                    info.close(false);
                }
            }

            if (fileSystem != null) {
                fileSystem.close();
            }
        }
    } catch (IOException e) {
        LOG.error("Exception caught while distributing outlinks:" + CCStringUtils.stringifyException(e));
        throw e;
    }
}
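
Two details of the example above are worth noting: the queue is bounded at 20000 entries, so queue.put(item) applies backpressure and the loader thread stalls whenever the writer falls behind; and termination is signaled in-band with sentinel items (an empty OutlinkItem for EOF, an error-carrying one for failures) rather than by closing the queue. A minimal sketch of that sentinel ("poison pill") idiom, with illustrative names:

import java.util.concurrent.LinkedBlockingQueue;

public class PoisonPillExample {
    // A distinct instance used as an end-of-stream marker, compared by identity.
    private static final String EOF = new String("EOF");

    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(1000);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 10000; i++) {
                    queue.put("record-" + i); // blocks while 1000 items are pending
                }
                queue.put(EOF); // signal end of stream
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        String item;
        while ((item = queue.take()) != EOF) { // identity check spots the sentinel
            // process item
        }
        producer.join();
    }
}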

From source file:com.groksolutions.grok.mobile.service.GrokDataSyncService.java

/**
 * Loads metric data from the server.
 *
 * @param metricId (optional) The metric Id to get the data. If metricId is {@code null} then
 *                 loads data for all metrics at once.
 * @param from     return records from this date
 * @param to       return records up to this date
 * @see HTMClient#getMetricData
 */
private void loadMetricData(final String metricId, final long from, final long to)
        throws HTMException, IOException {

    if (getClient() == null) {
        Log.w(TAG, "Not connected to any server yet");
        return;
    }
    final CoreDatabase database = HTMITApplication.getDatabase();

    // Blocking queue holding metric data waiting to be saved to the
    // database. This queue will be filled by the HTMClient as it downloads
    // the metric data, and it will be emptied by the databaseTask as it
    // saves the data to the database
    final LinkedBlockingQueue<MetricData> pending = new LinkedBlockingQueue<>(
            MAX_PENDING_METRIC_DATA_IO_BUFFER);

    // Background task used save metric data to the database. This task will
    // wait for metric data to arrive from the server and save them to the
    // database in batches until it finds the end of the queue marked by
    // METRIC_DATA_EOF or it times out after 60 seconds
    final Future<?> databaseTask = getService().getIOThreadPool().submit(new Runnable() {
        @Override
        public void run() {

            // Make the batch size 1 hour for all metrics or one week for
            // single metric
            int batchSize = metricId == null ? DataUtils.MILLIS_PER_HOUR : 24 * 7 * DataUtils.MILLIS_PER_HOUR;

            // Save metric data to the database in batches
            final List<MetricData> batch = new ArrayList<>();

            // Tracks batch timestamp. Once the metric timestamp is greater
            // than the batch timestamp, a new batch is created
            long batchTimestamp = 0;

            try {
                // Process all pending metric data until the METRIC_DATA_EOF
                // is found or a timeout is reached
                MetricData metricData;
                while ((metricData = pending.poll(60, TimeUnit.SECONDS)) != METRIC_DATA_EOF
                        && metricData != null) {
                    // Add metric data to batch regardless of the timestamp.
                    // At this point we may receive stale metric data with
                    // lower timestamp after we receive the latest data with
                    // the current timestamp. As a side effect, you may see
                    // gaps in the data as described in MER-1524
                    batch.add(metricData);
                    // Process batches
                    if (metricData.getTimestamp() > batchTimestamp) {
                        // Calculate next batch timestamp
                        batchTimestamp = metricData.getTimestamp() + batchSize;
                        if (database.addMetricDataBatch(batch)) {
                            Log.d(TAG, "Saving " + batch.size() + " new records");
                            // Notify receivers new data has arrived
                            fireMetricDataChangedEvent();
                        }
                        batch.clear();
                    }
                }
                // Last batch
                if (!batch.isEmpty()) {
                    if (database.addMetricDataBatch(batch)) {
                        Log.d(TAG, "Received " + batch.size() + " records");
                        // Notify receivers new data has arrived
                        fireMetricDataChangedEvent();
                    }
                }
            } catch (InterruptedException e) {
                Log.w(TAG, "Interrupted while loading metric data");
            }
        }
    });

    try {
        // Get new data from server
        getClient().getMetricData(metricId, new Date(from), new Date(to),
                new HTMClient.DataCallback<MetricData>() {
                    @Override
                    public boolean onData(MetricData metricData) {
                        // enqueue data for saving
                        try {
                            Metric metric = database.getMetric(metricData.getMetricId());
                            if (metric == null) {
                                Log.w(TAG, "Received data for unknown metric:" + metricData.getMetricId());
                                return true;
                            }
                            pending.put(metricData);
                        } catch (InterruptedException e) {
                            pending.clear();
                            Log.w(TAG, "Interrupted while loading metric data");
                            return false;
                        }
                        return true;
                    }
                });
        // Mark the end of the records
        pending.add(METRIC_DATA_EOF);
        // Wait for the database task to complete
        databaseTask.get();
    } catch (InterruptedException e) {
        Log.w(TAG, "Interrupted while loading metric data");
    } catch (ExecutionException e) {
        Log.e(TAG, "Failed to load metric data", e);
    }
}
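
The consumer side above pairs the EOF sentinel with a timed poll(60, TimeUnit.SECONDS) rather than take(), so a stalled producer cannot block the database task forever: a null return means the timeout expired and the loop exits. A condensed sketch of that idiom (types and names are stand-ins for the ones above):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedPollExample {
    private static final String EOF = new String("EOF"); // end-of-stream sentinel

    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> pending = new LinkedBlockingQueue<>(100);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    pending.put("metric-" + i); // blocks if the buffer is full
                }
                pending.put(EOF);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Unlike take(), poll(timeout, unit) returns null when nothing
        // arrives in time, so the loop also ends if the producer dies.
        String item;
        while ((item = pending.poll(60, TimeUnit.SECONDS)) != EOF && item != null) {
            System.out.println("saving " + item);
        }
        producer.join();
    }
}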
