Example usage for java.util.concurrent LinkedBlockingQueue put

List of usage examples for java.util.concurrent LinkedBlockingQueue put

Introduction

On this page you can find usage examples for java.util.concurrent.LinkedBlockingQueue.put.

Prototype

public void put(E e) throws InterruptedException 

Document

Inserts the specified element at the tail of this queue, waiting if necessary for space to become available.
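
A minimal sketch of this blocking behavior (the capacity of 2, the thread structure, and the item values are illustrative assumptions, not taken from the examples below):

import java.util.concurrent.LinkedBlockingQueue;

public class PutExample {
    public static void main(String[] args) throws InterruptedException {
        // Bounded queue: put(E) blocks once two elements are waiting.
        final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(2);

        Thread consumer = new Thread(() -> {
            try {
                for (int i = 0; i < 3; i++) {
                    // take() blocks until an element becomes available.
                    System.out.println("Consumed: " + queue.take());
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        queue.put("first");
        queue.put("second");
        // This call may block until the consumer removes an element,
        // unlike offer(E), which would return false immediately on a full queue.
        queue.put("third");

        consumer.join();
    }
}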

Usage

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java

@Test
public void testConsistencyCheckConcurrentGC() throws Exception {
    final long signalDone = -1;
    final List<Exception> asyncErrors = new ArrayList<>();
    final LinkedBlockingQueue<Long> toCompact = new LinkedBlockingQueue<>();
    final Semaphore awaitingCompaction = new Semaphore(0);

    interleavedStorage.flush();
    final long lastLogId = entryLogger.getLeastUnflushedLogId();

    final MutableInt counter = new MutableInt(0);
    entryLogger.setCheckEntryTestPoint((ledgerId, entryId, entryLogId, pos) -> {
        if (entryLogId < lastLogId) {
            if (counter.intValue() % 100 == 0) {
                try {
                    toCompact.put(entryLogId);
                    awaitingCompaction.acquire();
                } catch (InterruptedException e) {
                    asyncErrors.add(e);
                }
            }
            counter.increment();
        }
    });

    Thread mutator = new Thread(() -> {
        EntryLogCompactor compactor = new EntryLogCompactor(conf, entryLogger, interleavedStorage,
                entryLogger::removeEntryLog);
        while (true) {
            Long next = null;
            try {
                next = toCompact.take();
                if (next == null || next == signalDone) {
                    break;
                }
                compactor.compact(entryLogger.getEntryLogMetadata(next));
            } catch (BufferedChannelBase.BufferedChannelClosedException e) {
                // next was already removed, ignore
            } catch (Exception e) {
                asyncErrors.add(e);
                break;
            } finally {
                if (next != null) {
                    awaitingCompaction.release();
                }
            }
        }
    });
    mutator.start();

    List<LedgerStorage.DetectedInconsistency> inconsistencies = interleavedStorage
            .localConsistencyCheck(Optional.empty());
    for (LedgerStorage.DetectedInconsistency e : inconsistencies) {
        LOG.error("Found: {}", e);
    }
    Assert.assertEquals(0, inconsistencies.size());

    toCompact.offer(signalDone);
    mutator.join();
    for (Exception e : asyncErrors) {
        throw e;
    }

    if (!conf.isEntryLogPerLedgerEnabled()) {
        Assert.assertNotEquals(0,
                statsProvider.getCounter(BOOKIE_SCOPE + "." + STORAGE_SCRUB_PAGE_RETRIES).get().longValue());
    }
}

From source file:org.apache.bookkeeper.bookie.LedgerCacheTest.java

/**
 * Race where a flush would fail because a garbage collection occurred at
 * the wrong time.
 * {@link https://issues.apache.org/jira/browse/BOOKKEEPER-604}
 */
@Test(timeout = 60000)
public void testFlushDeleteRace() throws Exception {
    newLedgerCache();
    final AtomicInteger rc = new AtomicInteger(0);
    final LinkedBlockingQueue<Long> ledgerQ = new LinkedBlockingQueue<Long>(1);
    final byte[] masterKey = "masterKey".getBytes();
    Thread newLedgerThread = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < 1000 && rc.get() == 0; i++) {
                    ledgerCache.setMasterKey(i, masterKey);
                    ledgerQ.put((long) i);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in new ledger thread", e);
            }
        }
    };
    newLedgerThread.start();

    Thread flushThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    Long id = ledgerQ.peek();
                    if (id == null) {
                        continue;
                    }
                    LOG.info("Put entry for {}", id);
                    try {
                        ledgerCache.putEntryOffset((long) id, 1, 0);
                    } catch (Bookie.NoLedgerException nle) {
                        //ignore
                    }
                    ledgerCache.flushLedger(true);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in flush thread", e);
            }
        }
    };
    flushThread.start();

    Thread deleteThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    long id = ledgerQ.take();
                    LOG.info("Deleting {}", id);
                    ledgerCache.deleteLedger(id);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in delete thread", e);
            }
        }
    };
    deleteThread.start();

    newLedgerThread.join();
    assertEquals("Should have been no errors", rc.get(), 0);

    deleteThread.interrupt();
    flushThread.interrupt();
}

From source file:disko.flow.analyzers.FullRelexAnalyzer.java

public void process(AnalysisContext<TextDocument> ctx, Ports ports) throws InterruptedException {
    if (pool == null)
        init();
    final InputPort<EntityMaintainer> inputPort = ports.getInput(EntityAnalyzer.ENTITY_CHANNEL);
    final OutputPort<RelexTaskResult> outputPort = ports.getOutput(PARSE_CHANNEL);
    final LinkedBlockingQueue<Future<RelexTaskResult>> futureResults = new LinkedBlockingQueue<Future<RelexTaskResult>>(
            outputPort.getChannel().getCapacity());
    log.debug("Starting LinkGrammarAnalyzer...");
    exec.submit(new Callable<Integer>() {
        public Integer call() throws Exception {
            try {
                log.debug("LinkGrammarAnalyzer from channel + " + inputPort.getChannel());
                for (EntityMaintainer em = inputPort.take(); !inputPort.isEOS(em); em = inputPort.take())
                    submitTask(em, futureResults);
            } catch (Throwable t) {
                log.error("Unable to submit parsing task.", t);
            } finally {
                futureResults.put(new FutureRelexTaskResultEOS());
            }
            return (futureResults.size() - 1);
        }
    });

    try {
        while (true) {
            try {
                Future<RelexTaskResult> futureResult = futureResults.take();
                RelexTaskResult relexTaskResult;
                relexTaskResult = futureResult.get();
                if (relexTaskResult == null)
                    break;
                log.debug("LinkGrammarAnalyzer received " + relexTaskResult.index + ": "
                        + relexTaskResult.result.getParses().size() + " parses of sentences "
                        + relexTaskResult.sentence);
                relexTaskResult.result.setSentence(relexTaskResult.entityMaintainer.getOriginalSentence());
                outputPort.put(relexTaskResult);
            } catch (InterruptedException e) {
                for (Future<RelexTaskResult> future : futureResults) {
                    try {
                        future.cancel(true);
                    } catch (Throwable t) {
                        log.error(t);
                    }
                }
                break;
            }
        }
        for (Future<RelexTaskResult> future : futureResults) {
            future.cancel(true);
        }
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    } finally {
        outputPort.close();
        /*
         * exec.shutdown(); for (RelexContext context: pool){
         * context.getLinkParserClient().close(); }
         */
        destroy();
    }
}

From source file:com.turn.splicer.SuggestHttpWorker.java

@Override
public String call() throws Exception {
    LinkedBlockingQueue<String> TSDs;

    //TODO: have it implement its own RegionChecker to get hbase locality looking for metric names
    //lets have it just pick a random host
    String hostname = getRandomHost();
    TSDs = HttpWorker.TSDMap.get(hostname);

    if (TSDs == null) {
        LOG.error("We are not running TSDs on regionserver={}. Choosing a random host failed", hostname);
        return "{'error': 'Choice of hostname=" + hostname + " failed.'}";
    }

    String server = TSDs.take();
    String uri = "http://" + server + "/api/suggest?" + suggestQuery;

    CloseableHttpClient postman = HttpClientBuilder.create().build();
    try {
        HttpGet getRequest = new HttpGet(uri);

        LOG.info("Sending query=" + uri + " to TSD running on host=" + hostname);

        HttpResponse response = postman.execute(getRequest);

        if (response.getStatusLine().getStatusCode() != 200) {
            throw new RuntimeException(
                    "Failed : HTTP error code : " + response.getStatusLine().getStatusCode());
        }

        List<String> dl = IOUtils.readLines(response.getEntity().getContent());
        String result = StringUtils.join(dl, "");
        LOG.info("Result={}", result);

        return result;
    } finally {
        IOUtils.closeQuietly(postman);

        TSDs.put(server);
        LOG.info("Returned {} into the available queue", server);
    }
}

From source file:org.apache.kylin.engine.spark.SparkCubing.java

/** return hfile location */
private String build(JavaRDD<List<String>> javaRDD, final String cubeName, final String segmentId,
        final byte[][] splitKeys) throws Exception {
    CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv()).getCube(cubeName);
    CubeDesc cubeDesc = cubeInstance.getDescriptor();
    final CubeSegment cubeSegment = cubeInstance.getSegmentById(segmentId);
    List<TblColRef> baseCuboidColumn = Cuboid.findById(cubeDesc, Cuboid.getBaseCuboidId(cubeDesc)).getColumns();
    final Map<TblColRef, Integer> columnLengthMap = Maps.newHashMap();
    final CubeDimEncMap dimEncMap = cubeSegment.getDimensionEncodingMap();
    for (TblColRef tblColRef : baseCuboidColumn) {
        columnLengthMap.put(tblColRef, dimEncMap.get(tblColRef).getLengthOfEncoding());
    }
    final Map<TblColRef, Dictionary<String>> dictionaryMap = Maps.newHashMap();
    for (DimensionDesc dim : cubeDesc.getDimensions()) {
        // dictionary
        for (TblColRef col : dim.getColumnRefs()) {
            if (cubeDesc.getRowkey().isUseDictionary(col)) {
                Dictionary<String> dict = cubeSegment.getDictionary(col);
                if (dict == null) {
                    System.err.println("Dictionary for " + col + " was not found.");
                    continue;
                }
                dictionaryMap.put(col, dict);
                System.out.println("col:" + col + " dictionary size:" + dict.getSize());
            }
        }
    }

    for (MeasureDesc measureDesc : cubeDesc.getMeasures()) {
        FunctionDesc func = measureDesc.getFunction();
        List<TblColRef> colRefs = func.getMeasureType().getColumnsNeedDictionary(func);
        for (TblColRef col : colRefs) {
            dictionaryMap.put(col, cubeSegment.getDictionary(col));
        }
    }

    final JavaPairRDD<byte[], byte[]> javaPairRDD = javaRDD.glom()
            .mapPartitionsToPair(new PairFlatMapFunction<Iterator<List<List<String>>>, byte[], byte[]>() {

                @Override
                public Iterable<Tuple2<byte[], byte[]>> call(Iterator<List<List<String>>> listIterator)
                        throws Exception {
                    long t = System.currentTimeMillis();
                    prepare();

                    final CubeInstance cubeInstance = CubeManager.getInstance(KylinConfig.getInstanceFromEnv())
                            .getCube(cubeName);

                    LinkedBlockingQueue<List<String>> blockingQueue = new LinkedBlockingQueue<>();
                    System.out.println("load properties finished");
                    IJoinedFlatTableDesc flatDesc = EngineFactory.getJoinedFlatTableDesc(cubeSegment);
                    AbstractInMemCubeBuilder inMemCubeBuilder = new DoggedCubeBuilder(
                            cubeInstance.getDescriptor(), flatDesc, dictionaryMap);
                    final SparkCuboidWriter sparkCuboidWriter = new BufferedCuboidWriter(
                            new DefaultTupleConverter(cubeInstance.getSegmentById(segmentId), columnLengthMap));
                    Executors.newCachedThreadPool()
                            .submit(inMemCubeBuilder.buildAsRunnable(blockingQueue, sparkCuboidWriter));
                    try {
                        while (listIterator.hasNext()) {
                            for (List<String> row : listIterator.next()) {
                                blockingQueue.put(row);
                            }
                        }
                        blockingQueue.put(Collections.<String>emptyList());
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                    System.out.println("build partition cost: " + (System.currentTimeMillis() - t) + "ms");
                    return sparkCuboidWriter.getResult();
                }
            });

    KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv();
    Configuration conf = getConfigurationForHFile(cubeSegment.getStorageLocationIdentifier());
    Path path = new Path(kylinConfig.getHdfsWorkingDirectory(), "hfile_" + UUID.randomUUID().toString());
    Preconditions.checkArgument(!FileSystem.get(conf).exists(path));
    String url = conf.get("fs.defaultFS") + path.toString();
    System.out.println("use " + url + " as hfile");
    List<MeasureDesc> measuresDescs = cubeDesc.getMeasures();
    final int measureSize = measuresDescs.size();
    final String[] dataTypes = new String[measureSize];
    for (int i = 0; i < dataTypes.length; i++) {
        dataTypes[i] = measuresDescs.get(i).getFunction().getReturnType();
    }
    final MeasureAggregators aggs = new MeasureAggregators(measuresDescs);
    writeToHFile2(javaPairRDD, dataTypes, measureSize, aggs, splitKeys, conf, url);
    return url;
}

From source file:org.apache.pulsar.functions.instance.JavaInstanceRunnableProcessTest.java

@BeforeMethod
public void setup() throws Exception {
    mockProducers.clear();
    mockConsumers.clear();

    functionDetails = FunctionDetails.newBuilder().setAutoAck(true).setClassName(TestFunction.class.getName())
            .addInputs("test-src-topic").setName("test-function").setOutput("test-output-topic")
            .setProcessingGuarantees(ProcessingGuarantees.ATLEAST_ONCE).setTenant("test-tenant")
            .setNamespace("test-namespace").build();

    config = new InstanceConfig();
    config.setFunctionId("test-function-id");
    config.setFunctionVersion("v1");
    config.setInstanceId("test-instance-id");
    config.setMaxBufferedTuples(1000);
    config.setFunctionDetails(functionDetails);

    mockClient = mock(PulsarClientImpl.class);

    // mock FunctionCacheManager
    fnCache = mock(FunctionCacheManager.class);
    doNothing().when(fnCache).registerFunctionInstance(anyString(), anyString(), anyList(), anyList());
    doNothing().when(fnCache).unregisterFunctionInstance(anyString(), anyString());

    ClassLoader clsLoader = JavaInstanceRunnableTest.class.getClassLoader();
    when(fnCache.getClassLoader(anyString())).thenReturn(clsLoader);

    // mock producer & consumer
    when(mockClient.createProducer(anyString(), any(ProducerConfiguration.class)))
            .thenAnswer(invocationOnMock -> {
                String topic = invocationOnMock.getArgumentAt(0, String.class);
                ProducerConfiguration conf = invocationOnMock.getArgumentAt(1, ProducerConfiguration.class);
                String producerName = conf.getProducerName();

                Pair<String, String> pair = Pair.of(topic, producerName);
                ProducerInstance producerInstance = mockProducers.get(pair);
                if (null == producerInstance) {
                    Producer producer = mock(Producer.class);
                    LinkedBlockingQueue<Message> msgQueue = new LinkedBlockingQueue<>();
                    final ProducerInstance instance = new ProducerInstance(producer, msgQueue);
                    producerInstance = instance;
                    when(producer.getProducerName()).thenReturn(producerName);
                    when(producer.getTopic()).thenReturn(topic);
                    when(producer.sendAsync(any(Message.class))).thenAnswer(invocationOnMock1 -> {
                        Message msg = invocationOnMock1.getArgumentAt(0, Message.class);
                        log.info("producer send message {}", msg);

                        CompletableFuture<MessageId> future = new CompletableFuture<>();
                        instance.addSendFuture(future);
                        msgQueue.put(msg);
                        return future;
                    });
                    when(producer.closeAsync()).thenReturn(FutureUtils.Void());

                    mockProducers.put(pair, producerInstance);
                }
                return producerInstance.getProducer();
            });
    when(mockClient.subscribe(anyString(), anyString(), any(ConsumerConfiguration.class)))
            .thenAnswer(invocationOnMock -> {
                String topic = invocationOnMock.getArgumentAt(0, String.class);
                String subscription = invocationOnMock.getArgumentAt(1, String.class);
                ConsumerConfiguration conf = invocationOnMock.getArgumentAt(2, ConsumerConfiguration.class);

                Pair<String, String> pair = Pair.of(topic, subscription);
                ConsumerInstance consumerInstance = mockConsumers.get(pair);
                if (null == consumerInstance) {
                    Consumer consumer = mock(Consumer.class);

                    ConsumerInstance instance = new ConsumerInstance(consumer, conf);
                    consumerInstance = instance;
                    when(consumer.getTopic()).thenReturn(topic);
                    when(consumer.getSubscription()).thenReturn(subscription);
                    when(consumer.acknowledgeAsync(any(Message.class))).thenAnswer(invocationOnMock1 -> {
                        Message msg = invocationOnMock1.getArgumentAt(0, Message.class);
                        log.info("Ack message {} : message id = {}", msg, msg.getMessageId());

                        instance.removeMessage(msg.getMessageId());
                        return FutureUtils.Void();
                    });
                    when(consumer.acknowledgeCumulativeAsync(any(Message.class)))
                            .thenAnswer(invocationOnMock1 -> {
                                Message msg = invocationOnMock1.getArgumentAt(0, Message.class);
                                log.info("Ack message cumulatively message id = {}", msg, msg.getMessageId());

                                instance.removeMessagesBefore(msg.getMessageId());
                                return FutureUtils.Void();
                            });
                    when(consumer.closeAsync()).thenAnswer(invocationOnMock1 -> {
                        mockConsumers.remove(pair, instance);
                        return FutureUtils.Void();
                    });
                    doAnswer(invocationOnMock1 -> {
                        mockConsumers.remove(pair, instance);
                        return null;
                    }).when(consumer).close();

                    mockConsumers.put(pair, consumerInstance);
                }
                return consumerInstance.getConsumer();
            });

    //
    // Mock State Store
    //

    StorageClientBuilder mockBuilder = mock(StorageClientBuilder.class);
    when(mockBuilder.withNamespace(anyString())).thenReturn(mockBuilder);
    when(mockBuilder.withSettings(any(StorageClientSettings.class))).thenReturn(mockBuilder);
    this.mockStorageClient = mock(StorageClient.class);
    when(mockBuilder.build()).thenReturn(mockStorageClient);
    StorageAdminClient adminClient = mock(StorageAdminClient.class);
    when(mockBuilder.buildAdmin()).thenReturn(adminClient);

    PowerMockito.mockStatic(StorageClientBuilder.class);
    PowerMockito.when(StorageClientBuilder.newBuilder()).thenReturn(mockBuilder);

    when(adminClient.getStream(anyString(), anyString()))
            .thenReturn(FutureUtils.value(StreamProperties.newBuilder().build()));
    mockTable = mock(Table.class);
    when(mockStorageClient.openTable(anyString())).thenReturn(FutureUtils.value(mockTable));

    //
    // Mock Function Stats
    //

    mockFunctionStats = spy(new FunctionStats());
    PowerMockito.whenNew(FunctionStats.class).withNoArguments().thenReturn(mockFunctionStats);

    // Mock message builder
    PowerMockito.mockStatic(MessageBuilder.class);
    PowerMockito.when(MessageBuilder.create()).thenAnswer(invocationOnMock -> {

        Message msg = mock(Message.class);
        MessageBuilder builder = mock(MessageBuilder.class);
        when(builder.setContent(any(byte[].class))).thenAnswer(invocationOnMock1 -> {
            byte[] content = invocationOnMock1.getArgumentAt(0, byte[].class);
            when(msg.getData()).thenReturn(content);
            return builder;
        });
        when(builder.setSequenceId(anyLong())).thenAnswer(invocationOnMock1 -> {
            long seqId = invocationOnMock1.getArgumentAt(0, long.class);
            when(msg.getSequenceId()).thenReturn(seqId);
            return builder;
        });
        when(builder.setProperty(anyString(), anyString())).thenAnswer(invocationOnMock1 -> {
            String key = invocationOnMock1.getArgumentAt(0, String.class);
            String value = invocationOnMock1.getArgumentAt(1, String.class);
            when(msg.getProperty(eq(key))).thenReturn(value);
            return builder;
        });
        when(builder.build()).thenReturn(msg);
        return builder;
    });
}

From source file:com.adaptris.http.RequestDispatcher.java

public void run() {
    RequestProcessor rp = null;
    HttpSession session = null;
    LinkedBlockingQueue queue = null;
    String oldName = Thread.currentThread().getName();
    Thread.currentThread().setName(threadName);
    do {
        try {
            if (logR.isTraceEnabled()) {
                logR.trace("Reading HTTP Request");
            }

            session = new ServerSession();
            session.setSocket(socket);
            //        String uri = session.getRequestLine().getURI();

            String file = session.getRequestLine().getFile();
            queue = (LinkedBlockingQueue) requestProcessors.get(file);

            if (queue == null) {
                // Get a default one, if any
                queue = (LinkedBlockingQueue) requestProcessors.get("*");
                if (queue == null) {
                    doResponse(session, HttpURLConnection.HTTP_NOT_FOUND);
                    break;
                }
            }
            rp = waitForRequestProcessor(queue);
            if (!parent.isAlive()) {
                doResponse(session, HttpURLConnection.HTTP_INTERNAL_ERROR);
                break;
            }
            rp.processRequest(session);
            session.commit();
        } catch (Exception e) {
            // if an exception occurs, then it's pretty much a fatal error for 
            // this session
            // we ignore any output that it might have setup, and use our own
            try {
                logR.error(e.getMessage(), e);
                if (session != null) {
                    doResponse(session, HttpURLConnection.HTTP_INTERNAL_ERROR);
                }
            } catch (Exception e2) {
                ;
            }
        }
    } while (false);
    try {
        if (rp != null && queue != null) {
            queue.put(rp);
            logR.trace(rp + " put back on to queue");
        }
    } catch (Exception e) {
        ;
    }
    session = null;
    queue = null;
    rp = null;
    Thread.currentThread().setName(oldName);
    return;
}

From source file:org.commoncrawl.service.pagerank.slave.PageRankUtils.java

public static void calculateRank(final Configuration conf, final FileSystem fs, final PRValueMap valueMap,
        final File jobLocalDir, final String jobWorkPath, final int nodeIndex, final int slaveCount,
        final int iterationNumber, final SuperDomainFilter superDomainFilter,
        final ProgressAndCancelCheckCallback progressAndCancelCallback) throws IOException {

    final LinkedBlockingQueue<CalculateRankQueueItem> readAheadQueue = new LinkedBlockingQueue<CalculateRankQueueItem>(
            20);

    // build stream vector ... 
    Vector<Path> streamVector = buildCalculationInputStreamVector(jobLocalDir, jobWorkPath, nodeIndex,
            slaveCount, iterationNumber);

    // construct a reader ... 
    final SortedPRInputReader reader = new SortedPRInputReader(conf, fs, streamVector, true);

    Thread readerThread = new Thread(new Runnable() {

        @Override
        public void run() {

            IOException exceptionOut = null;
            try {

                TargetAndSources target = null;

                while ((target = reader.readNextTarget()) != null) {
                    try {
                        readAheadQueue.put(new CalculateRankQueueItem(target));
                    } catch (InterruptedException e) {
                    }
                }
            } catch (IOException e) {
                LOG.error(CCStringUtils.stringifyException(e));
                exceptionOut = e;
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
            try {
                readAheadQueue.put(new CalculateRankQueueItem(exceptionOut));
            } catch (InterruptedException e1) {
            }

        }
    });

    readerThread.start();

    int failedUpdates = 0;
    int totalUpdates = 0;
    long iterationStart = System.currentTimeMillis();
    boolean cancelled = false;

    while (!cancelled) {

        CalculateRankQueueItem queueItem = null;
        try {
            queueItem = readAheadQueue.take();
        } catch (InterruptedException e) {
        }

        if (queueItem._next != null) {
            totalUpdates++;
            //LOG.info("Target: DomainHash:" + target.target.getDomainHash() + " URLHash:" + target.target.getUrlHash() + " ShardIdx:" + ((target.target.hashCode() & Integer.MAX_VALUE) % CrawlEnvironment.PR_NUMSLAVES)); 
            // now accumulate rank from stream into value map 
            if (!accumulateRank(valueMap, queueItem._next, superDomainFilter)) {
                failedUpdates++;
                LOG.error("**TotalUpdates:" + totalUpdates + " Failed Updates:" + failedUpdates);
            }

            if ((totalUpdates + failedUpdates) % 10000 == 0) {

                float percentComplete = (float) reader._totalBytesRead / (float) reader._totalBytesToRead;
                if (progressAndCancelCallback != null) {
                    cancelled = progressAndCancelCallback.updateProgress(percentComplete);
                    if (cancelled) {
                        LOG.info("Cancel check callback returned true");
                    }
                }

                long timeEnd = System.currentTimeMillis();
                int milliseconds = (int) (timeEnd - iterationStart);

                //LOG.info("Accumulate PR for 10000 Items Took:" + milliseconds + " Milliseconds QueueSize:" + readAheadQueue.size());

                iterationStart = System.currentTimeMillis();
            }
        } else {
            if (queueItem._e != null) {
                LOG.error(CCStringUtils.stringifyException(queueItem._e));
                throw queueItem._e;
            } else {
                // now finally pagerank value in value map ... 
                valueMap.finalizePageRank();
            }
            break;
        }
    }
    try {
        readerThread.join();
    } catch (InterruptedException e) {
    }
}

From source file:com.numenta.taurus.service.TaurusDataSyncService.java

/**
 * Load all instance data from the database
 */
@Override
protected void loadAllData() throws HTMException, IOException {

    Context context = TaurusApplication.getContext();
    if (context == null) {
        // Should not happen.
        // We need application context to run.
        return;
    }

    // Get last known date from the database
    final TaurusDatabase database = TaurusApplication.getDatabase();
    if (database == null) {
        // Should not happen.
        // We need application context to run.
        return;
    }
    long from = database.getLastTimestamp();

    // Get current time
    final long now = System.currentTimeMillis();

    // The server updates the instance data table into hourly buckets as the models process
    // data. This may leave the last hour with outdated values when the server updates the
    // instance data table after we start loading the new hourly bucket.
    // To make sure the last hour bucket is updated we should get data since last update up to
    // now and on when the time is above a certain threshold (15 minutes) also download the
    // previous hour once.
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);

    // Check if we need to update the previous hour
    long previousHourThreshold = prefs.getLong(PREF_PREVIOUS_HOUR_THRESHOLD, now);
    if (now >= previousHourThreshold) {
        // Download the previous hour
        from -= DataUtils.MILLIS_PER_HOUR;

        // Set threshold time to minute 15 of next hour
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(now);
        calendar.add(Calendar.HOUR, 1);
        calendar.set(Calendar.MINUTE, 15);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        prefs.edit().putLong(PREF_PREVIOUS_HOUR_THRESHOLD, calendar.getTimeInMillis()).apply();
    }
    final long oldestTimestamp = DataUtils
            .floorTo60minutes(now - TaurusApplication.getNumberOfDaysToSync() * DataUtils.MILLIS_PER_DAY);

    // Check if we need to catch up and download old data
    if (database.getFirstTimestamp() > oldestTimestamp) {
        from = oldestTimestamp;
    }

    // Don't get date older than NUMBER_OF_DAYS_TO_SYNC
    from = Math.max(from, oldestTimestamp);

    // Blocking queue holding data waiting to be saved to the database.
    // This queue will be filled by the TaurusClient as it downloads data and it will be
    // emptied by the databaseTask as is saves data to the database
    final LinkedBlockingQueue<InstanceData> pending = new LinkedBlockingQueue<InstanceData>(
            PENDING_IO_BUFFER_SIZE);

    // Background task used save data to the database. This task will wait for data to arrive
    // from the server and save them to the database in batches until it finds the end of the
    // queue marked by DATA_EOF or it times out after 60 seconds
    final Future<?> databaseTask = getService().getIOThreadPool().submit(new Runnable() {
        @Override
        public void run() {
            // Save data in batches, one day at the time
            final List<InstanceData> batch = new ArrayList<InstanceData>();
            int batchSize = -DataUtils.MILLIS_PER_HOUR;

            // Tracks batch timestamp. Once the data timestamp is greater than the batch
            // timestamp, a new batch is created
            long batchTimestamp = now - DataUtils.MILLIS_PER_HOUR;

            try {
                // Process all pending data until the DATA_EOF is found or a timeout is reached
                InstanceData data;
                while ((data = pending.poll(60, TimeUnit.SECONDS)) != DATA_EOF && data != null) {
                    batch.add(data);
                    // Process batches
                    if (data.getTimestamp() < batchTimestamp) {
                        // Calculate next batch timestamp
                        batchTimestamp = data.getTimestamp() + batchSize;
                        if (database.addInstanceDataBatch(batch)) {
                            // Notify receivers new data has arrived
                            fireInstanceDataChangedEvent();
                        }
                        batch.clear();
                    }
                }
                // Last batch
                if (!batch.isEmpty()) {
                    if (database.addInstanceDataBatch(batch)) {
                        // Notify receivers new data has arrived
                        fireInstanceDataChangedEvent();
                    }
                }
            } catch (InterruptedException e) {
                Log.w(TAG, "Interrupted while loading data");
            }
        }
    });

    try {
        // Get new data from server
        Log.d(TAG, "Start downloading data from " + from);
        TaurusClient client = getClient();
        client.getAllInstanceData(new Date(from), new Date(now), false,
                new HTMClient.DataCallback<InstanceData>() {
                    @Override
                    public boolean onData(InstanceData data) {
                        // enqueue data for saving
                        try {
                            pending.put(data);
                        } catch (InterruptedException e) {
                            pending.clear();
                            Log.w(TAG, "Interrupted while loading data");
                            return false;
                        }
                        return true;
                    }
                });
        // Mark the end of the records
        pending.add(DATA_EOF);
        // Wait for the database task to complete
        databaseTask.get();
        // Clear client cache
        client.clearCache();
    } catch (InterruptedException e) {
        Log.w(TAG, "Interrupted while loading data");
    } catch (ExecutionException e) {
        Log.e(TAG, "Failed to load data", e);
    }
}

From source file:org.ala.spatial.analysis.index.LayerDistanceIndex.java

/**
 * @param threadcount    number of threads to run analysis.
 * @param onlyThesePairs array of distances to run as fieldId1 + " " +
 *                       fieldId2 where fieldId1.compareTo(fieldId2) &lt; 0, or null for all missing
 *                       distances.
 * @throws InterruptedException
 */
public void occurrencesUpdate(int threadcount, String[] onlyThesePairs) throws InterruptedException {

    //create distances file if it does not exist.
    File layerDistancesFile = new File(AlaspatialProperties.getAnalysisWorkingDir() + LAYER_DISTANCE_FILE);
    if (!layerDistancesFile.exists()) {
        try {
            FileWriter fw = new FileWriter(layerDistancesFile);
            fw.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    Map<String, Double> map = loadDistances();

    LinkedBlockingQueue<String> todo = new LinkedBlockingQueue<String>();

    if (onlyThesePairs != null && onlyThesePairs.length > 0) {
        for (String s : onlyThesePairs) {
            todo.add(s);
        }
    } else {
        //find all environmental layer analysis files
        File root = new File(AlaspatialProperties.getAnalysisLayersDir());
        File[] dirs = root.listFiles(new FileFilter() {

            @Override
            public boolean accept(File pathname) {
                return pathname != null && pathname.isDirectory();
            }
        });

        HashMap<String, String> domains = new HashMap<String, String>();
        for (File dir : dirs) {
            //iterate through files so we get everything
            File[] files = new File(dir.getPath()).listFiles(new FileFilter() {

                @Override
                public boolean accept(File pathname) {
                    return pathname.getName().endsWith(".grd") && pathname.getName().startsWith("el");
                }
            });

            for (int i = 0; i < files.length; i++) {
                for (int j = i + 1; j < files.length; j++) {
                    String file1 = files[i].getName().replace(".grd", "");
                    String file2 = files[j].getName().replace(".grd", "");

                    //only operate on file names that are valid fields
                    if (Client.getFieldDao().getFieldById(file1) != null
                            && Client.getFieldDao().getFieldById(file2) != null) {

                        String domain1 = domains.get(file1);
                        if (domain1 == null) {
                            String pid1 = Client.getFieldDao().getFieldById(file1).getSpid();
                            domain1 = Client.getLayerDao().getLayerById(Integer.parseInt(pid1)).getdomain();
                            domains.put(file1, domain1);
                        }
                        String domain2 = domains.get(file2);
                        if (domain2 == null) {
                            String pid2 = Client.getFieldDao().getFieldById(file2).getSpid();
                            domain2 = Client.getLayerDao().getLayerById(Integer.parseInt(pid2)).getdomain();
                            domains.put(file2, domain2);
                        }

                        String key = (file1.compareTo(file2) < 0) ? file1 + " " + file2 : file2 + " " + file1;

                        //domain test
                        if (isSameDomain(parseDomain(domain1), parseDomain(domain2))) {
                            if (!map.containsKey(key) && !todo.contains(key)) {
                                todo.put(key);
                            }
                        }
                    }
                }
            }
        }
    }

    LinkedBlockingQueue<String> toDisk = new LinkedBlockingQueue<String>();
    CountDownLatch cdl = new CountDownLatch(todo.size());
    CalcThread[] threads = new CalcThread[threadcount];
    for (int i = 0; i < threadcount; i++) {
        threads[i] = new CalcThread(cdl, todo, toDisk);
        threads[i].start();
    }

    ToDiskThread toDiskThread = new ToDiskThread(
            AlaspatialProperties.getAnalysisWorkingDir() + LAYER_DISTANCE_FILE, toDisk);
    toDiskThread.start();

    cdl.await();

    for (int i = 0; i < threadcount; i++) {
        threads[i].interrupt();
    }

    toDiskThread.interrupt();
}