List of usage examples for java.util.concurrent ConcurrentLinkedQueue.isEmpty()
public boolean isEmpty()
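Before the project examples, here is a minimal self-contained sketch of the typical usage patterns; the class name IsEmptyDemo and the queued strings are illustrative only, not taken from any of the sources below. Two points worth noting: isEmpty() is a point-in-time snapshot, so a check-then-act sequence can race with concurrent consumers, and it is preferable to size() == 0 because size() is not a constant-time operation on this class.

import java.util.concurrent.ConcurrentLinkedQueue;

public class IsEmptyDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<String>();
        System.out.println(queue.isEmpty()); // true: nothing queued yet

        queue.add("a");
        queue.add("b");
        System.out.println(queue.isEmpty()); // false

        // Single-consumer drain: guarding the loop with isEmpty() is safe
        // when no other thread removes elements concurrently.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }

        // Multi-consumer drain: null-check poll() instead, because the queue
        // may become empty between the isEmpty() check and the poll() call.
        queue.add("c");
        String head;
        while ((head = queue.poll()) != null) {
            System.out.println(head);
        }
    }
}

The examples below show both patterns: the single-threaded drains guard on isEmpty(), while the threaded test at the end null-checks poll() and uses isEmpty() only in its termination condition.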
From source file: edu.cornell.mannlib.vitro.webapp.rdfservice.impl.sparql.RDFServiceSparql.java

private List<Statement> sort(List<Statement> stmts) {
    List<Statement> output = new ArrayList<Statement>();
    int originalSize = stmts.size();
    if (originalSize == 1) {
        return stmts;
    }
    List<Statement> remaining = stmts;
    ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource> subjQueue =
            new ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource>();
    // Seed the queue with the first named (URI) subject found.
    for (Statement stmt : remaining) {
        if (stmt.getSubject().isURIResource()) {
            subjQueue.add(stmt.getSubject());
            break;
        }
    }
    if (subjQueue.isEmpty()) {
        throw new RuntimeException("No named subject in statement patterns");
    }
    while (remaining.size() > 0) {
        if (subjQueue.isEmpty()) {
            subjQueue.add(remaining.get(0).getSubject());
        }
        // Move every statement about the current subject to the output,
        // queuing its resource objects as subjects to process next.
        while (!subjQueue.isEmpty()) {
            com.hp.hpl.jena.rdf.model.Resource subj = subjQueue.poll();
            List<Statement> temp = new ArrayList<Statement>();
            for (Statement stmt : remaining) {
                if (stmt.getSubject().equals(subj)) {
                    output.add(stmt);
                    if (stmt.getObject().isResource()) {
                        subjQueue.add((com.hp.hpl.jena.rdf.model.Resource) stmt.getObject());
                    }
                } else {
                    temp.add(stmt);
                }
            }
            remaining = temp;
        }
    }
    if (output.size() != originalSize) {
        throw new RuntimeException(
                "original list size was " + originalSize + " but sorted size is " + output.size());
    }
    return output;
}
From source file: metlos.executors.batch.BatchExecutorTest.java

private void runSimpleDelayTest(int nofThreads) throws Exception {
    final ConcurrentLinkedQueue<Long> executionTimes = new ConcurrentLinkedQueue<Long>();
    Runnable task = new Runnable() {
        @Override
        public void run() {
            executionTimes.add(System.currentTimeMillis());
        }
    };
    BatchExecutor ex = getExecutor(nofThreads);
    // Start running the task... the task should "take" 0ms and there should be a delay
    // of 10ms between executions... the executionTimes collection should therefore
    // contain time stamps 10ms apart from each other.
    ex.submitWithPreferedDurationAndFixedDelay(Collections.singleton(task), 0, 0, 10, TimeUnit.MILLISECONDS);
    Thread.sleep(1000);
    ex.shutdown();
    assert executionTimes.size() > 1 : "There should have been more than 1 task executed.";
    long minDelay = 8; // 10ms +- 20%
    long maxDelay = 12;
    int nofElements = executionTimes.size();
    long previousTime = executionTimes.poll();
    long cummulativeDiff = 0;
    while (!executionTimes.isEmpty()) {
        long thisTime = executionTimes.poll();
        long diff = thisTime - previousTime;
        cummulativeDiff += diff;
        previousTime = thisTime;
    }
    long averageDelay = cummulativeDiff / (nofElements - 1);
    assert minDelay < averageDelay && averageDelay < maxDelay : "The average delay should be in <" + minDelay
            + ", " + maxDelay + "> but was " + averageDelay + ".";
}
From source file: com.ibm.crail.tools.CrailBenchmark.java

void readMultiStream(String filename, int size, int loop, int batch) throws Exception {
    System.out.println(
            "readMultiStream, filename " + filename + ", size " + size + ", loop " + loop + ", batch " + batch);

    // warmup
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    for (int i = 0; i < warmup; i++) {
        CrailBuffer buf = fs.allocateBuffer().limit(size).slice();
        bufferQueue.add(buf);
    }
    warmUp(filename, warmup, bufferQueue);
    while (!bufferQueue.isEmpty()) {
        CrailBuffer buf = bufferQueue.poll();
        fs.freeBuffer(buf);
    }

    // benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    CrailBuffer _buf = null;
    if (size == CrailConstants.BUFFER_SIZE) {
        _buf = fs.allocateBuffer();
    } else if (size < CrailConstants.BUFFER_SIZE) {
        CrailBuffer __buf = fs.allocateBuffer();
        __buf.clear().limit(size);
        _buf = __buf.slice();
    } else {
        _buf = OffHeapBuffer.wrap(ByteBuffer.allocateDirect(size));
    }
    ByteBuffer buf = _buf.getByteBuffer();
    for (int i = 0; i < loop; i++) {
        CrailBufferedInputStream multiStream = fs.lookup(filename).get().asMultiFile().getMultiStream(batch);
        double sumbytes = 0;
        long _sumbytes = 0;
        double ops = 0;
        buf.clear();
        long start = System.currentTimeMillis();
        int ret = multiStream.read(buf);
        while (ret >= 0) {
            sumbytes = sumbytes + ret;
            long _ret = (long) ret;
            _sumbytes += _ret;
            ops = ops + 1.0;
            buf.clear();
            ret = multiStream.read(buf);
        }
        long end = System.currentTimeMillis();
        multiStream.close();
        double executionTime = ((double) (end - start)) / 1000.0;
        double throughput = 0.0;
        double latency = 0.0;
        double sumbits = sumbytes * 8.0;
        if (executionTime > 0) {
            throughput = sumbits / executionTime / 1000.0 / 1000.0;
            latency = 1000000.0 * executionTime / ops;
        }
        System.out.println("round " + i + ":");
        System.out.println("bytes read " + _sumbytes);
        System.out.println("execution time " + executionTime);
        System.out.println("ops " + ops);
        System.out.println("throughput " + throughput);
        System.out.println("latency " + latency);
    }
    fs.getStatistics().print("close");
}
From source file: org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.java

/**
 * Sweep phase of gc candidate deletion.
 * <p>
 * Performs the following steps depending upon the type of the blob store; refer to
 * {@link org.apache.jackrabbit.oak.plugins.blob.SharedDataStore.Type}:
 *
 * <ul>
 * <li>Shared</li>
 * <li>
 * <ul>
 * <li> Merge all marked references (from the mark phase run independently) available in the data store meta
 * store (from all configured independent repositories).
 * <li> Retrieve all blob ids available.
 * <li> Diffs the 2 sets above to retrieve the list of blob ids not used.
 * <li> Deletes only blobs created after
 * (earliest time stamp of the marked references - #maxLastModifiedInterval) from the above set.
 * </ul>
 * </li>
 *
 * <li>Default</li>
 * <li>
 * <ul>
 * <li> Mark phase already run.
 * <li> Retrieve all blob ids available.
 * <li> Diffs the 2 sets above to retrieve the list of blob ids not used.
 * <li> Deletes only blobs created after
 * (time stamp of the marked references - #maxLastModifiedInterval).
 * </ul>
 * </li>
 * </ul>
 *
 * @param fs the garbage collector file state
 * @param markStart the start time of mark to take as reference for deletion
 * @return the number of blobs deleted
 * @throws Exception the exception
 */
protected long sweep(GarbageCollectorFileState fs, long markStart) throws Exception {
    long earliestRefAvailTime;
    // Merge all the blob references available from all the reference files in the data store meta store.
    // Only go ahead if the merge succeeded.
    try {
        earliestRefAvailTime = GarbageCollectionType.get(blobStore).mergeAllMarkedReferences(blobStore, fs);
        LOG.debug("Earliest reference available for timestamp [{}]", earliestRefAvailTime);
        earliestRefAvailTime = (earliestRefAvailTime < markStart ? earliestRefAvailTime : markStart);
    } catch (Exception e) {
        return 0;
    }

    // Find all blob references after iterating over the whole repository
    (new BlobIdRetriever(fs)).call();

    // Calculate the references not used
    difference(fs);
    long count = 0;
    long deleted = 0;

    long lastMaxModifiedTime = getLastMaxModifiedTime(earliestRefAvailTime);
    LOG.debug("Starting sweep phase of the garbage collector");
    LOG.debug("Sweeping blobs with modified time > than the configured max deleted time ({}). ",
            timestampToString(lastMaxModifiedTime));

    ConcurrentLinkedQueue<String> exceptionQueue = new ConcurrentLinkedQueue<String>();

    LineIterator iterator = FileUtils.lineIterator(fs.getGcCandidates(), Charsets.UTF_8.name());
    List<String> ids = newArrayList();

    while (iterator.hasNext()) {
        ids.add(iterator.next());
        if (ids.size() >= getBatchCount()) {
            count += ids.size();
            deleted += sweepInternal(ids, exceptionQueue, lastMaxModifiedTime);
            ids = newArrayList();
        }
    }
    if (!ids.isEmpty()) {
        count += ids.size();
        deleted += sweepInternal(ids, exceptionQueue, lastMaxModifiedTime);
    }

    BufferedWriter writer = null;
    try {
        if (!exceptionQueue.isEmpty()) {
            writer = Files.newWriter(fs.getGarbage(), Charsets.UTF_8);
            saveBatchToFile(newArrayList(exceptionQueue), writer);
        }
    } finally {
        LineIterator.closeQuietly(iterator);
        IOUtils.closeQuietly(writer);
    }

    if (!exceptionQueue.isEmpty()) {
        LOG.warn("Unable to delete some blobs entries from the blob store. Details around such blob entries can "
                + "be found in [{}]", fs.getGarbage().getAbsolutePath());
    }
    if (count != deleted) {
        LOG.warn("Deleted only [{}] blobs entries from the [{}] candidates identified. This may happen if blob "
                + "modified time is > than the max deleted time ({})", deleted, count,
                timestampToString(lastMaxModifiedTime));
    }

    // Remove all the merged marked references
    GarbageCollectionType.get(blobStore).removeAllMarkedReferences(blobStore);
    LOG.debug("Ending sweep phase of the garbage collector");
    return deleted;
}
From source file: org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void groupAndSend(Stream<Action> actions, int tries) {
    long locateTimeoutNs;
    if (operationTimeoutNs > 0) {
        locateTimeoutNs = remainingTimeNs();
        if (locateTimeoutNs <= 0) {
            failAll(actions, tries);
            return;
        }
    } else {
        locateTimeoutNs = -1L;
    }
    ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
    ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
    CompletableFuture.allOf(
            actions.map(action -> conn.getLocator()
                    .getRegionLocation(tableName, action.getAction().getRow(), RegionLocateType.CURRENT,
                            locateTimeoutNs)
                    .whenComplete((loc, error) -> {
                        if (error != null) {
                            error = translateException(error);
                            if (error instanceof DoNotRetryIOException) {
                                failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), "");
                                return;
                            }
                            addError(action, error, null);
                            locateFailed.add(action);
                        } else {
                            computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new)
                                    .addAction(loc, action);
                        }
                    })).toArray(CompletableFuture[]::new))
            .whenComplete((v, r) -> {
                if (!actionsByServer.isEmpty()) {
                    send(actionsByServer, tries);
                }
                if (!locateFailed.isEmpty()) {
                    tryResubmit(locateFailed.stream(), tries);
                }
            });
}
From source file: com.btoddb.fastpersitentqueue.JournalMgrIT.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);
    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}