List of usage examples for java.util.concurrent.ConcurrentLinkedDeque
public ConcurrentLinkedDeque()
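Constructs an initially empty deque. A minimal sketch (not taken from any of the source files below; the class name is illustrative) of what the no-argument constructor gives you: an unbounded, thread-safe, non-blocking deque.

import java.util.concurrent.ConcurrentLinkedDeque;

public class DequeBasics {
    public static void main(String[] args) {
        ConcurrentLinkedDeque<String> deque = new ConcurrentLinkedDeque<>();
        deque.addFirst("head"); // push onto the head
        deque.addLast("tail");  // append to the tail
        System.out.println(deque.pollFirst()); // head
        System.out.println(deque.pollLast());  // tail
        System.out.println(deque.pollFirst()); // null: empty, and poll never blocks
    }
}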
From source file:com.cloudera.livy.rsc.driver.RSCDriver.java
public RSCDriver(SparkConf conf, RSCConf livyConf) throws Exception {
    Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwx------");
    this.localTmpDir = Files.createTempDirectory("rsc-tmp",
            PosixFilePermissions.asFileAttribute(perms)).toFile();

    this.executor = Executors.newCachedThreadPool();
    this.jobQueue = new LinkedList<>();
    this.clients = new ConcurrentLinkedDeque<>();
    this.serializer = new Serializer();

    this.conf = conf;
    this.livyConf = livyConf;
    this.jcLock = new Object();
    this.shutdownLock = new Object();

    this.activeJobs = new ConcurrentHashMap<>();
    this.bypassJobs = new ConcurrentLinkedDeque<>();
    this.idleTimeout = new AtomicReference<>();
}
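Here ConcurrentLinkedDeque serves as a registry of connected clients that can be mutated and iterated from different threads. A minimal sketch of that registry pattern, assuming illustrative names (this is not Livy's actual API):

import java.util.concurrent.ConcurrentLinkedDeque;

public class ClientRegistry {
    private final ConcurrentLinkedDeque<String> clients = new ConcurrentLinkedDeque<>();

    public void register(String client) { clients.add(client); }
    public void deregister(String client) { clients.remove(client); }

    public void broadcast(String msg) {
        // The iterator is weakly consistent: it never throws
        // ConcurrentModificationException, but it may miss clients
        // registered while the loop is running.
        for (String client : clients) {
            System.out.println("send " + msg + " to " + client);
        }
    }
}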
From source file:org.apache.eagle.alert.engine.publisher.dedup.DedupCache.java
private DedupValue[] add(EventUniq eventEniq, AlertStreamEvent event, String stateFieldValue,
        String stateCloseValue) {
    DedupValue dedupValue = null;
    if (!events.containsKey(eventEniq)) {
        dedupValue = createDedupValue(eventEniq, event, stateFieldValue);
        ConcurrentLinkedDeque<DedupValue> dedupValues = new ConcurrentLinkedDeque<>();
        dedupValues.add(dedupValue);
        // skip the event whose put failed due to concurrency
        events.put(eventEniq, dedupValues);
        LOG.info("{} Add new dedup key {}, and value {}", this.publishName, eventEniq, dedupValues);
    } else if (!StringUtils.equalsIgnoreCase(stateFieldValue,
            events.get(eventEniq).getLast().getStateFieldValue())) {
        // an existing de-dup value; try to update or reset it
        DedupValue lastDedupValue = events.get(eventEniq).getLast();
        dedupValue = updateDedupValue(lastDedupValue, eventEniq, event, stateFieldValue, stateCloseValue);
        LOG.info("{} Update dedup key {}, and value {}", this.publishName, eventEniq, dedupValue);
    }
    if (dedupValue == null) {
        return null;
    }
    return new DedupValue[] { dedupValue };
}
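The containsKey/put sequence above is not atomic, which is what the in-code comment about a failed put alludes to. A hedged sketch of an atomic alternative using ConcurrentMap.computeIfAbsent, with the Eagle domain types simplified to JDK classes:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentMap;

public class AtomicDequeInsert {
    private final ConcurrentMap<String, ConcurrentLinkedDeque<String>> events =
            new ConcurrentHashMap<>();

    public void add(String key, String value) {
        // computeIfAbsent creates the deque at most once per key, atomically,
        // so no concurrent insertion can be lost or overwritten.
        events.computeIfAbsent(key, k -> new ConcurrentLinkedDeque<>()).add(value);
    }
}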
From source file:org.deeplearning4j.models.word2vec.Word2Vec.java
/**
 * Train the model
 */
public void fit() throws IOException {
    boolean loaded = buildVocab();
    // save vocab after building
    if (!loaded && saveVocab)
        vocab().saveVocab();
    if (stopWords == null)
        readStopWords();

    log.info("Training word2vec multithreaded");

    if (sentenceIter != null)
        sentenceIter.reset();
    if (docIter != null)
        docIter.reset();

    int[] docs = vectorizer.index().allDocs();
    if (docs.length < 1) {
        vectorizer.fit();
    }
    docs = vectorizer.index().allDocs();
    if (docs.length < 1) {
        throw new IllegalStateException("No documents found");
    }

    totalWords = vectorizer.numWordsEncountered();
    if (totalWords < 1)
        throw new IllegalStateException("Unable to train, total words less than 1");
    totalWords *= numIterations;

    log.info("Processing sentences...");

    AtomicLong numWordsSoFar = new AtomicLong(0);
    final AtomicLong nextRandom = new AtomicLong(5);
    ExecutorService exec = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    executor.submit(r);
                }
            });

    final Queue<List<VocabWord>> batch2 = new ConcurrentLinkedDeque<>();
    vectorizer.index().eachDoc(new Function<List<VocabWord>, Void>() {
        @Override
        public Void apply(List<VocabWord> input) {
            List<VocabWord> batch = new ArrayList<>();
            addWords(input, nextRandom, batch);
            if (!batch.isEmpty()) {
                batch2.add(batch);
            }
            return null;
        }
    }, exec);

    exec.shutdown();
    try {
        exec.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    ActorSystem actorSystem = ActorSystem.create();
    for (int i = 0; i < numIterations; i++)
        doIteration(batch2, numWordsSoFar, nextRandom, actorSystem);
    actorSystem.shutdown();
}
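The deque here (batch2) is a lock-free collection point: worker tasks append batches concurrently, and the collection is only read after awaitTermination guarantees every writer has finished. A self-contained sketch of that pattern using only JDK types:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BatchCollector {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedDeque<List<Integer>> batches = new ConcurrentLinkedDeque<>();
        ExecutorService exec = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 100; i++) {
            final int n = i;
            exec.submit(() -> batches.add(Arrays.asList(n, n * 2))); // no locking needed
        }
        exec.shutdown();
        exec.awaitTermination(1, TimeUnit.MINUTES); // all writers done before reading
        System.out.println("collected " + batches.size() + " batches");
    }
}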
From source file:com.spotify.helios.agent.TaskHistoryWriter.java
private Deque<TaskStatusEvent> getDeque(final JobId key) {
    synchronized (items) {
        final Deque<TaskStatusEvent> deque = items.get(key);
        if (deque == null) { // try more assertively to get a deque
            final ConcurrentLinkedDeque<TaskStatusEvent> newDeque = new ConcurrentLinkedDeque<>();
            items.put(key, newDeque);
            return newDeque;
        }
        return deque;
    }
}
From source file:com.spotify.helios.agent.QueueingHistoryWriter.java
private Deque<TaskStatusEvent> getDeque(final JobId key) {
    synchronized (items) {
        final Deque<TaskStatusEvent> deque = items.get(key);
        if (deque == null) { // try more assertively to get a deque
            final ConcurrentLinkedDeque<TaskStatusEvent> newDeque =
                    new ConcurrentLinkedDeque<TaskStatusEvent>();
            items.put(key, newDeque);
            return newDeque;
        }
        return deque;
    }
}
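Both Helios writers above share the same shape: a synchronized lazy-init of a per-key deque, where the deque itself must still be concurrent, presumably because producers append status events while another thread drains them without holding the map lock. A minimal sketch of that producer/consumer split, with illustrative names (not Helios's actual API):

import java.util.concurrent.ConcurrentLinkedDeque;

public class HistoryDrain {
    private final ConcurrentLinkedDeque<String> events = new ConcurrentLinkedDeque<>();

    // producer side: task threads record events
    public void record(String event) { events.addLast(event); }

    // consumer side: a background writer drains in FIFO order
    public void flush() {
        String event;
        while ((event = events.pollFirst()) != null) {
            System.out.println("write " + event);
        }
    }
}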
From source file:org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java
/**
 * Get all pages in an async mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess, int rowLimit)
        throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);
    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= GET_PAGES_RETRIES) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages,
                    nextRound, rowLimit);
        }
        // wait for jobs to finish and start the next round if necessary
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == GET_PAGES_RETRIES) {
        throw new RuntimeException(String.format(
                "Getting all pages reached the maximum number of retries %d. Date range: %s ~ %s. Country: %s.",
                GET_PAGES_RETRIES, startDate, endDate, country));
    }
    return allPages;
}
From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java
/**
 * Get all pages in an async mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess)
        throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);
    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= RETRY) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));
        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages,
                    nextRound);
        }
        // wait for jobs to finish and start the next round if necessary
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == RETRY) {
        throw new RuntimeException(String.format(
                "Getting all pages reached the maximum number of retries %d. Date range: %s ~ %s. Country: %s.",
                RETRY, startDate, endDate, country));
    }
    return allPages;
}
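Both Gobblin variants above use a pair of deques to implement retry rounds: worker threads push failed items onto a fresh nextRound deque, which then becomes the work queue for the next iteration. A simplified, single-threaded sketch of the round-swapping logic (tryProcess is a hypothetical stand-in for the real API calls):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

public class RetryRounds {
    static void process(Queue<String> toProcess, int maxRounds) {
        for (int round = 1; round <= maxRounds && !toProcess.isEmpty(); round++) {
            // A fresh deque per round; in the real code worker threads add to it
            // concurrently, which is why it must be a concurrent collection.
            ConcurrentLinkedDeque<String> nextRound = new ConcurrentLinkedDeque<>();
            String item;
            while ((item = toProcess.poll()) != null) {
                if (!tryProcess(item)) {
                    nextRound.add(item);
                }
            }
            toProcess = nextRound; // failed items become the next round's input
        }
    }

    static boolean tryProcess(String item) { return !item.isEmpty(); } // stand-in
}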
From source file:co.paralleluniverse.galaxy.core.Cache.java
@Override
public void init() throws Exception {
    super.init();
    if (synchronous)
        throw new RuntimeException("Synchronous mode has not been implemented yet.");

    this.freeLineList = reuseLines ? new ConcurrentLinkedDeque<CacheLine>() : null;
    this.freeSharerSetList = reuseSharerSets ? new ConcurrentLinkedDeque<ShortSet>() : null;

    // this is a special case that requires special handling b/c of potential consistency problems (see MainMemory)
    this.broadcastsRoutedToServer = hasServer && ((AbstractComm) comm).isSendToServerInsteadOfMulticast();
}
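Here the deques act as free lists for recycling CacheLine and sharer-set objects. A generic sketch of that pool pattern (the class name is illustrative): LIFO reuse via addFirst/pollFirst hands back the most recently released, likely cache-warm, instance first.

import java.util.concurrent.ConcurrentLinkedDeque;

public class FreeList<T> {
    private final ConcurrentLinkedDeque<T> free = new ConcurrentLinkedDeque<>();

    // Returns a recycled instance, or null when the caller should allocate.
    public T acquire() { return free.pollFirst(); }

    // Parks an instance for reuse; addFirst keeps reuse LIFO.
    public void release(T item) { free.addFirst(item); }
}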
From source file:gobblin.couchbase.writer.CouchbaseWriterTest.java
private List<Pair<AbstractDocument, Future>> writeRecords(Iterator<AbstractDocument> recordIterator,
        CouchbaseWriter writer, int outstandingRequests, long kvTimeout, TimeUnit kvTimeoutUnit)
        throws DataConversionException, UnsupportedEncodingException {
    final BlockingQueue<Pair<AbstractDocument, Future>> outstandingCallQueue =
            new LinkedBlockingDeque<>(outstandingRequests);
    final List<Pair<AbstractDocument, Future>> failedFutures = new ArrayList<>(outstandingRequests);

    int index = 0;
    long runTime = 0;
    final AtomicInteger callbackSuccesses = new AtomicInteger(0);
    final AtomicInteger callbackFailures = new AtomicInteger(0);
    final ConcurrentLinkedDeque<Throwable> callbackExceptions = new ConcurrentLinkedDeque<>();
    Verifier verifier = new Verifier();

    while (recordIterator.hasNext()) {
        AbstractDocument doc = recordIterator.next();
        index++;
        verifier.onWrite(doc);
        final long startTime = System.nanoTime();
        Future callFuture = writer.write(doc, new WriteCallback<TupleDocument>() {
            @Override
            public void onSuccess(WriteResponse<TupleDocument> writeResponse) {
                callbackSuccesses.incrementAndGet();
            }

            @Override
            public void onFailure(Throwable throwable) {
                callbackFailures.incrementAndGet();
                callbackExceptions.add(throwable);
            }
        });
        drainQueue(outstandingCallQueue, 1, kvTimeout, kvTimeoutUnit, failedFutures);
        outstandingCallQueue.add(new Pair<>(doc, callFuture));
        runTime += System.nanoTime() - startTime;
    }

    int failedWrites = 0;
    long responseStartTime = System.nanoTime();
    drainQueue(outstandingCallQueue, outstandingRequests, kvTimeout, kvTimeoutUnit, failedFutures);
    runTime += System.nanoTime() - responseStartTime;

    for (Throwable failure : callbackExceptions) {
        System.out.println(failure.getClass() + " : " + failure.getMessage());
    }
    failedWrites += failedFutures.size();
    System.out.println("Total time to send " + index + " records = " + runTime / 1000000.0 + "ms, "
            + "Failed writes = " + failedWrites + ", Callback Successes = " + callbackSuccesses.get()
            + ", Callback Failures = " + callbackFailures.get());

    verifier.verify(writer.getBucket());
    return failedFutures;
}
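The test above cannot collect failures into an ArrayList because the write callbacks fire on I/O threads; the deque plus two atomic counters give it thread-safe accounting. The core of that pattern, reduced to JDK types (class and method names illustrative):

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.atomic.AtomicInteger;

public class CallbackAccounting {
    final AtomicInteger successes = new AtomicInteger();
    final AtomicInteger failures = new AtomicInteger();
    final ConcurrentLinkedDeque<Throwable> errors = new ConcurrentLinkedDeque<>();

    // Both methods may be invoked from arbitrary callback threads.
    void onSuccess() { successes.incrementAndGet(); }
    void onFailure(Throwable t) { failures.incrementAndGet(); errors.add(t); }
}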
From source file:org.ops4j.pax.url.mvn.internal.AetherBasedResolver.java
private void releaseSession(RepositorySystemSession session) {
    LocalRepository repo = session.getLocalRepository();
    Deque<RepositorySystemSession> deque = sessions.get(repo);
    if (deque == null) {
        sessions.putIfAbsent(repo, new ConcurrentLinkedDeque<RepositorySystemSession>());
        deque = sessions.get(repo);
    }
    session.getData().set(SESSION_CHECKS, null);
    deque.add(session);
}
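ConcurrentMap.putIfAbsent returns the previous mapping, so the second sessions.get(repo) above can be avoided. A sketch of that variant with the Aether types replaced by JDK classes:

import java.util.Deque;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentMap;

public class SessionPool {
    private final ConcurrentMap<String, Deque<String>> sessions = new ConcurrentHashMap<>();

    public void release(String repo, String session) {
        Deque<String> deque = sessions.get(repo);
        if (deque == null) {
            Deque<String> created = new ConcurrentLinkedDeque<>();
            // putIfAbsent returns the existing value if another thread won the race.
            Deque<String> existing = sessions.putIfAbsent(repo, created);
            deque = (existing != null) ? existing : created;
        }
        deque.add(session);
    }
}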