List of usage examples for java.util.concurrent PriorityBlockingQueue add
public boolean add(E e)
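PriorityBlockingQueue.add(e) inserts an element into an unbounded, thread-safe priority heap; ordering comes from the elements' natural ordering or from a Comparator supplied at construction. Because the queue is unbounded, add never blocks and always returns true (it behaves identically to offer). Before the real-world examples below, a minimal self-contained sketch (class name and values are invented for illustration):

import java.util.Comparator;
import java.util.concurrent.PriorityBlockingQueue;

public class AddExample {
  public static void main(String[] args) {
    // Natural ordering: the smallest element is dequeued first.
    PriorityBlockingQueue<Integer> numbers = new PriorityBlockingQueue<>();
    numbers.add(42);
    numbers.add(7);
    System.out.println(numbers.poll()); // prints 7

    // Comparator ordering, supplied at construction (11 is the documented default capacity).
    PriorityBlockingQueue<String> longestFirst =
        new PriorityBlockingQueue<>(11, Comparator.<String>comparingInt(String::length).reversed());
    longestFirst.add("ab");
    longestFirst.add("abcd");
    System.out.println(longestFirst.poll()); // prints abcd
  }
}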
From source file: org.apache.geode.geospatial.client.ActorController.java

private void addActorToQueue(PriorityBlockingQueue<Actor> priorityQueue, int i) {
  // Randomly have the tracks move between 50 and 70 MPH
  priorityQueue.add(new Actor(Math.random() * 20 + 50, roads, roads.getRandomRoad(), Integer.toString(i)));
}
From source file: org.apache.hadoop.hbase.replication.regionserver.RecoveredReplicationSource.java

public void locateRecoveredPaths(PriorityBlockingQueue<Path> queue) throws IOException {
  boolean hasPathChanged = false;
  PriorityBlockingQueue<Path> newPaths =
      new PriorityBlockingQueue<Path>(queueSizePerGroup, new LogsComparator());
  pathsLoop: for (Path path : queue) {
    if (fs.exists(path)) { // still in same location, don't need to do anything
      newPaths.add(path);
      continue;
    }
    // Path changed - try to find the right path.
    hasPathChanged = true;
    if (stopper instanceof ReplicationSyncUp.DummyServer) {
      // In the case of disaster/recovery, HMaster may be shutdown/crashed before flush data
      // from .logs to .oldlogs. Loop into .logs folders and check whether a match exists
      Path newPath = getReplSyncUpPath(path);
      newPaths.add(newPath);
      continue;
    } else {
      // See if Path exists in the dead RS folder (there could be a chain of failures
      // to look at)
      List<String> deadRegionServers = this.replicationQueueInfo.getDeadRegionServers();
      LOG.info("NB dead servers : " + deadRegionServers.size());
      final Path walDir = FSUtils.getWALRootDir(conf);
      for (String curDeadServerName : deadRegionServers) {
        final Path deadRsDirectory =
            new Path(walDir, AbstractFSWALProvider.getWALDirectoryName(curDeadServerName));
        Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()),
            new Path(deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) };
        for (Path possibleLogLocation : locs) {
          LOG.info("Possible location " + possibleLogLocation.toUri().toString());
          if (manager.getFs().exists(possibleLogLocation)) {
            // We found the right new location
            LOG.info("Log " + path + " still exists at " + possibleLogLocation);
            newPaths.add(possibleLogLocation);
            continue pathsLoop;
          }
        }
      }
      // didn't find a new location
      LOG.error(String.format("WAL Path %s doesn't exist and couldn't find its new location", path));
      newPaths.add(path);
    }
  }

  if (hasPathChanged) {
    if (newPaths.size() != queue.size()) { // this shouldn't happen
      LOG.error("Recovery queue size is incorrect");
      throw new IOException("Recovery queue size error");
    }
    // put the correct locations in the queue
    // since this is a recovered queue with no new incoming logs,
    // there shouldn't be any concurrency issues
    queue.clear();
    for (Path path : newPaths) {
      queue.add(path);
    }
  }
}
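The idiom worth noting here: PriorityBlockingQueue's iterator is weakly consistent and returns elements in no particular order, so the HBase method builds a corrected replacement queue, verifies the sizes match, and only then swaps the contents via clear() followed by add() in a loop. A stripped-down sketch of that rebuild idiom, assuming no concurrent writers (the generic remap function stands in for the WAL-path relocation above):

import java.util.concurrent.PriorityBlockingQueue;
import java.util.function.UnaryOperator;

public class QueueRebuild {
  /** Replaces every element with remap(element), keeping the queue's own ordering. */
  static <T> void rebuild(PriorityBlockingQueue<T> queue, UnaryOperator<T> remap) {
    // Iteration order is unspecified, but the destination queue re-sorts on add().
    PriorityBlockingQueue<T> corrected =
        new PriorityBlockingQueue<>(Math.max(1, queue.size()), queue.comparator());
    for (T element : queue) {
      corrected.add(remap.apply(element));
    }
    if (corrected.size() != queue.size()) {
      throw new IllegalStateException("rebuilt queue lost elements");
    }
    // Safe only while no other thread is mutating the queue, as the HBase code notes.
    queue.clear();
    for (T element : corrected) {
      queue.add(element);
    }
  }
}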
From source file: org.apache.geode.geospatial.client.ActorController.java

public void run() {
  final PriorityBlockingQueue<Actor> priorityQueue = new PriorityBlockingQueue<>(numberOfActors,
      (Comparator<Actor>) (o1, o2) -> (int) (o1.timeToAdvance() - o2.timeToAdvance()));
  // Slam in a couple tracks to keep the simulators busy
  int initialCount = numberOfSimulators * 2;
  for (int i = 0; i < initialCount; i++) {
    addActorToQueue(priorityQueue, i);
  }
  for (int i = 0; i < numberOfSimulators; i++) {
    new Thread(() -> {
      while (true) {
        Actor actor = priorityQueue.poll();
        long currentDelay = actor.timeToAdvance() - System.currentTimeMillis();
        actorDelay.update(currentDelay);
        if (currentDelay <= 0) {
          try {
            actor.advance();
            Coordinate coordinate = actor.currentEvent();
            LocationEvent locationEvent = new LocationEvent(coordinate.y, coordinate.x, actor.getUid());
            geoRegion.put(locationEvent.getUid(), locationEvent);
          } catch (Exception e) {
            logger.error(e.getMessage(), e);
          }
          priorityQueue.add(actor);
        } else {
          priorityQueue.add(actor);
          try {
            Thread.sleep(1);
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    }).start();
  }
  for (int i = initialCount; i < numberOfActors; i++) {
    // Now slowly trickle in more actors until we get up to the number of requested actors.
    // If we don't trickle them in then they are clustered at various starting points.
    try {
      Thread.sleep(newActorTimeout);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    addActorToQueue(priorityQueue, i);
    if (i % 1000 == 0) {
      logger.info("Injected {} drivers @ {}", i, new Date());
    }
  }
}
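The Geode example uses the queue as a shared scheduler: actors are ordered by their next-due timestamp, each worker polls the head, and the actor is added back after processing (or immediately, with a short sleep, if it is not yet due). A minimal sketch of that poll/process/re-add loop (the Task class and timing values are invented for illustration):

import java.util.Comparator;
import java.util.concurrent.PriorityBlockingQueue;

public class DueTimeWorker {
  static final class Task {
    final String name;
    long dueAtMillis;
    Task(String name, long dueAtMillis) { this.name = name; this.dueAtMillis = dueAtMillis; }
  }

  public static void main(String[] args) throws InterruptedException {
    PriorityBlockingQueue<Task> queue =
        new PriorityBlockingQueue<>(16, Comparator.comparingLong((Task t) -> t.dueAtMillis));
    queue.add(new Task("a", System.currentTimeMillis() + 50));
    queue.add(new Task("b", System.currentTimeMillis() + 10));

    Thread worker = new Thread(() -> {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          Task task = queue.take(); // blocks until a task is available (unlike poll(), never null)
          long delay = task.dueAtMillis - System.currentTimeMillis();
          if (delay <= 0) {
            System.out.println("running " + task.name);
            task.dueAtMillis += 100;   // reschedule for the next run
            queue.add(task);           // add() re-inserts in due-time order
          } else {
            queue.add(task);           // not due yet: put it back and wait briefly
            Thread.sleep(Math.min(delay, 5));
          }
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    worker.start();
    Thread.sleep(300);
    worker.interrupt();
    worker.join();
  }
}

One design note: Comparator.comparingLong avoids the overflow risk of casting a long difference to int, which the Geode comparator above accepts for simplicity.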
From source file: oculus.aperture.graph.aggregation.impl.ModularityAggregator.java

@Override
public void run() {
  logger.debug("Running kSnap clustering algorithm on " + nodeMap.size() + " nodes and "
      + linkMap.size() + " links...");

  StopWatch stopWatch = new StopWatch();
  stopWatch.start();
  HashMap<String, ModularityNode> linklookup =
      new HashMap<String, ModularityAggregator.ModularityNode>();
  for (Node n : nodeMap.values()) {
    ModularityNode mn = new ModularityNode(n);
    linklookup.put(n.getId(), mn);
    groups.add(mn);
  }
  links = new ArrayList<ModularityLink>();
  for (Link l : linkMap.values()) {
    if (linklookup.containsKey(l.getSourceId()) && linklookup.containsKey(l.getTargetId())) {
      // if this is not true we have links pointing to an invalid node...
      ModularityLink ml = new ModularityLink(linklookup.get(l.getSourceId()),
          linklookup.get(l.getTargetId()));
      links.add(ml);
      ModularityNode start = linklookup.get(l.getSourceId());
      ModularityNode end = linklookup.get(l.getTargetId()); // the end node must be looked up by target id
      start.addLink(ml);
      end.addLink(ml);
    }
  }

  boolean notterminate = true;
  int linksize;

  while (notterminate) {
    final List<Future<?>> futures = new ArrayList<Future<?>>();
    notterminate = false;
    final PriorityBlockingQueue<ModularityLink> linksort =
        new PriorityBlockingQueue<ModularityLink>();
    linksize = links.size();
    final int itrsize = linksize / nThreads;
    for (int i = 0; i < nThreads; i++) {
      final int passval = i;
      Future<?> foo = executor.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          boolean nt = false;
          for (int lnknum = 0; lnknum < itrsize; lnknum++) {
            ModularityLink ln = links.get(passval * itrsize + lnknum);
            long nc = 0;
            if (ln.source.neighbourcounts.containsKey(ln.target)) {
              nc = ln.source.neighbourcounts.get(ln.target).intValue();
            } else {
              System.out.println("Oooops");
            }
            long q = nc - (ln.source.totalvolume * ln.target.totalvolume) / 2;
            if (q > 0) {
              nt = true;
            }
            ln.q.set(q);
            linksort.add(ln);
          }
          return nt;
        }
      });
      futures.add(foo);
    }
    for (Future<?> foo : futures) {
      try {
        notterminate = (Boolean) foo.get();
      } catch (InterruptedException interruptedCancellingAndSignalling) {
        Thread.currentThread().interrupt();
      } catch (ExecutionException wtf) {
        wtf.printStackTrace();
      }
    }
    if (!notterminate) {
      break;
    }

    // Now we take each link in the queue and add it to maximal matching
    ConcurrentLinkedQueue<ModularityLink> maximalmatching =
        new ConcurrentLinkedQueue<ModularityAggregator.ModularityLink>();
    ConcurrentSkipListSet<ModularityNode> vertexcheck =
        new ConcurrentSkipListSet<ModularityAggregator.ModularityNode>();
    ModularityLink top = linksort.poll();
    maximalmatching.add(top);
    vertexcheck.add(top.source);
    vertexcheck.add(top.target);
    while (!linksort.isEmpty()) {
      ModularityLink nlnk = linksort.poll();
      if (nlnk.q.intValue() < 0) {
        continue;
      }
      if (vertexcheck.contains(nlnk.source) || vertexcheck.contains(nlnk.target)) {
        continue;
      }
      maximalmatching.add(nlnk);
      vertexcheck.add(nlnk.source);
      vertexcheck.add(nlnk.target);
    }

    // Now we take all the pairs in maximal matching and fuse them
    for (ModularityLink ln : maximalmatching) {
      ModularityNode so = ln.source;
      ModularityNode tr = ln.target;
      so.assimilate(tr);
      groups.remove(tr);
      links.remove(ln);
    }
    linksize = links.size();
    if (linksize == 1) {
      notterminate = false;
    }
  }

  /*
  final List<Future<?>> futures = new ArrayList<Future<?>>();
  Future<?> foo = executor.submit(new Runnable() {
    @Override
    public void run() {
    }
  });
  futures.add(foo);
  */

  clusterSet = new ArrayList<Set<Node>>();
  for (ModularityNode g : groups) {
    if (cancel) {
      setStatusWaiting();
      return;
    }
    Set<Node> set = new HashSet<Node>();
    clusterSet.add(set);
    for (Node n : g.nodes) {
      if (cancel) {
        setStatusWaiting();
        return;
      }
      set.add(n);
    }
  }
  if (clusterer != null) {
    graphResult = clusterer.convertClusterSet(clusterSet);
  }
  stopWatch.stop();
  System.out.println("Finished Modularity clustering algorithm.");
  System.out.println("Algorithm took " + stopWatch.toString()); // 30 = 33.487
  stopWatch.reset();
  this.result = result; // self-assignment kept from the original source
}
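The aggregator relies on a common fork/join idiom: several worker threads score items and add() them into one shared PriorityBlockingQueue concurrently (add is thread-safe and, since the queue is unbounded, never blocks), then a single thread drains the queue with poll() to visit items in priority order. A reduced sketch of just that idiom (the scoring function and counts are placeholders):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ParallelScoreSort {
  public static void main(String[] args) throws InterruptedException {
    // Shared queue: concurrent add() from workers, ordered poll() afterwards.
    PriorityBlockingQueue<Long> scored = new PriorityBlockingQueue<>();
    ExecutorService executor = Executors.newFixedThreadPool(4);
    for (int worker = 0; worker < 4; worker++) {
      final int base = worker * 1000;
      executor.submit(() -> {
        for (int i = 0; i < 1000; i++) {
          scored.add(score(base + i)); // placeholder scoring function
        }
      });
    }
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);

    // Drain in ascending score order; poll() returns null once empty.
    List<Long> inOrder = new ArrayList<>();
    for (Long s = scored.poll(); s != null; s = scored.poll()) {
      inOrder.add(s);
    }
    System.out.println("drained " + inOrder.size() + " items, smallest = " + inOrder.get(0));
  }

  static long score(int item) {
    return (item * 2654435761L) % 100000; // arbitrary hash-like score for the demo
  }
}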
From source file: com.mobiperf.MeasurementScheduler.java

/**
 * Update the schedule based on a set of tasks from the server.
 * <p>
 * The current tasks to schedule are in a hash table indexed by a unique task key.
 * <p>
 * Remove all tasks from the schedule that are not in the new list or that have changed.
 * Then, add all tasks from the new list that were not in the schedule, or have changed.
 * Then, the schedule will match the one on the server, and unchanged tasks are left as they are.
 * <p>
 * If the state has changed and the schedule was received from the server, save it to disk
 * so it can be recovered in case of a crash.
 *
 * @param newTasks List of MeasurementTasks from the server
 * @param reLoad if true, we're loading from disk: don't adjust frequencies or save to disk again.
 */
private void updateSchedule(List<MeasurementTask> newTasks, boolean reLoad) {
  // Keep track of what tasks need to be added.
  // Altered tasks are removed and then added, so they go here too
  Vector<MeasurementTask> tasksToAdd = new Vector<MeasurementTask>();
  // Keep track of what keys are not being used. Remove keys from this as
  // you find they are in use.
  Set<String> missingKeys = new HashSet<String>(currentSchedule.keySet());
  Set<String> keysToRemove = new HashSet<String>();
  Logger.i("Attempting to add new tasks");
  for (MeasurementTask newTask : newTasks) {
    // Adjust the frequency of the new task, based on the selected data consumption profile,
    // or ignore it if the task is disabled for this profile.
    // If we are loading again, don't re-adjust task frequencies.
    if (!reLoad) {
      if (!adjustInterval(newTask)) {
        continue;
      }
    }
    String newKey = newTask.getDescription().key;
    if (!missingKeys.contains(newKey)) {
      tasksToAdd.add(newTask);
    } else {
      // check for changes. If any parameter changes, it counts as a change.
      if (!currentSchedule.get(newKey).getDescription().equals(newTask.getDescription())) {
        // If there's a change, replace the task with the new task from the server
        keysToRemove.add(newKey);
        tasksToAdd.add(newTask);
      }
      // We've seen the task
      missingKeys.remove(newKey);
    }
  }
  // missingKeys now contains all keys that no longer exist in the new schedule
  keysToRemove.addAll(missingKeys);

  // Add all new tasks, and copy all unmodified tasks, to a new queue.
  // Also update currentSchedule accordingly.
  PriorityBlockingQueue<MeasurementTask> newQueue = new PriorityBlockingQueue<MeasurementTask>(
      Config.MAX_TASK_QUEUE_SIZE, new TaskComparator());
  synchronized (currentSchedule) {
    Logger.i("Tasks to remove:" + keysToRemove.size());
    for (MeasurementTask task : this.taskQueue) {
      String taskKey = task.getDescription().key;
      if (!keysToRemove.contains(taskKey)) {
        newQueue.add(task);
      } else {
        Logger.w("Removing task with key " + taskKey);
        // Also need to keep our master schedule up to date
        currentSchedule.remove(taskKey);
      }
    }
    this.taskQueue = newQueue;
    // add all new tasks
    Logger.i("New tasks added:" + tasksToAdd.size());
    for (MeasurementTask task : tasksToAdd) {
      submitTask(task);
      currentSchedule.put(task.getDescription().key, task);
    }
  }
  if (!reLoad && (!tasksToAdd.isEmpty() || !keysToRemove.isEmpty())) {
    saveSchedulerState();
  }
}
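A detail worth calling out in this last example: PriorityBlockingQueue has no keyed lookup, and remove(Object) is a linear scan per call, so the scheduler instead filters the old queue into a fresh one and swaps the reference while holding a lock. A compact sketch of that filter-and-swap idiom (the generic key extractor stands in for getDescription().key above):

import java.util.Set;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.function.Function;

public class QueueFilter {
  /** Returns a new queue containing only the entries whose key is not marked for removal. */
  static <T, K> PriorityBlockingQueue<T> withoutKeys(PriorityBlockingQueue<T> old,
      Set<K> keysToRemove, Function<T, K> keyOf) {
    PriorityBlockingQueue<T> fresh =
        new PriorityBlockingQueue<>(Math.max(1, old.size()), old.comparator());
    for (T item : old) { // weakly consistent iteration over the live queue
      if (!keysToRemove.contains(keyOf.apply(item))) {
        fresh.add(item); // add() re-heapifies, so ordering is preserved in the new queue
      }
    }
    return fresh; // caller swaps its queue reference to this, under its own lock
  }
}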