List of usage examples for java.util.LinkedList.remove
public E remove(int index)
public boolean remove(Object o)
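Before the project examples, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from any project below) showing what each overload does, including the boxing pitfall that several of the examples rely on:

    import java.util.LinkedList;

    public class LinkedListRemoveDemo {
        public static void main(String[] args) {
            LinkedList<String> names = new LinkedList<>();
            names.add("alpha");
            names.add("beta");
            names.add("gamma");

            // remove(int index) removes by position and returns the removed element
            String removed = names.remove(1);          // "beta"
            System.out.println(removed + " -> " + names);

            // remove(Object o) removes the first occurrence and reports success
            boolean found = names.remove("gamma");     // true
            System.out.println(found + " -> " + names);

            // Pitfall: on a LinkedList<Integer>, an int argument selects the
            // remove(int index) overload, not remove(Object o)
            LinkedList<Integer> numbers = new LinkedList<>();
            numbers.add(10);
            numbers.add(20);
            numbers.remove(1);                         // removes the element at index 1 (the value 20)
            numbers.remove(Integer.valueOf(10));       // removes the value 10
            System.out.println(numbers);               // []
        }
    }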
From source file:org.apache.tajo.scheduler.FairScheduler.java
    @Override
    public void notifyQueryStop(QueryId queryId) {
        synchronized (queues) {
            QuerySchedulingInfo runningQuery = runningQueries.remove(queryId);
            String queueName = queryAssignedMap.remove(queryId);
            LinkedList<QuerySchedulingInfo> queue = queues.get(queueName);
            if (queue == null) {
                LOG.error("Can't get queue from multiple queue: " + queryId.toString() + ", queue=" + queueName);
                return;
            }

            // If the query is a waiting query, remove from a queue.
            LOG.info(queryId.toString() + " is not a running query. Removing from queue.");
            QuerySchedulingInfo stoppedQuery = null;
            for (QuerySchedulingInfo eachQuery : queue) {
                if (eachQuery.getQueryId().equals(queryId)) {
                    stoppedQuery = eachQuery;
                    break;
                }
            }
            if (stoppedQuery != null) {
                queue.remove(stoppedQuery);
            } else {
                LOG.error("No query info in the queue: " + queryId + ", queue=" + queueName);
                return;
            }
        }
        wakeupProcessor();
    }
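In this example the queue is scanned with a for-each loop that breaks as soon as a match is found, and queue.remove(stoppedQuery) runs only after the loop has ended; removing inside the loop body and continuing to iterate would throw ConcurrentModificationException.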
From source file:org.openanzo.test.QueryTestSuiteBase.java
    private void assertTupleQueryResultEquals(MutableTupleQueryResult expected, SolutionSet actual,
            boolean ordered, boolean allowAnagrams) throws Exception {
        // this is laxer than it need be: SELECT * allows names in any order,
        // but SELECT ?x ?y ?z mandates the order
        Iterator<PatternSolution> actualIter = actual.iterator();
        assertEquals(new HashSet<String>(expected.getBindingNames()),
                new HashSet<String>(actual.getBindingNames()));
        if (ordered) {
            // if it's ordered, we can just walk the lists in parallel
            expected.beforeFirst();
            while (expected.hasNext()) {
                assertTrue(actualIter.hasNext());
                BindingSet expectedPatternSolution = expected.next();
                PatternSolution actualPatternSolution = actualIter.next();
                assertTrue("Expected solution does not equal actual solution:\n\t"
                        + converter.convert(expectedPatternSolution) + "\n\t\tvs.\n\t" + actualPatternSolution,
                        areSolutionsEqual(converter.convert(expectedPatternSolution), actualPatternSolution,
                                allowAnagrams));
            }
            assertFalse(actualIter.hasNext());
        } else {
            // otherwise, we can just build bags from each solution set and compare them
            //assertEquals(tqr2bag(expected), tqr2bag(actual));
            // this doesn't work because different implementations have different .hashCodes,
            // so we do this the naive way
            LinkedList<PatternSolution> actualBindings = new LinkedList<PatternSolution>();
            while (actualIter.hasNext())
                actualBindings.add(actualIter.next());
            int expectedCount = 0;
            expected.beforeFirst();
            while (expected.hasNext()) {
                expectedCount++;
                BindingSet e = expected.next();
                PatternSolution expect = converter.convert(e);
                boolean found = false;
                for (PatternSolution a : actualBindings) {
                    if (areSolutionsEqual(expect, a, allowAnagrams)) {
                        actualBindings.remove(a);
                        found = true;
                        break;
                    }
                }
                assertTrue("Did not find expected bindings: \n\t" + expect + "\nActual bindings are:\n\t"
                        + actualBindings, found);
            }
            assertEquals("Actual bindings found that were not expected. Expected were: \n\t["
                    + StringUtils.join(convertResults(expected), ", ") + "]\nActual bindings are:\n\t"
                    + actualBindings, 0, actualBindings.size());
        }
    }
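Here actualBindings.remove(a) removes an element that was obtained from the list itself, so the remove(Object) call is guaranteed to succeed; the immediate break is what keeps the surrounding for-each loop from detecting the structural modification and failing with ConcurrentModificationException.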
From source file:com.google.ie.business.service.impl.ProjectServiceImpl.java
    @SuppressWarnings("unchecked")
    private void addIdeaToRecentlyPickedIdeaListInCache(Idea ideaToBeAdded) {
        /* Get the list of recently picked ideas from cache */
        LinkedList<Idea> ideas = (LinkedList<Idea>) CacheHelper.getObject(CacheConstants.IDEA_NAMESPACE,
                CacheConstants.RECENTLY_PICKED_IDEAS);
        if (ideas != null) {
            Iterator<Idea> iterator = ideas.iterator();
            Idea ideaFromCache = null;
            /* Iterate to check whether the list already contains the idea */
            while (iterator.hasNext()) {
                ideaFromCache = iterator.next();
                String ideaKey = ideaFromCache.getKey();
                if (ideaKey.equalsIgnoreCase(ideaToBeAdded.getKey())) {
                    /* If the idea already exists in the list, move it to the head */
                    ideas.remove(ideaFromCache);
                    ideas.addFirst(ideaFromCache);
                    CacheHelper.putObject(CacheConstants.IDEA_NAMESPACE, CacheConstants.RECENTLY_PICKED_IDEAS,
                            ideas, CacheConstants.RECENTLY_PICKED_IDEAS_EXPIRATION_DELAY);
                    return;
                }
            }
        }
        /* This is executed if the idea does not already exist in cache */
        ideaService.addIdeaToListInCache(ideaToBeAdded, CacheConstants.RECENTLY_PICKED_IDEAS,
                DEFAULT_NO_OF_RECENT_PROJECTS, CacheConstants.RECENTLY_PICKED_IDEAS_EXPIRATION_DELAY);
    }
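Calling ideas.remove(ideaFromCache) while an Iterator over ideas is still open is safe here only because the method returns immediately afterwards; if the loop continued, the next call to iterator.next() would throw ConcurrentModificationException.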
From source file:com.espertech.esper.regression.pattern.TestCronParameter.java
    private boolean compareLists(EventBean[] receivedResults, LinkedList<EventDescriptor> expectedResults) {
        int receivedSize = (receivedResults == null) ? 0 : receivedResults.length;
        if (expectedResults.size() != receivedSize) {
            return false;
        }

        // To make sure all received events have been expected
        LinkedList<EventDescriptor> expectedResultsClone = new LinkedList<EventDescriptor>(expectedResults);

        // Go through the list of expected results and remove from received result list if found
        for (EventDescriptor desc : expectedResults) {
            EventDescriptor foundMatch = null;
            for (EventBean received : receivedResults) {
                if (compareEvents(desc, received)) {
                    foundMatch = desc;
                    break;
                }
            }

            // No match between expected and received
            if (foundMatch == null) {
                return false;
            }
            expectedResultsClone.remove(foundMatch);
        }

        // Any left over received results also invalidate the test
        if (expectedResultsClone.size() > 0) {
            return false;
        }
        return true;
    }
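This comparison removes matches from a copy (expectedResultsClone) while iterating the original expectedResults, which sidesteps ConcurrentModificationException; together with the up-front size check, the loop amounts to a multiset equality test.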
From source file:org.apache.hadoop.hbase.util.RegionSplitter.java
    static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList, HTable table,
            SplitAlgorithm splitAlgo) throws IOException, InterruptedException {
        LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
        LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
        LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();

        // get table info
        Path rootDir = FSUtils.getRootDir(table.getConfiguration());
        Path tableDir = FSUtils.getTableDir(rootDir, table.getName());
        FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
        HTableDescriptor htd = table.getTableDescriptor();

        // clear the cache to forcibly refresh region information
        table.clearRegionCache();

        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();

            // see if the new split daughter region has come online
            try {
                HRegionInfo dri = table.getRegionLocation(split).getRegionInfo();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre);
                logicalSplitting.add(region);
                continue;
            }

            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<HRegionInfo> check = Lists.newLinkedList();
                check.add(table.getRegionLocation(start).getRegionInfo());
                check.add(table.getRegionLocation(split).getRegionInfo());
                for (HRegionInfo hri : check.toArray(new HRegionInfo[] {})) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0)
                        sk = splitAlgo.firstRow();
                    String startKey = splitAlgo.rowToStr(sk);
                    HRegionFileSystem regionFs = HRegionFileSystem
                            .openRegionFromFileSystem(table.getConfiguration(), fs, tableDir, hri, true);

                    // check every Column Family for that region
                    boolean refFound = false;
                    for (HColumnDescriptor c : htd.getFamilies()) {
                        if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) {
                            break;
                        }
                    }

                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                table.clearRegionCache();
            }
        }

        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size()
                + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
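check.remove(hri) is safe inside the loop because the loop iterates over a snapshot produced by check.toArray(new HRegionInfo[] {}), not over the live LinkedList.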
From source file:edu.cornell.med.icb.clustering.QTClusterer.java
    /**
     * Groups instances into clusters. Returns the indices of the instances
     * that belong to a cluster as an int array in the list result.
     *
     * @param calculator The {@link edu.cornell.med.icb.clustering.SimilarityDistanceCalculator}
     *                   that should be used when clustering
     * @param qualityThreshold The QT clustering algorithm quality threshold (d)
     * @return The list of clusters.
     */
    public List<int[]> cluster(final SimilarityDistanceCalculator calculator, final double qualityThreshold) {
        final ProgressLogger clusterProgressLogger = new ProgressLogger(LOGGER, logInterval,
                "instances clustered");
        clusterProgressLogger.displayFreeMemory = true;
        clusterProgressLogger.expectedUpdates = instanceCount;
        clusterProgressLogger.start("Starting to cluster " + instanceCount + " instances using "
                + parallelTeam.getThreadCount() + " threads.");

        // reset cluster results
        clusterCount = 0;
        // instanceList is the set "G" to cluster
        final LinkedList<Integer> instanceList = new LinkedList<Integer>();
        for (int i = 0; i < instanceCount; i++) {
            clusters[i].clear();
            // set each node in the instance list to its
            // original position in the source data array
            instanceList.add(i);
        }

        final double ignoreDistance = calculator.getIgnoreDistance();

        // eliminate any instances that will never cluster with anything else
        final IntList singletonClusters = identifySingletonClusters(calculator, qualityThreshold,
                instanceList, clusterProgressLogger);

        final ProgressLogger innerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
                "inner loop iterations");
        innerLoopProgressLogger.displayFreeMemory = false;

        final ProgressLogger outerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
                "outer loop iterations");
        outerLoopProgressLogger.displayFreeMemory = true;

        try {
            // loop over instances until they have all been added to a cluster
            while (!instanceList.isEmpty()) {
                // cluster remaining instances to find the maximum cardinality
                for (int i = 0; i < instanceList.size(); i++) {
                    candidateClusters[i].clear();
                }

                if (logOuterLoopProgress) {
                    outerLoopProgressLogger.expectedUpdates = instanceList.size();
                    outerLoopProgressLogger.start("Entering outer loop for " + instanceList.size()
                            + " iterations");
                }

                // for each i in G (instance list)
                // find instance j such that distance i,j minimum
                parallelTeam.execute(new ParallelRegion() {  // NOPMD
                    @Override
                    public void run() throws Exception {  // NOPMD
                        // each thread will populate a different portion of the "candidateCluster"
                        // array so we shouldn't need to worry about concurrent access
                        execute(0, instanceList.size() - 1, new IntegerForLoop() {
                            @Override
                            public void run(final int first, final int last) {
                                if (LOGGER.isDebugEnabled()) {
                                    LOGGER.debug("first = " + first + ", last = " + last);
                                }
                                for (int i = first; i <= last; i++) {
                                    @SuppressWarnings("unchecked")
                                    final LinkedList<Integer> notClustered =
                                            (LinkedList<Integer>) instanceList.clone();

                                    // add the first instance to the next candidate cluster
                                    final IntArrayList candidateCluster = candidateClusters[i];
                                    candidateCluster.add(notClustered.remove(i));

                                    if (logInnerLoopProgress) {
                                        innerLoopProgressLogger.expectedUpdates = notClustered.size();
                                        innerLoopProgressLogger.start("Entering inner loop for "
                                                + notClustered.size() + " iterations");
                                    }

                                    // cluster the remaining instances to find the maximum
                                    // cardinality find instance j such that distance i,j minimum
                                    boolean done = false;
                                    while (!done && !notClustered.isEmpty()) {
                                        // find the node that has minimum distance between the
                                        // current cluster and the instances that have not yet
                                        // been clustered.
                                        double minDistance = Double.POSITIVE_INFINITY;
                                        int minDistanceInstanceIndex = 0;
                                        int instanceIndex = 0;
                                        for (final int instance : notClustered) {
                                            double newDistance = ignoreDistance;

                                            final int[] cluster = candidateCluster.elements();
                                            for (int instanceInCluster = 0;
                                                    instanceInCluster < candidateCluster.size();
                                                    instanceInCluster++) {
                                                final double a = calculator.distance(
                                                        cluster[instanceInCluster], instance);

                                                // if the distance of the instance will force the
                                                // candidate cluster to be larger than the cutoff
                                                // value, we can stop here because we know that
                                                // this candidate cluster will be too large
                                                if (a >= minDistance) {
                                                    newDistance = ignoreDistance;
                                                    break;
                                                }
                                                final double b = newDistance;

                                                // This code is inlined from java.lang.Math.max(a, b)
                                                if (a != a) {  // a is NaN
                                                    newDistance = a;
                                                } else if (a == 0.0d && b == 0.0d
                                                        && Double.doubleToLongBits(a) == negativeZeroDoubleBits) {
                                                    newDistance = b;
                                                } else if (a >= b) {
                                                    newDistance = a;
                                                } else {
                                                    newDistance = b;
                                                }
                                            }

                                            if (newDistance != ignoreDistance && newDistance < minDistance) {
                                                minDistance = newDistance;
                                                minDistanceInstanceIndex = instanceIndex;
                                            }
                                            instanceIndex++;
                                        }

                                        // grow clusters until min distance between new instance
                                        // and cluster reaches quality threshold
                                        // if (diameter(Ai U {j}) > d)
                                        if (minDistance > qualityThreshold) {
                                            done = true;
                                        } else {
                                            // remove the instance from the ones to be considered
                                            final int instance =
                                                    notClustered.remove(minDistanceInstanceIndex);
                                            // and add it to the newly formed cluster
                                            candidateCluster.add(instance);
                                        }
                                        if (logInnerLoopProgress) {
                                            innerLoopProgressLogger.update();
                                        }
                                    }
                                    if (logInnerLoopProgress) {
                                        innerLoopProgressLogger.stop("Inner loop completed.");
                                    }
                                    if (logOuterLoopProgress) {
                                        outerLoopProgressLogger.update();
                                    }
                                }
                            }
                        });
                    }
                });

                if (logOuterLoopProgress) {
                    outerLoopProgressLogger.stop("Outer loop completed.");
                }

                // identify cluster (set C) with maximum cardinality
                int maxCardinality = 0;
                int selectedClusterIndex = -1;
                for (int i = 0; i < instanceList.size(); i++) {
                    final int size = candidateClusters[i].size();
                    if (LOGGER.isTraceEnabled() && size > 0) {
                        LOGGER.trace("potential cluster " + i + ": "
                                + ArrayUtils.toString(candidateClusters[i]));
                    }
                    if (size > maxCardinality) {
                        maxCardinality = size;
                        selectedClusterIndex = i;
                    }
                }

                final IntArrayList selectedCluster = candidateClusters[selectedClusterIndex];

                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("adding " + selectedCluster.size() + " instances to cluster "
                            + clusterCount);
                }
                // and add that cluster to the final result
                clusters[clusterCount].addAll(selectedCluster);

                // remove instances in cluster C so they are no longer considered
                instanceList.removeAll(selectedCluster);

                if (logClusterProgress) {
                    final int selectedClusterSize = selectedCluster.size();
                    int i = 0;
                    while (i < selectedClusterSize - 1) {
                        clusterProgressLogger.lightUpdate();
                        i++;
                    }
                    // make sure there is at least one "full" update per loop
                    if (i < selectedClusterSize) {
                        clusterProgressLogger.update();
                    }
                }

                // we just created a new cluster
                clusterCount++;
                // next iteration is over (G - C)
            }
        } catch (RuntimeException e) {
            LOGGER.error("Caught runtime exception - rethrowing", e);
            throw e;
        } catch (Exception e) {
            LOGGER.error("Caught exception - rethrowing as ClusteringException", e);
            throw new ClusteringException(e);
        }

        // add singleton clusters to the end so the largest clusters are at the start of the list
        for (final int singleton : singletonClusters) {
            clusters[clusterCount].add(singleton);
            clusterCount++;
        }

        clusterProgressLogger.stop("Clustering completed.");
        return getClusters();
    }
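Note that notClustered is a LinkedList<Integer>, so notClustered.remove(i) and notClustered.remove(minDistanceInstanceIndex) resolve to the remove(int index) overload: they remove by position and return the removed Integer, rather than removing a boxed value.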
From source file:net.sourceforge.seqware.pipeline.plugins.MetadataTest.java
    @Test
    public void testListAllTables() {
        systemErr.println("Test List all Tables\n");
        launchPlugin("--list-tables");
        String output = getOut();
        // fix up test to support basic workflow/run creation tools,
        // see git commit 4862eaba7f3d7c7495155dc913ead745b544f358
        String[] tables = new String[] { "TableName", "study", "experiment", "sample", "ius", "lane",
                "sequencer_run", "workflow", "workflow_run" };
        LinkedList<String> stuff = new LinkedList<>(Arrays.asList(output.split("\n")));
        for (String table : tables) {
            int index = stuff.indexOf(table);
            if (index >= 0) {
                stuff.remove(index);
            } else {
                Assert.fail("Missing a table:" + table);
            }
        }
        while (!stuff.isEmpty()) {
            String s = stuff.poll();
            Assert.fail("There are extra tables listed: " + s);
        }
    }
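The indexOf/remove(int) pair could equally be written as a single stuff.remove(table) call, since the remove(Object) overload returns false when no matching element exists.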
From source file:org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.java
    /**
     * Add new entries to the PendingUncached list.
     *
     * @param neededUncached The number of replicas that need to be uncached.
     * @param cachedBlock The block which needs to be uncached.
     * @param cached A list of DataNodes currently caching the block.
     * @param pendingUncached A list of DataNodes that will soon uncache the block.
     */
    private void addNewPendingUncached(int neededUncached, CachedBlock cachedBlock,
            List<DatanodeDescriptor> cached, List<DatanodeDescriptor> pendingUncached) {
        // Figure out which replicas can be uncached.
        LinkedList<DatanodeDescriptor> possibilities = new LinkedList<DatanodeDescriptor>();
        for (DatanodeDescriptor datanode : cached) {
            if (!pendingUncached.contains(datanode)) {
                possibilities.add(datanode);
            }
        }
        while (neededUncached > 0) {
            if (possibilities.isEmpty()) {
                LOG.warn("Logic error: we're trying to uncache more replicas than "
                        + "actually exist for " + cachedBlock);
                return;
            }
            DatanodeDescriptor datanode = possibilities.remove(random.nextInt(possibilities.size()));
            pendingUncached.add(datanode);
            boolean added = datanode.getPendingUncached().add(cachedBlock);
            assert added;
            neededUncached--;
        }
    }
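possibilities.remove(random.nextInt(possibilities.size())) selects a replica uniformly at random without replacement. On a LinkedList, remove(int index) must first walk to the index, so each removal is O(n); an ArrayList would not help asymptotically either, since it shifts elements on removal.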
From source file:org.nuxeo.ecm.platform.routing.core.impl.GraphRunner.java
    protected void recursiveCancelInput(GraphRoute graph, GraphNode originalNode,
            LinkedList<GraphNode> pendingNodes) {
        LinkedList<GraphNode> todo = new LinkedList<GraphNode>();
        todo.add(originalNode);
        Set<String> done = new HashSet<String>();
        while (!todo.isEmpty()) {
            GraphNode node = todo.pop();
            done.add(node.getId());
            for (Transition t : node.getInputTransitions()) {
                if (t.loop) {
                    // don't recurse through loop transitions
                    continue;
                }
                GraphNode source = t.source;
                if (done.contains(source.getId())) {
                    // looping somewhere TODO check it's not happening
                    continue;
                }
                source.setCanceled();
                State state = source.getState();
                source.setState(State.READY);
                pendingNodes.remove(node);
                if (state == State.SUSPENDED) {
                    // we're suspended on a task, cancel it and stop recursion
                    source.cancelTasks();
                } else {
                    // else recurse
                    todo.add(source);
                }
            }
        }
    }
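pendingNodes.remove(node) uses the remove(Object) overload. The todo worklist is also a LinkedList, but todo.add(...) appends at the tail while todo.pop() removes from the head, so the traversal is actually breadth-first despite the stack-flavored method name.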
From source file:com.mine.psf.PsfPlaybackService.java
    private void generateShuffleList() {
        shuffleList = new int[playList.length];
        if (playShuffle) {
            // make a shuffle list
            // algorithm: get rand(),
            LinkedList<Integer> tmpList = new LinkedList<Integer>();
            for (int i = 0; i < playList.length; ++i) {
                tmpList.add(i);
            }
            Random r = new Random();
            for (int i = 0; i < playList.length; ++i) {
                int tmp = r.nextInt(playList.length - i);
                shuffleList[i] = tmpList.get(tmp);
                tmpList.remove(tmp);
            }
        } else {
            for (int i = 0; i < playList.length; ++i) {
                shuffleList[i] = i;
            }
        }
        // StringBuilder sb = new StringBuilder();
        // for (int i = 0; i < playList.length; ++i) {
        //     sb.append(shuffleList[i]);
        //     sb.append(",");
        // }
        // Log.d(LOGTAG, "GetShuffleList: " + sb.toString());
    }
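tmpList is a LinkedList<Integer> and tmp is an int, so tmpList.remove(tmp) resolves to remove(int index) and removes by position, which is exactly what this shuffle needs; removing the boxed value instead would require tmpList.remove(Integer.valueOf(tmp)).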