List of usage examples for java.util LinkedList removeAll
boolean removeAll(Collection<?> c);
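removeAll deletes from the list every element that is also contained in the argument collection (membership is decided by equals) and returns true if the list changed as a result. A minimal, self-contained sketch of that contract (the class name is ours, not from any source file below):

import java.util.Arrays;
import java.util.LinkedList;

public class RemoveAllDemo {
    public static void main(String[] args) {
        LinkedList<String> names = new LinkedList<>(Arrays.asList("a", "b", "c", "b"));
        boolean changed = names.removeAll(Arrays.asList("b", "x")); // removes both "b" entries
        System.out.println(changed); // true
        System.out.println(names);   // [a, c]
    }
}

Note that LinkedList inherits this method from AbstractCollection: it walks the list once and calls contains(...) on the argument for each element, so passing a HashSet rather than a list as the argument is the usual way to keep large removals fast.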
From source file:com.waz.zclient.pages.extendedcursor.emoji.EmojiKeyboardLayout.java
private List<String> getFilteredList(String[] strings, Set<String> unsupported) {
    List<String> list = Arrays.asList(strings);
    if (unsupported == null || unsupported.size() == 0) {
        return list;
    }
    LinkedList<String> filteredList = new LinkedList<>(list);
    filteredList.removeAll(unsupported);
    return filteredList;
}
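A note on why the copy matters in this example: Arrays.asList returns a fixed-size list backed by the array, so calling removeAll directly on it would fail as soon as an element matched. A small illustration of the pitfall (values are made up; assumes java.util imports):

List<String> fixed = Arrays.asList("grinning", "flag_xx");
// fixed.removeAll(Collections.singleton("flag_xx")); // throws UnsupportedOperationException
List<String> copy = new LinkedList<>(fixed);          // mutable copy
copy.removeAll(Collections.singleton("flag_xx"));     // OK: copy is now [grinning]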
From source file:org.duracloud.account.app.controller.AccountGroupsController.java
private Collection<DuracloudUser> getAvailableUsers(AccountService as, Collection<DuracloudUser> groupUsers) {
    Set<DuracloudUser> allUsers = as.getUsers();
    LinkedList<DuracloudUser> list = new LinkedList<DuracloudUser>();
    list.addAll(allUsers);
    for (DuracloudUser user : allUsers) {
        if (user.isRoot()) {
            list.remove(user);
        }
    }
    if (groupUsers != null) {
        list.removeAll(groupUsers);
    }
    Collections.sort(list, USERNAME_COMPARATOR);
    return list;
}
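The shape of this example is a set difference materialized as a sorted list: copy everything, then removeAll the exclusions. A stripped-down sketch with plain strings standing in for DuracloudUser (names are made up; assumes java.util imports):

Set<String> allUsers = new HashSet<>(Arrays.asList("alice", "bob", "root"));
Collection<String> groupUsers = Arrays.asList("bob");
LinkedList<String> available = new LinkedList<>(allUsers);
available.removeAll(Collections.singleton("root")); // drop special accounts
available.removeAll(groupUsers);                    // drop users already in the group
Collections.sort(available);                        // [alice]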
From source file:com.linkedin.pinot.controller.helix.core.rebalance.ReplicaGroupRebalanceSegmentStrategy.java
/**
 * Uniformly distribute segments across servers in a replica group. It adopts a simple algorithm that pre-computes
 * the number of segments per server after rebalance and tries to assign/remove segments to/from a server until it
 * has the correct number of segments.
 *
 * @param serversInReplicaGroup A list of servers within the same replica group
 * @param serverToSegments A mapping of servers to their segments
 * @param segmentsToCover The set of segments that must be covered by the replica group
 */
private void rebalanceReplicaGroup(List<String> serversInReplicaGroup,
        Map<String, LinkedList<String>> serverToSegments, Set<String> segmentsToCover) {
    // Make sure that all the segments are covered only once within a replica group.
    Set<String> currentCoveredSegments = new HashSet<>();
    for (String server : serversInReplicaGroup) {
        Iterator<String> segmentIter = serverToSegments.get(server).iterator();
        while (segmentIter.hasNext()) {
            String segment = segmentIter.next();
            if (currentCoveredSegments.contains(segment)) {
                segmentIter.remove();
            } else {
                currentCoveredSegments.add(segment);
            }
        }
    }

    // Compute the segments to add
    LinkedList<String> segmentsToAdd = new LinkedList<>(segmentsToCover);
    segmentsToAdd.removeAll(currentCoveredSegments);

    // Compute the number of segments per server after rebalance
    int numSegmentsPerServer = segmentsToCover.size() / serversInReplicaGroup.size();

    // Remove segments from servers that have more segments than numSegmentsPerServer
    for (String server : serversInReplicaGroup) {
        LinkedList<String> segmentsInServer = serverToSegments.get(server);
        int segmentToMove = numSegmentsPerServer - segmentsInServer.size();
        if (segmentToMove < 0) {
            // Server has more segments than needed, remove segments from this server
            for (int i = 0; i < Math.abs(segmentToMove); i++) {
                segmentsToAdd.add(segmentsInServer.pop());
            }
        }
    }

    // Add segments to servers that have fewer segments than numSegmentsPerServer
    for (String server : serversInReplicaGroup) {
        LinkedList<String> segmentsInServer = serverToSegments.get(server);
        int segmentToMove = numSegmentsPerServer - segmentsInServer.size();
        if (segmentToMove > 0) {
            // Server has fewer segments than needed, add segments to this server
            for (int i = 0; i < segmentToMove; i++) {
                segmentsInServer.add(segmentsToAdd.pop());
            }
        }
    }

    // Handle the remainder of segments to add
    int count = 0;
    while (!segmentsToAdd.isEmpty()) {
        int serverIndex = count % serversInReplicaGroup.size();
        serverToSegments.get(serversInReplicaGroup.get(serverIndex)).add(segmentsToAdd.pop());
        count++;
    }
}
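The removeAll call above computes the still-uncovered segments in one step: start from everything that must be covered and subtract what is already assigned. A reduced sketch of just that step (segment names are made up; assumes java.util imports):

Set<String> segmentsToCover = new HashSet<>(Arrays.asList("seg0", "seg1", "seg2"));
Set<String> currentCoveredSegments = new HashSet<>(Arrays.asList("seg0", "seg2"));
LinkedList<String> segmentsToAdd = new LinkedList<>(segmentsToCover);
segmentsToAdd.removeAll(currentCoveredSegments); // only "seg1" still needs a server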
From source file:org.alfresco.repo.rendition.StandardRenditionLocationResolverImpl.java
private RenditionLocationImpl findOrCreateTemplatedPath(NodeRef sourceNode, String path, NodeRef companyHome) {
    if (log.isDebugEnabled()) {
        StringBuilder msg = new StringBuilder();
        msg.append("FindOrCreateTemplatedPath for ").append(sourceNode).append(", ").append(path);
        log.debug(msg.toString());
    }

    NodeService nodeService = serviceRegistry.getNodeService();

    List<String> pathElements = Arrays.asList(path.split("/"));
    LinkedList<String> folderElements = new LinkedList<String>(pathElements);
    // We need to strip out any empty strings within the path elements
    // prior to passing this path to the fileFolderService for creation.
    // e.g. "//foo//bar///item.txt" would cause an exception.
    folderElements.removeAll(Arrays.asList(new String[] { "" }));

    // Remove 'Company Home' if it is at the start of the path.
    Serializable companyHomeName = nodeService.getProperty(companyHome, ContentModel.PROP_NAME);
    if (folderElements.getFirst().equals(companyHomeName)) {
        folderElements.removeFirst();
    }

    String fileName = folderElements.removeLast();
    if (fileName == null || fileName.length() == 0) {
        StringBuilder msg = new StringBuilder();
        msg.append("The path must include a valid filename! Path: ").append(path);
        if (log.isDebugEnabled()) {
            log.debug(msg.toString());
        }
        throw new RenditionServiceException(msg.toString());
    }

    FileFolderService fileFolderService = serviceRegistry.getFileFolderService();
    NodeRef parent = companyHome;
    if (!folderElements.isEmpty()) {
        FileInfo parentInfo = FileFolderUtil.makeFolders(fileFolderService, companyHome, folderElements,
                ContentModel.TYPE_FOLDER);
        parent = parentInfo.getNodeRef();
    }

    if (log.isDebugEnabled()) {
        log.debug("folderElements: " + folderElements);
        log.debug("parent: " + parent);
        log.debug("   " + nodeService.getType(parent) + " " + nodeService.getPath(parent));
        log.debug("fileName: " + fileName);
    }

    NodeRef child = fileFolderService.searchSimple(parent, fileName);
    if (log.isDebugEnabled()) {
        StringBuilder msg = new StringBuilder();
        msg.append("RenditionLocation parent=").append(parent).append(", child=").append(child)
                .append(", fileName=").append(fileName);
        log.debug(msg.toString());
        if (child != null) {
            log.debug("child path = " + nodeService.getPath(child));
        }
    }
    return new RenditionLocationImpl(parent, child, fileName);
}
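The removeAll(Arrays.asList(new String[] { "" })) idiom deserves a comment: it deletes every occurrence of a single value, whereas remove("") would delete only the first. Checking it against the path from the comment above (assumes java.util imports):

String path = "//foo//bar///item.txt";
LinkedList<String> elements = new LinkedList<>(Arrays.asList(path.split("/")));
elements.removeAll(Collections.singleton(""));  // removes every empty element in one call
// elements is now [foo, bar, item.txt]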
From source file:fr.inria.oak.paxquery.common.xml.navigation.NavigationTreePattern.java
/**
 * This method will return an {@link ArrayList} containing 3 strings, each representing
 * a transcription of the TreePattern into an aquax expression.
 * The first string will contain only the needed nodes from the pattern.
 * The second string will contain all the nodes in the pattern.
 * The third string will have the needed nodes as mandatory and the others as optional.
 *
 * @return an {@link ArrayList} containing 3 {@link String} objects with the significance described above.
 */
public ArrayList<String> convertToAquax() {
    // the list that will contain the aquax form for the pattern
    ArrayList<String> aquaxTranslations = new ArrayList<String>();

    LinkedList<NavigationTreePatternNode> optionalNodes = this.getNodes();
    LinkedList<NavigationTreePatternNode> neededNodes = new LinkedList<NavigationTreePatternNode>();

    for (NavigationTreePatternNode node : optionalNodes) {
        if (node.nodeStoresSomething()) {
            neededNodes.add(node);
        }
    }
    optionalNodes.removeAll(neededNodes);

    // creating the first string that contains all the needed nodes
    StringBuffer sb = new StringBuffer(), sb2 = new StringBuffer();
    for (NavigationTreePatternNode n : neededNodes) {
        if (n.getParentEdge().n1.getNodeCode() == -1)
            sb.append(n.getNodeCode() + ":" + 0 + ":m " + n.getTag() + " ");
        else
            sb.append(n.getNodeCode() + ":" + n.getParentEdge().n1.getNodeCode() + ":m " + n.getTag() + " ");
    }
    aquaxTranslations.add(sb.toString());
    sb2.append(sb);

    // second and third strings contain both the needed and the other nodes,
    // in one as mandatory, in the other as optional
    for (NavigationTreePatternNode n : optionalNodes) {
        if (n.getParentEdge().n1.getNodeCode() == -1) {
            sb.append(n.getNodeCode() + ":" + 0 + ":m " + n.getTag() + " ");
            sb2.append(n.getNodeCode() + ":" + 0 + ":o " + n.getTag() + " ");
        } else {
            sb.append(n.getNodeCode() + ":" + n.getParentEdge().n1.getNodeCode() + ":m " + n.getTag() + " ");
            sb2.append(n.getNodeCode() + ":" + n.getParentEdge().n1.getNodeCode() + ":o " + n.getTag() + " ");
        }
    }
    aquaxTranslations.add(sb.toString());
    aquaxTranslations.add(sb2.toString());

    return aquaxTranslations;
}
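The needed/optional split above is a common partition idiom: collect the matches into a second list, then removeAll them from the first (matching is by equals, which for these nodes is presumably reference identity). A generic sketch of the same move (assumes java.util imports):

LinkedList<Integer> all = new LinkedList<>(Arrays.asList(1, 2, 3, 4, 5));
LinkedList<Integer> even = new LinkedList<>();
for (Integer n : all) {
    if (n % 2 == 0) {
        even.add(n);
    }
}
all.removeAll(even); // all now holds only the odd numbers [1, 3, 5]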
From source file:org.apache.hadoop.hbase.util.RegionSplitter.java
static void rollingSplit(String tableName, SplitAlgorithm splitAlgo, Configuration conf)
        throws IOException, InterruptedException {
    final int minOS = conf.getInt("split.outstanding", 2);

    HTable table = new HTable(conf, tableName);

    // max outstanding splits. default == 50% of servers
    final int MAX_OUTSTANDING = Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS);

    Path hbDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(hbDir, table.getName());
    Path splitFile = new Path(tableDir, "_balancedSplit");
    FileSystem fs = FileSystem.get(conf);

    // get a list of daughter regions to create
    LinkedList<Pair<byte[], byte[]>> tmpRegionSet = getSplits(table, splitAlgo);
    LinkedList<Pair<byte[], byte[]>> outstanding = Lists.newLinkedList();
    int splitCount = 0;
    final int origCount = tmpRegionSet.size();

    // all splits must compact & we have 1 compact thread, so 2 split
    // requests to the same RS can stall the outstanding split queue.
    // To fix, group the regions into an RS pool and round-robin through it
    LOG.debug("Bucketing regions by regionserver...");
    TreeMap<String, LinkedList<Pair<byte[], byte[]>>> daughterRegions = Maps.newTreeMap();
    for (Pair<byte[], byte[]> dr : tmpRegionSet) {
        String rsLocation = table.getRegionLocation(dr.getSecond()).getHostnamePort();
        if (!daughterRegions.containsKey(rsLocation)) {
            LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
            daughterRegions.put(rsLocation, entry);
        }
        daughterRegions.get(rsLocation).add(dr);
    }
    LOG.debug("Done with bucketing. Split time!");
    long startTime = System.currentTimeMillis();

    // open the split file and modify it as splits finish
    FSDataInputStream tmpIn = fs.open(splitFile);
    byte[] rawData = new byte[tmpIn.available()];
    tmpIn.readFully(rawData);
    tmpIn.close();
    FSDataOutputStream splitOut = fs.create(splitFile);
    splitOut.write(rawData);

    try {
        // *** split code ***
        while (!daughterRegions.isEmpty()) {
            LOG.debug(daughterRegions.size() + " RS have regions to split.");

            // Get RegionServer : region count mapping
            final TreeMap<ServerName, Integer> rsSizes = Maps.newTreeMap();
            Map<HRegionInfo, ServerName> regionsInfo = table.getRegionLocations();
            for (ServerName rs : regionsInfo.values()) {
                if (rsSizes.containsKey(rs)) {
                    rsSizes.put(rs, rsSizes.get(rs) + 1);
                } else {
                    rsSizes.put(rs, 1);
                }
            }

            // sort the RS by the number of regions they have
            List<String> serversLeft = Lists.newArrayList(daughterRegions.keySet());
            Collections.sort(serversLeft, new Comparator<String>() {
                public int compare(String o1, String o2) {
                    return rsSizes.get(o1).compareTo(rsSizes.get(o2));
                }
            });

            // round-robin through the RS list. Choose the lightest-loaded servers
            // first to keep the master from load-balancing regions as we split.
            for (String rsLoc : serversLeft) {
                Pair<byte[], byte[]> dr = null;

                // find a region in the RS list that hasn't been moved
                LOG.debug("Finding a region on " + rsLoc);
                LinkedList<Pair<byte[], byte[]>> regionList = daughterRegions.get(rsLoc);
                while (!regionList.isEmpty()) {
                    dr = regionList.pop();

                    // get current region info
                    byte[] split = dr.getSecond();
                    HRegionLocation regionLoc = table.getRegionLocation(split);

                    // if this region moved locations
                    String newRs = regionLoc.getHostnamePort();
                    if (newRs.compareTo(rsLoc) != 0) {
                        LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs
                                + ". Relocating...");
                        // relocate it, don't use it right now
                        if (!daughterRegions.containsKey(newRs)) {
                            LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
                            daughterRegions.put(newRs, entry);
                        }
                        daughterRegions.get(newRs).add(dr);
                        dr = null;
                        continue;
                    }

                    // make sure this region wasn't already split
                    byte[] sk = regionLoc.getRegionInfo().getStartKey();
                    if (sk.length != 0) {
                        if (Bytes.equals(split, sk)) {
                            LOG.debug("Region already split on " + splitAlgo.rowToStr(split)
                                    + ". Skipping this region...");
                            ++splitCount;
                            dr = null;
                            continue;
                        }
                        byte[] start = dr.getFirst();
                        Preconditions.checkArgument(Bytes.equals(start, sk),
                                splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk));
                    }

                    // passed all checks! found a good region
                    break;
                }
                if (regionList.isEmpty()) {
                    daughterRegions.remove(rsLoc);
                }
                if (dr == null)
                    continue;

                // we have a good region, time to split!
                byte[] split = dr.getSecond();
                LOG.debug("Splitting at " + splitAlgo.rowToStr(split));
                HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
                admin.split(table.getTableName(), split);

                LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
                if (conf.getBoolean("split.verify", true)) {
                    // we need to verify and rate-limit our splits
                    outstanding.addLast(dr);
                    // with too many outstanding splits, wait for some to finish
                    while (outstanding.size() >= MAX_OUTSTANDING) {
                        finished = splitScan(outstanding, table, splitAlgo);
                        if (finished.isEmpty()) {
                            Thread.sleep(30 * 1000);
                        } else {
                            outstanding.removeAll(finished);
                        }
                    }
                } else {
                    finished.add(dr);
                }

                // mark each finished region as successfully split.
                for (Pair<byte[], byte[]> region : finished) {
                    splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
                            + splitAlgo.rowToStr(region.getSecond()) + "\n");
                    splitCount++;
                    if (splitCount % 10 == 0) {
                        long tDiff = (System.currentTimeMillis() - startTime) / splitCount;
                        LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount + ". Avg Time / Split = "
                                + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
                    }
                }
            }
        }
        if (conf.getBoolean("split.verify", true)) {
            while (!outstanding.isEmpty()) {
                LinkedList<Pair<byte[], byte[]>> finished = splitScan(outstanding, table, splitAlgo);
                if (finished.isEmpty()) {
                    Thread.sleep(30 * 1000);
                } else {
                    outstanding.removeAll(finished);
                    for (Pair<byte[], byte[]> region : finished) {
                        splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
                                + splitAlgo.rowToStr(region.getSecond()) + "\n");
                    }
                }
            }
        }
        LOG.debug("All regions have been successfully split!");
    } finally {
        long tDiff = System.currentTimeMillis() - startTime;
        LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
        LOG.debug("Splits = " + splitCount);
        LOG.debug("Avg Time / Split = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount));

        splitOut.close();
        if (table != null) {
            table.close();
        }
    }
    fs.delete(splitFile, false);
}
From source file:edu.cornell.med.icb.clustering.QTClusterer.java
/**
 * Groups instances into clusters. Returns the indices of the instances
 * that belong to a cluster as an int array in the list result.
 *
 * @param calculator The
 * {@link edu.cornell.med.icb.clustering.SimilarityDistanceCalculator}
 * that should be used when clustering
 * @param qualityThreshold The QT clustering algorithm quality threshold (d)
 * @return The list of clusters.
 */
public List<int[]> cluster(final SimilarityDistanceCalculator calculator, final double qualityThreshold) {
    final ProgressLogger clusterProgressLogger = new ProgressLogger(LOGGER, logInterval,
            "instances clustered");
    clusterProgressLogger.displayFreeMemory = true;
    clusterProgressLogger.expectedUpdates = instanceCount;
    clusterProgressLogger.start("Starting to cluster " + instanceCount + " instances using "
            + parallelTeam.getThreadCount() + " threads.");

    // reset cluster results
    clusterCount = 0;
    // instanceList is the set "G" to cluster
    final LinkedList<Integer> instanceList = new LinkedList<Integer>();
    for (int i = 0; i < instanceCount; i++) {
        clusters[i].clear();

        // set each node in the instance list to it's
        // original position in the source data array
        instanceList.add(i);
    }

    final double ignoreDistance = calculator.getIgnoreDistance();

    // eliminate any instances that will never cluster with anything else
    final IntList singletonClusters = identifySingletonClusters(calculator, qualityThreshold, instanceList,
            clusterProgressLogger);

    final ProgressLogger innerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
            "inner loop iterations");
    innerLoopProgressLogger.displayFreeMemory = false;

    final ProgressLogger outerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
            "outer loop iterations");
    outerLoopProgressLogger.displayFreeMemory = true;

    try {
        // loop over instances until they have all been added to a cluster
        while (!instanceList.isEmpty()) {
            // cluster remaining instances to find the maximum cardinality
            for (int i = 0; i < instanceList.size(); i++) {
                candidateClusters[i].clear();
            }

            if (logOuterLoopProgress) {
                outerLoopProgressLogger.expectedUpdates = instanceList.size();
                outerLoopProgressLogger.start("Entering outer loop for " + instanceList.size()
                        + " iterations");
            }

            // for each i in G (instance list)
            // find instance j such that distance i,j minimum
            parallelTeam.execute(new ParallelRegion() { // NOPMD
                @Override
                public void run() throws Exception { // NOPMD
                    // each thread will populate a different portion of the "candidateCluster"
                    // array so we shouldn't need to worry about concurrent access
                    execute(0, instanceList.size() - 1, new IntegerForLoop() {
                        @Override
                        public void run(final int first, final int last) {
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("first = " + first + ", last = " + last);
                            }
                            for (int i = first; i <= last; i++) {
                                @SuppressWarnings("unchecked")
                                final LinkedList<Integer> notClustered =
                                        (LinkedList<Integer>) instanceList.clone();

                                // add the first instance to the next candidate cluster
                                final IntArrayList candidateCluster = candidateClusters[i];
                                candidateCluster.add(notClustered.remove(i));

                                if (logInnerLoopProgress) {
                                    innerLoopProgressLogger.expectedUpdates = notClustered.size();
                                    innerLoopProgressLogger.start("Entering inner loop for "
                                            + notClustered.size() + " iterations");
                                }

                                // cluster the remaining instances to find the maximum
                                // cardinality find instance j such that distance i,j minimum
                                boolean done = false;
                                while (!done && !notClustered.isEmpty()) {
                                    // find the node that has minimum distance between the
                                    // current cluster and the instances that have not yet
                                    // been clustered.
                                    double minDistance = Double.POSITIVE_INFINITY;
                                    int minDistanceInstanceIndex = 0;
                                    int instanceIndex = 0;
                                    for (final int instance : notClustered) {
                                        double newDistance = ignoreDistance;

                                        final int[] cluster = candidateCluster.elements();
                                        for (int instanceInCluster = 0;
                                                instanceInCluster < candidateCluster.size();
                                                instanceInCluster++) {
                                            final double a = calculator.distance(
                                                    cluster[instanceInCluster], instance);

                                            // if the distance of the instance will force the candidate cluster
                                            // to be larger than the cutoff value, we can stop here
                                            // because we know that this candidate cluster will be too large
                                            if (a >= minDistance) {
                                                newDistance = ignoreDistance;
                                                break;
                                            }

                                            final double b = newDistance;

                                            // This code is inlined from java.lang.Math.max(a, b)
                                            if (a != a) { // a is NaN
                                                newDistance = a;
                                            } else if (a == 0.0d && b == 0.0d
                                                    && Double.doubleToLongBits(a) == negativeZeroDoubleBits) {
                                                newDistance = b;
                                            } else if (a >= b) {
                                                newDistance = a;
                                            } else {
                                                newDistance = b;
                                            }
                                        }

                                        if (newDistance != ignoreDistance && newDistance < minDistance) {
                                            minDistance = newDistance;
                                            minDistanceInstanceIndex = instanceIndex;
                                        }
                                        instanceIndex++;
                                    }

                                    // grow clusters until min distance between new instance
                                    // and cluster reaches quality threshold
                                    // if (diameter(Ai U {j}) > d)
                                    if (minDistance > qualityThreshold) {
                                        done = true;
                                    } else {
                                        // remove the instance from the ones to be considered
                                        final int instance = notClustered.remove(minDistanceInstanceIndex);
                                        // and add it to the newly formed cluster
                                        candidateCluster.add(instance);
                                    }
                                    if (logInnerLoopProgress) {
                                        innerLoopProgressLogger.update();
                                    }
                                }
                                if (logInnerLoopProgress) {
                                    innerLoopProgressLogger.stop("Inner loop completed.");
                                }
                                if (logOuterLoopProgress) {
                                    outerLoopProgressLogger.update();
                                }
                            }
                        }
                    });
                }
            });

            if (logOuterLoopProgress) {
                outerLoopProgressLogger.stop("Outer loop completed.");
            }

            // identify cluster (set C) with maximum cardinality
            int maxCardinality = 0;
            int selectedClusterIndex = -1;
            for (int i = 0; i < instanceList.size(); i++) {
                final int size = candidateClusters[i].size();
                if (LOGGER.isTraceEnabled() && size > 0) {
                    LOGGER.trace("potential cluster " + i + ": "
                            + ArrayUtils.toString(candidateClusters[i]));
                }
                if (size > maxCardinality) {
                    maxCardinality = size;
                    selectedClusterIndex = i;
                }
            }

            final IntArrayList selectedCluster = candidateClusters[selectedClusterIndex];

            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("adding " + selectedCluster.size() + " instances to cluster " + clusterCount);
            }
            // and add that cluster to the final result
            clusters[clusterCount].addAll(selectedCluster);

            // remove instances in cluster C so they are no longer considered
            instanceList.removeAll(selectedCluster);

            if (logClusterProgress) {
                final int selectedClusterSize = selectedCluster.size();
                int i = 0;
                while (i < selectedClusterSize - 1) {
                    clusterProgressLogger.lightUpdate();
                    i++;
                }
                // make sure there is at least one "full" update per loop
                if (i < selectedClusterSize) {
                    clusterProgressLogger.update();
                }
            }

            // we just created a new cluster
            clusterCount++;

            // next iteration is over (G - C)
        }
    } catch (RuntimeException e) {
        LOGGER.error("Caught runtime exception - rethrowing", e);
        throw e;
    } catch (Exception e) {
        LOGGER.error("Caught exception - rethrowing as ClusteringException", e);
        throw new ClusteringException(e);
    }

    // add singleton clusters to the end so the largest clusters are at the start of the list
    for (final int singleton : singletonClusters) {
        clusters[clusterCount].add(singleton);
        clusterCount++;
    }

    clusterProgressLogger.stop("Clustering completed.");
    return getClusters();
}
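One detail worth flagging in instanceList.removeAll(selectedCluster): each element of the LinkedList is tested with contains(...) against the argument, and for a list-like argument that test is itself a linear scan, so the call is O(n * m). For large selected clusters, copying the argument into a HashSet first keeps the whole removal near one pass. A small sketch (assumes java.util imports):

LinkedList<Integer> instanceList = new LinkedList<>(Arrays.asList(0, 1, 2, 3, 4));
List<Integer> selectedCluster = Arrays.asList(1, 3);
instanceList.removeAll(new HashSet<>(selectedCluster)); // instanceList is now [0, 2, 4]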