List of usage examples for java.util.SortedSet.removeAll
boolean removeAll(Collection<?> c);
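Before the project examples below, a minimal self-contained sketch (not taken from any listed project) shows the basic contract: removeAll deletes every element that is also present in the argument collection and returns true only if the set actually changed.

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class RemoveAllDemo {
    public static void main(String[] args) {
        SortedSet<String> set = new TreeSet<String>(Arrays.asList("a", "b", "c", "d"));
        // remove every element that also appears in the argument collection;
        // elements of the argument that are absent from the set ("x") are ignored
        boolean changed = set.removeAll(Arrays.asList("b", "d", "x"));
        System.out.println(changed); // true - the set was modified
        System.out.println(set);     // [a, c] - remaining elements stay sorted
    }
}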
From source file: org.jasig.schedassist.impl.owner.SpringJDBCAvailableScheduleDaoImpl.java
@Transactional
@Override
public AvailableSchedule addToSchedule(final IScheduleOwner owner, final Set<AvailableBlock> blocks) {
    // retrieve existing schedule
    AvailableSchedule stored = retrieve(owner);
    // expand it to minimum possible size
    SortedSet<AvailableBlock> storedExpanded = AvailableBlockBuilder.expand(stored.getAvailableBlocks(), 1);
    // expand the argument to minimum possible size blocks
    SortedSet<AvailableBlock> blocksExpanded = AvailableBlockBuilder.expand(blocks, 1);
    // since AvailableBlock equals and hashCode ignore location, call remove first to
    // get rid of any blocks that have matching times
    storedExpanded.removeAll(blocksExpanded);
    // add the new blocks to the expanded set
    boolean modified = storedExpanded.addAll(blocksExpanded);
    if (modified) {
        replaceSchedule(owner, storedExpanded);
    }
    // retrieve the new complete schedule and return
    return retrieve(owner);
}
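The remove-then-add idiom above matters because a Set does not replace an element it already considers equal: addAll silently keeps the old one. A hedged sketch with a hypothetical Block class (a stand-in, not the project's AvailableBlock) illustrates why removeAll must run first when equality ignores a field:

import java.util.SortedSet;
import java.util.TreeSet;

// Hypothetical stand-in for AvailableBlock: ordering (and thus TreeSet
// membership) uses only the time, deliberately ignoring the location field.
class Block implements Comparable<Block> {
    final int time;
    final String location;
    Block(int time, String location) { this.time = time; this.location = location; }
    public int compareTo(Block o) { return Integer.compare(time, o.time); }
    public String toString() { return time + "@" + location; }
}

public class ReplaceDemo {
    public static void main(String[] args) {
        SortedSet<Block> stored = new TreeSet<Block>();
        stored.add(new Block(9, "room A"));

        SortedSet<Block> updates = new TreeSet<Block>();
        updates.add(new Block(9, "room B"));

        // addAll alone would keep 9@room A: the set already contains an element
        // that compares equal on time, so the update would be ignored.
        // Removing first guarantees the incoming version wins.
        stored.removeAll(updates);
        stored.addAll(updates);
        System.out.println(stored); // [9@room B]
    }
}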
From source file: net.pms.dlna.protocolinfo.DeviceProtocolInfo.java
/**
 * Removes all elements from the {@link Set} for the given
 * {@link DeviceProtocolInfoSource} that are also contained in
 * {@code collection}.
 *
 * @param type the {@link DeviceProtocolInfoSource} type.
 * @param collection a {@link Collection} containing the elements to be
 *            removed.
 * @return {@code true} if this call resulted in a change.
 *
 * @see #remove(DeviceProtocolInfoSource, ProtocolInfo)
 * @see #remove(ProtocolInfo)
 * @see #contains(DeviceProtocolInfoSource, ProtocolInfo)
 * @see #contains(ProtocolInfo)
 * @see #removeAll(Collection)
 */
public boolean removeAll(DeviceProtocolInfoSource<?> type, Collection<ProtocolInfo> collection) {
    setsLock.writeLock().lock();
    try {
        SortedSet<ProtocolInfo> set = protocolInfoSets.get(type);
        if (set != null && set.removeAll(collection)) {
            updateImageProfiles();
            return true;
        }
        return false;
    } finally {
        setsLock.writeLock().unlock();
    }
}
From source file: net.pms.dlna.protocolinfo.DeviceProtocolInfo.java
/**
 * Removes all elements from all the {@link DeviceProtocolInfoSource}
 * {@link Set}s that are also contained in {@code collection}.
 *
 * @param collection a {@link Collection} containing the elements to be
 *            removed.
 * @return {@code true} if this call resulted in a change.
 *
 * @see #remove(DeviceProtocolInfoSource, ProtocolInfo)
 * @see #remove(ProtocolInfo)
 * @see #contains(DeviceProtocolInfoSource, ProtocolInfo)
 * @see #contains(ProtocolInfo)
 * @see #removeAll(DeviceProtocolInfoSource, Collection)
 */
public boolean removeAll(Collection<ProtocolInfo> collection) {
    boolean result = false;
    setsLock.writeLock().lock();
    try {
        for (SortedSet<ProtocolInfo> set : protocolInfoSets.values()) {
            result |= set != null && set.removeAll(collection);
        }
        if (result) {
            updateImageProfiles();
        }
    } finally {
        setsLock.writeLock().unlock();
    }
    return result;
}
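A reduced sketch of the pattern used in both DeviceProtocolInfo methods above: iterate a map of sorted sets, OR together each removeAll result, and act once if anything changed. The locking and the updateImageProfiles hook are elided, and the map contents are invented:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class MultiSetRemoveDemo {
    public static void main(String[] args) {
        Map<String, SortedSet<String>> sets = new HashMap<String, SortedSet<String>>();
        sets.put("upnp", new TreeSet<String>(Arrays.asList("jpeg", "png")));
        sets.put("dlna", new TreeSet<String>(Arrays.asList("png", "mp4")));

        List<String> toRemove = Arrays.asList("png", "gif");
        boolean result = false;
        for (SortedSet<String> set : sets.values()) {
            // OR-assign so a later miss cannot clear an earlier hit
            result |= set != null && set.removeAll(toRemove);
        }
        System.out.println(result); // true - at least one set changed
        System.out.println(sets);   // {dlna=[mp4], upnp=[jpeg]} (map order may vary)
    }
}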
From source file: jenkins.scm.impl.subversion.SubversionSCMSource.java
/**
 * Groups a set of path segments based on a supplied prefix.
 *
 * @param pathSegments the input path segments.
 * @param prefix the prefix to group on.
 * @return a map, all keys will {@link #startsWith(java.util.List, java.util.List)} the input prefix and be
 *         longer than the input prefix, all values will {@link #startsWith(java.util.List, java.util.List)}
 *         their corresponding key.
 */
@NonNull
static SortedMap<List<String>, SortedSet<List<String>>> groupPaths(
        @NonNull SortedSet<List<String>> pathSegments, @NonNull List<String> prefix) {
    // ensure pre-condition is valid and ensure we are using a copy
    pathSegments = filterPaths(pathSegments, prefix);
    SortedMap<List<String>, SortedSet<List<String>>> result =
            new TreeMap<List<String>, SortedSet<List<String>>>(COMPARATOR);
    while (!pathSegments.isEmpty()) {
        List<String> longestPrefix = null;
        int longestIndex = -1;
        for (List<String> pathSegment : pathSegments) {
            if (longestPrefix == null) {
                longestPrefix = pathSegment;
                longestIndex = indexOfNextWildcard(pathSegment, prefix.size());
            } else {
                int index = indexOfNextWildcard(pathSegment, prefix.size());
                if (index > longestIndex) {
                    longestPrefix = pathSegment;
                    longestIndex = index;
                }
            }
        }
        assert longestPrefix != null;
        longestPrefix = new ArrayList<String>(longestPrefix.subList(0, longestIndex));
        SortedSet<List<String>> group = filterPaths(pathSegments, longestPrefix);
        result.put(longestPrefix, group);
        pathSegments.removeAll(group);
    }
    String optimization;
    while (null != (optimization = getOptimizationPoint(result.keySet(), prefix.size()))) {
        List<String> optimizedPrefix = copyAndAppend(prefix, optimization);
        SortedSet<List<String>> optimizedGroup = new TreeSet<List<String>>(COMPARATOR);
        for (Iterator<Map.Entry<List<String>, SortedSet<List<String>>>> iterator =
                result.entrySet().iterator(); iterator.hasNext();) {
            Map.Entry<List<String>, SortedSet<List<String>>> entry = iterator.next();
            if (startsWith(entry.getKey(), optimizedPrefix)) {
                iterator.remove();
                optimizedGroup.addAll(entry.getValue());
            }
        }
        result.put(optimizedPrefix, optimizedGroup);
    }
    return result;
}
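The first loop above uses removeAll as a worklist-shrinking step: each pass carves a group out of pathSegments until the set is empty. A minimal sketch of that pattern; the grouping rule here (first path component) is invented for illustration:

import java.util.Arrays;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class GroupDemo {
    public static void main(String[] args) {
        SortedSet<String> work = new TreeSet<String>(
                Arrays.asList("a/x", "a/y", "b/z", "c/w"));
        SortedMap<String, SortedSet<String>> groups = new TreeMap<String, SortedSet<String>>();
        while (!work.isEmpty()) {
            String prefix = work.first().split("/")[0];
            SortedSet<String> group = new TreeSet<String>();
            for (String path : work)
                if (path.startsWith(prefix + "/"))
                    group.add(path);
            groups.put(prefix, group);
            work.removeAll(group); // shrink the worklist; the loop ends when it is empty
        }
        System.out.println(groups); // {a=[a/x, a/y], b=[b/z], c=[c/w]}
    }
}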
From source file: de.uni_potsdam.hpi.asg.logictool.mapping.SequenceBasedAndGateDecomposer.java
private void removeCandidates(SortedSet<IOBehaviour> sequencesFront, SortedSet<IOBehaviour> sequencesBack,
        Set<IOBehaviour> newSequences, Set<IOBehaviour> rmSequences) {
    removeSubSequences(sequencesFront, sequencesBack, newSequences, rmSequences); // new->front,back ; set rm
    sequencesBack.removeAll(rmSequences);
    sequencesFront.removeAll(rmSequences);
    newSequences.removeAll(rmSequences);
    if (rmSequences.size() > 0) {
        rmSub += rmSequences.size();
        logger.debug("rmSub removed " + rmSequences.size() + " candidates");
    }
    // checkFalling(newSequences, rmSequences, term, relevant, partitions); //set rm
    // sequencesBack.removeAll(rmSequences);
    // sequencesFront.removeAll(rmSequences);
    newSequences.clear();
    // if (rmSequences.size() > 0) {
    //     rmFall += rmSequences.size();
    //     logger.debug("chkFall removed " + rmSequences.size() + " candidates");
    // }
}
From source file: cerrla.LocalCrossEntropyDistribution.java
/**
 * Modifies the policy values before updating (cutting the values down to
 * size).
 *
 * @param elites
 *            The policy values to modify.
 * @param numElite
 *            The minimum number of elite samples.
 * @param staleValue
 *            The number of policies a sample hangs around for.
 * @param minValue
 *            The minimum observed value.
 * @return The policy values that were removed.
 */
private SortedSet<PolicyValue> preUpdateModification(SortedSet<PolicyValue> elites, int numElite,
        int staleValue, double minValue) {
    // Firstly, remove any policy values that have been around for more than N steps
    // Make a backup - just in case the elites are empty afterwards
    SortedSet<PolicyValue> backup = new TreeSet<PolicyValue>(elites);
    // Only remove stuff if the elites are a representative solution
    if (!ProgramArgument.GLOBAL_ELITES.booleanValue()) {
        int iteration = policyGenerator_.getPoliciesEvaluated();
        for (Iterator<PolicyValue> iter = elites.iterator(); iter.hasNext();) {
            PolicyValue pv = iter.next();
            if (iteration - pv.getIteration() >= staleValue) {
                if (ProgramArgument.RETEST_STALE_POLICIES.booleanValue())
                    policyGenerator_.retestPolicy(pv.getPolicy());
                iter.remove();
            }
        }
    }
    if (elites.isEmpty())
        elites.addAll(backup);

    SortedSet<PolicyValue> tailSet = null;
    if (elites.size() > numElite) {
        // Find the N_E value
        Iterator<PolicyValue> pvIter = elites.iterator();
        PolicyValue currentPV = null;
        for (int i = 0; i < numElite; i++)
            currentPV = pvIter.next();
        // Iter at N_E value. Remove any values less than N_E's value
        tailSet = new TreeSet<PolicyValue>(elites.tailSet(new PolicyValue(null, currentPV.getValue(), -1)));
        elites.removeAll(tailSet);
    }
    return tailSet;
}
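The tailSet/removeAll pairing above is a compact way to truncate a sorted set to its best N entries while keeping the removed tail. A minimal sketch under the assumption that the set orders best-first; plain integers with a reversed comparator stand in for PolicyValue:

import java.util.Arrays;
import java.util.Collections;
import java.util.SortedSet;
import java.util.TreeSet;

public class TrimDemo {
    public static void main(String[] args) {
        // reverse order so the "best" (largest) scores come first
        SortedSet<Integer> scores = new TreeSet<Integer>(Collections.reverseOrder());
        scores.addAll(Arrays.asList(90, 80, 70, 60, 50));

        int keep = 2;
        if (scores.size() > keep) {
            // find the score at position keep, then drop it and everything after it
            Integer cutoff = scores.toArray(new Integer[0])[keep]; // 70
            SortedSet<Integer> removed = new TreeSet<Integer>(scores.tailSet(cutoff));
            scores.removeAll(removed);
            System.out.println(scores);  // [90, 80] - the kept elite
            System.out.println(removed); // [70, 60, 50] - returned to the caller
        }
    }
}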
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
/**
 * Write a list of the user defined functions to a file.
 *
 * @param db
 * @param showFuncs
 *            are the functions we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *             Throws this exception if an unexpected error occurs.
 */
private int showFunctions(Hive db, ShowFunctionsDesc showFuncs) throws HiveException {
    // get the functions for the desired pattern - populate the output stream
    Set<String> funcs = null;
    if (showFuncs.getPattern() != null) {
        LOG.info("pattern: " + showFuncs.getPattern());
        if (showFuncs.getIsLikePattern()) {
            funcs = FunctionRegistry.getFunctionNamesByLikePattern(showFuncs.getPattern());
        } else {
            console.printInfo("SHOW FUNCTIONS is deprecated, please use SHOW FUNCTIONS LIKE instead.");
            funcs = FunctionRegistry.getFunctionNames(showFuncs.getPattern());
        }
        LOG.info("results : " + funcs.size());
    } else {
        funcs = FunctionRegistry.getFunctionNames();
    }

    // write the results in the file
    DataOutputStream outStream = getOutputStream(showFuncs.getResFile());
    try {
        SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
        // To remove the primitive types
        sortedFuncs.removeAll(serdeConstants.PrimitiveTypes);
        Iterator<String> iterFuncs = sortedFuncs.iterator();
        while (iterFuncs.hasNext()) {
            // create a row per function name
            outStream.writeBytes(iterFuncs.next());
            outStream.write(terminator);
        }
    } catch (FileNotFoundException e) {
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (IOException e) {
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e);
    } finally {
        IOUtils.closeStream(outStream);
    }
    return 0;
}
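The subtract-before-print idiom above in isolation: copy the names into a TreeSet for sorted output, then removeAll a fixed exclusion list before writing. The registry contents and exclusion names here are invented:

import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class FilterDemo {
    public static void main(String[] args) {
        // hypothetical registry contents and exclusion list
        List<String> registered = Arrays.asList("concat", "int", "string", "upper");
        List<String> excluded = Arrays.asList("int", "string");

        SortedSet<String> sorted = new TreeSet<String>(registered);
        sorted.removeAll(excluded); // drop the built-in type names before printing
        for (String name : sorted)
            System.out.println(name); // concat, upper
    }
}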
From source file: spade.storage.CompressedTextFile.java
public static boolean encodeAncestorsSuccessors(
        HashMap.Entry<Integer, Pair<SortedSet<Integer>, SortedSet<Integer>>> nodeToCompress) {
    // find reference node
    Integer id = nodeToCompress.getKey();
    SortedSet<Integer> ancestors = nodeToCompress.getValue().first();
    SortedSet<Integer> successors = nodeToCompress.getValue().second();
    // first integer is the max of nodes in common, the second one is the number of 0
    // in the corresponding bit list
    Pair<Integer, Integer> maxNodesInCommonAncestor = new Pair<Integer, Integer>(0, 0);
    Pair<Integer, Integer> maxNodesInCommonSuccessor = new Pair<Integer, Integer>(0, 0);
    String bitlistAncestor = "";
    int layerAncestor = 1;
    Integer referenceAncestor = -1;
    SortedSet<Integer> referenceAncestorList = new TreeSet<Integer>();
    String bitlistSuccessor = "";
    int layerSuccessor = 1;
    Integer referenceSuccessor = -1;
    SortedSet<Integer> referenceSuccessorList = new TreeSet<Integer>();
    for (Integer possibleReferenceID = 1; possibleReferenceID < id; possibleReferenceID++) {
        // for each node in the W last nodes seen, compute the proximity, i.e. the number of
        // successors of the current node that also are successors of the possibleReference node.
        Pair<Pair<Integer, SortedSet<Integer>>, Pair<Integer, SortedSet<Integer>>> asl =
                uncompressAncestorsSuccessorsWithLayer(possibleReferenceID, true, true);
        if (asl.first().first() < L) {
            Pair<Pair<Integer, Integer>, String> nodesInCommonAncestor =
                    commonNodes(asl.first().second(), ancestors);
            int numberOfOneAncestor = nodesInCommonAncestor.first().first();
            int numberOfZeroAncestor = nodesInCommonAncestor.first().second();
            int maxNumberOfOneAncestor = maxNodesInCommonAncestor.first();
            int maxNumberOfZeroAncestor = maxNodesInCommonAncestor.second();
            if (numberOfOneAncestor > maxNumberOfOneAncestor
                    || (numberOfOneAncestor == maxNumberOfOneAncestor
                            && numberOfZeroAncestor < maxNumberOfZeroAncestor)) {
                maxNodesInCommonAncestor = nodesInCommonAncestor.first();
                bitlistAncestor = nodesInCommonAncestor.second();
                referenceAncestor = possibleReferenceID;
                layerAncestor = asl.first().first() + 1;
                referenceAncestorList = asl.first().second();
            }
        }
        if (asl.second().first() < L) {
            Pair<Pair<Integer, Integer>, String> nodesInCommonSuccessor =
                    commonNodes(asl.second().second(), successors);
            int numberOfOneSuccessor = nodesInCommonSuccessor.first().first();
            int numberOfZeroSuccessor = nodesInCommonSuccessor.first().second();
            int maxNumberOfOneSuccessor = maxNodesInCommonSuccessor.first();
            int maxNumberOfZeroSuccessor = maxNodesInCommonSuccessor.second();
            if (numberOfOneSuccessor > maxNumberOfOneSuccessor
                    || (numberOfOneSuccessor == maxNumberOfOneSuccessor
                            && numberOfZeroSuccessor < maxNumberOfZeroSuccessor)) {
                maxNodesInCommonSuccessor = nodesInCommonSuccessor.first();
                bitlistSuccessor = nodesInCommonSuccessor.second();
                referenceSuccessor = possibleReferenceID;
                layerSuccessor = asl.second().first() + 1;
                referenceSuccessorList = asl.second().second();
            }
        }
    }
    // encode ancestor list
    SortedSet<Integer> remainingNodesAncestor = new TreeSet<Integer>();
    remainingNodesAncestor.addAll(ancestors);
    // encode reference
    String encoding = layerAncestor + " ";
    if (maxNodesInCommonAncestor.first() > 0) {
        encoding = encoding + (referenceAncestor - id) + " " + bitlistAncestor + "";
        // keep only remaining nodes
        remainingNodesAncestor.removeAll(referenceAncestorList);
    } else {
        encoding = encoding + "_";
    }
    // encode consecutive nodes and delta encoding
    Integer previousNode = id;
    int countConsecutives = 0;
    for (Integer nodeID : remainingNodesAncestor) {
        Integer delta = nodeID - previousNode;
        if (delta == 1) {
            countConsecutives++;
        } else {
            if (countConsecutives > 0) {
                encoding = encoding + ":" + countConsecutives;
                countConsecutives = 1;
            }
            encoding = encoding + " " + delta;
            countConsecutives = 0;
        }
        previousNode = nodeID;
    }
    // encode successor list
    SortedSet<Integer> remainingNodesSuccessor = new TreeSet<Integer>();
    remainingNodesSuccessor.addAll(successors);
    // encode reference
    encoding = encoding + " / " + layerSuccessor + " ";
    if (maxNodesInCommonSuccessor.first() > 0) {
        encoding = encoding + (referenceSuccessor - id) + " " + bitlistSuccessor + "";
        // keep only remaining nodes
        remainingNodesSuccessor.removeAll(referenceSuccessorList);
    } else {
        encoding = encoding + "_ ";
    }
    // encode consecutive nodes and delta encoding
    previousNode = id;
    countConsecutives = 0;
    for (Integer nodeID : remainingNodesSuccessor) {
        Integer delta = nodeID - previousNode;
        if (delta == 1) {
            countConsecutives++;
        } else {
            if (countConsecutives > 0) {
                encoding = encoding + ":" + countConsecutives;
                countConsecutives = 1;
            }
            encoding = encoding + " " + delta;
            countConsecutives = 0;
        }
        previousNode = nodeID;
    }
    put(scaffoldWriter, id, encoding);
    return true;
}
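The compression step above relies on removeAll to subtract a reference node's list before delta-encoding whatever remains. A toy version of that subtract-then-delta idea (the node ids and sets are invented):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class DeltaDemo {
    public static void main(String[] args) {
        SortedSet<Integer> ancestors = new TreeSet<Integer>(Arrays.asList(3, 5, 6, 9));
        SortedSet<Integer> referenceList = new TreeSet<Integer>(Arrays.asList(5, 6));

        // subtract the nodes already covered by the reference
        SortedSet<Integer> remaining = new TreeSet<Integer>(ancestors);
        remaining.removeAll(referenceList);

        // delta-encode the survivors against a starting id
        int previous = 0; // hypothetical node id
        StringBuilder encoding = new StringBuilder();
        for (int node : remaining) {
            encoding.append(node - previous).append(' ');
            previous = node;
        }
        System.out.println(encoding.toString().trim()); // "3 6" for remaining [3, 9]
    }
}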
From source file: uk.ac.liverpool.thumbnails.PDFService.java
@Override
public FontInformation[] extractFontList(URI u, File fff) throws MalformedURLException, IOException {
    SortedSet<FontInformation> ret = new TreeSet<FontInformation>();
    PDDocument document = getPages(u, fff);
    List pages = document.getDocumentCatalog().getAllPages();
    int i = 0;
    // The code down here is easier as it gets all the fonts used in the document. Still, this
    // would include unused fonts, so we get the fonts page by page and add them to a Hash table.
    for (COSObject c : document.getDocument().getObjectsByType(COSName.FONT)) {
        if (c == null || !(c.getObject() instanceof COSDictionary))
            continue;
        COSDictionary fontDictionary = (COSDictionary) c.getObject();
        // Page-by-page alternative (unused):
        // int pagen = document.getNumberOfPages();
        // i = 0;
        // for (int p = 0; p < pagen; p++) {
        //     PDPage page = (PDPage) pages.get(p);
        //     PDResources res = page.findResources();
        //     // for each page resources
        //     if (res == null) continue;
        //     // get the font dictionary
        //     COSDictionary fonts = (COSDictionary) res.getCOSDictionary().getDictionaryObject(COSName.FONT);
        //     for (COSName fontName : fonts.keySet()) {
        //         COSObject font = (COSObject) fonts.getItem(fontName);
        //         // if the font has already been visited we ignore it
        //         long objectId = font.getObjectNumber().longValue();
        //         if (ret.get(objectId) != null) continue;
        //         if (font == null || !(font.getObject() instanceof COSDictionary)) continue;
        //         COSDictionary fontDictionary = (COSDictionary) font.getObject();

        // Type MUST be Font
        if (!fontDictionary.getNameAsString(COSName.TYPE).equals("Font"))
            continue;
        // get the variables
        FontInformation fi = new FontInformation();
        fi.fontType = fontDictionary.getNameAsString(COSName.SUBTYPE);
        String baseFont = fontDictionary.getNameAsString(COSName.BASE_FONT);
        if (baseFont == null)
            continue;
        if (Arrays.binarySearch(standard14, baseFont) >= 0)
            continue;
        COSDictionary fontDescriptor = (COSDictionary) fontDictionary.getDictionaryObject(COSName.FONT_DESC);
        COSBase enc = fontDictionary.getItem(COSName.ENCODING);
        COSBase uni = fontDictionary.getItem(COSName.TO_UNICODE);
        int firstChar = fontDictionary.getInt(COSName.FIRST_CHAR);
        int lastChar = fontDictionary.getInt(COSName.LAST_CHAR);
        String encoding;
        boolean toUnicode = uni != null;
        if (enc == null) {
            encoding = "standard14";
        } else if (enc instanceof COSString) {
            encoding = ((COSString) enc).getString();
        } else {
            encoding = "table";
        }
        fi.isSubset = false;
        boolean t = true;
        // Type1 and TrueType fonts can have subsets defining the base name: six uppercase
        // letters followed by '+' (see 5.5.3, PDF Reference 1.6)
        if (baseFont != null) {
            if (baseFont.length() > 6) {
                for (int k = 0; k < 6; k++)
                    if (!Character.isUpperCase(baseFont.charAt(k)))
                        t = false;
                if (baseFont.charAt(6) != '+')
                    t = false;
            } else
                t = false;
            fi.isSubset = t;
            if (fi.isSubset)
                baseFont = baseFont.substring(7);
        }
        fi.fontFlags = 0;
        if (fi.fontType.equals(COSName.TYPE0.getName()) || fi.fontType.equals(COSName.TYPE3.getName()))
            fi.isEmbedded = true;
        if (fontDescriptor != null) {
            // in Type1 a CharSet entry indicates the font is subsetted
            if (fontDescriptor.getItem(COSName.CHAR_SET) != null)
                fi.isSubset = true;
            if (fontDescriptor.getItem(COSName.FONT_FILE) != null
                    || fontDescriptor.getItem(COSName.FONT_FILE3) != null
                    || fontDescriptor.getItem(COSName.FONT_FILE2) != null)
                fi.isEmbedded = true;
            fi.fontFlags = fontDescriptor.getInt(COSName.getPDFName("Flags"));
            fi.fontFamily = fontDescriptor.getString(COSName.FONT_FAMILY);
            fi.fontStretch = fontDescriptor.getString(COSName.FONT_STRETCH);
        }
        fi.charset = encoding;
        fi.fontName = baseFont;
        fi.isToUnicode = toUnicode;
        ret.add(fi);
    } // for all fonts

    // drop duplicate CID font entries that share a name with their composite font
    Iterator<FontInformation> it = ret.iterator();
    FontInformation prev = null;
    LinkedList<FontInformation> toDelete = new LinkedList<FontInformation>();
    while (it.hasNext()) {
        FontInformation current = it.next();
        if (prev != null && prev.fontName.equals(current.fontName)
                && prev.fontType.startsWith("CIDFontType"))
            toDelete.add(current);
        prev = current;
    }
    ret.removeAll(toDelete);
    FontInformation[] retArray = ret.toArray(new FontInformation[0]);
    return retArray;
}
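The collect-then-removeAll pattern at the end of extractFontList is the safe way to delete while scanning a TreeSet: structurally modifying the set mid-iteration (other than through the iterator itself) risks a ConcurrentModificationException. A toy version with an invented dedup rule (keep only the first name per initial letter):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class CollectThenRemoveDemo {
    public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<String>(
                Arrays.asList("apple", "avocado", "banana", "blueberry"));
        List<String> toDelete = new LinkedList<String>();
        String prev = null;
        for (String current : names) {
            // invented rule: flag entries whose initial letter repeats the previous entry's
            if (prev != null && prev.charAt(0) == current.charAt(0))
                toDelete.add(current);
            prev = current;
        }
        names.removeAll(toDelete); // mutate only after iteration has finished
        System.out.println(names); // [apple, banana]
    }
}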