Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usages of java.util Set.removeAll.

Prototype

boolean removeAll(Collection<?> c);

Source Link

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).

Usage

From source file:com.beoui.geocell.GeocellManager.java

/**
 * Performs a proximity search around {@code center}, returning up to {@code maxResults}
 * entities ordered by distance, optionally capped at {@code maxDistance}
 * (0 disables the distance cap). The search starts at the geocell containing the
 * center and repeatedly expands to adjacent cells / parent cells until enough
 * results are found or the search space is exhausted.
 *
 * NOTE(review): distance units are whatever GeocellUtils.distance returns
 * (presumably meters, per the log message near the end) — confirm against GeocellUtils.
 */
public static final <T> List<T> proximitySearch(Point center, int maxResults, double maxDistance,
        Class<T> entityClass, GeocellQuery baseQuery, GeocellQueryEngine queryEngine,
        int maxGeocellResolution) {
    List<EntityLocationComparableTuple<T>> results = new ArrayList<EntityLocationComparableTuple<T>>();

    Validate.isTrue(maxGeocellResolution < MAX_GEOCELL_RESOLUTION + 1,
            "Invalid max resolution parameter. Must be inferior to ", MAX_GEOCELL_RESOLUTION);

    // The current search geocell containing the lat,lon.
    String curContainingGeocell = GeocellUtils.compute(center, maxGeocellResolution);

    // Set of already searched cells
    Set<String> searchedCells = new HashSet<String>();

    /*
     * The currently-being-searched geocells.
     * NOTES:
     * Start with max possible.
     * Must always be of the same resolution.
     * Must always form a rectangular region.
     * One of these must be equal to the cur_containing_geocell.
     */
    List<String> curGeocells = new ArrayList<String>();
    curGeocells.add(curContainingGeocell);
    double closestPossibleNextResultDist = 0;

    /*
     * Assumes both a and b are lists of (entity, dist) tuples, *sorted by dist*.
     * NOTE: This is an in-place merge, and there are guaranteed no duplicates in the resulting list.
     */

    // Seed with a zero-distance "no direction" edge so the first loop iteration
    // never terminates on the distance check.
    int noDirection[] = { 0, 0 };
    List<Tuple<int[], Double>> sortedEdgesDistances = Arrays.asList(new Tuple<int[], Double>(noDirection, 0d));

    while (!curGeocells.isEmpty()) {
        closestPossibleNextResultDist = sortedEdgesDistances.get(0).getSecond();
        if (maxDistance > 0 && closestPossibleNextResultDist > maxDistance) {
            break;
        }

        // Only query cells we have not already searched in a previous iteration.
        Set<String> curTempUnique = new HashSet<String>(curGeocells);
        curTempUnique.removeAll(searchedCells);
        List<String> curGeocellsUnique = new ArrayList<String>(curTempUnique);

        List<T> newResultEntities = queryEngine.query(baseQuery, curGeocellsUnique, entityClass);

        logger.log(Level.FINE, "fetch complete for: " + StringUtils.join(curGeocellsUnique, ", "));

        searchedCells.addAll(curGeocells);

        // Begin storing distance from the search result entity to the
        // search center along with the search result itself, in a tuple.
        List<EntityLocationComparableTuple<T>> newResults = new ArrayList<EntityLocationComparableTuple<T>>();
        for (T entity : newResultEntities) {
            newResults.add(new EntityLocationComparableTuple<T>(entity,
                    GeocellUtils.distance(center, GeocellUtils.getLocation(entity))));
        }
        // TODO (Alex) we can optimize here. Sort is needed only if new_results.size() > max_results.
        Collections.sort(newResults);
        newResults = newResults.subList(0, Math.min(maxResults, newResults.size()));

        // Merge new_results into results
        for (EntityLocationComparableTuple<T> tuple : newResults) {
            // contains method will check if entity in tuple have same key
            if (!results.contains(tuple)) {
                results.add(tuple);
            }
        }

        // Keep only the maxResults closest entities found so far.
        Collections.sort(results);
        results = results.subList(0, Math.min(maxResults, results.size()));

        sortedEdgesDistances = GeocellUtils.distanceSortedEdges(curGeocells, center);

        if (results.size() == 0 || curGeocells.size() == 4) {
            /* Either no results (in which case we optimize by not looking at
               adjacents, go straight to the parent) or we've searched 4 adjacent
               geocells, in which case we should now search the parents of those
               geocells.*/
            curContainingGeocell = curContainingGeocell.substring(0,
                    Math.max(curContainingGeocell.length() - 1, 0));
            if (curContainingGeocell.length() == 0) {
                break; // Done with search, we've searched everywhere.
            }
            List<String> oldCurGeocells = new ArrayList<String>(curGeocells);
            curGeocells.clear();
            // Replace each cell by its parent (one character shorter), deduplicated.
            for (String cell : oldCurGeocells) {
                if (cell.length() > 0) {
                    String newCell = cell.substring(0, cell.length() - 1);
                    if (!curGeocells.contains(newCell)) {
                        curGeocells.add(newCell);
                    }
                }
            }
            if (curGeocells.size() == 0) {
                break; // Done with search, we've searched everywhere.
            }
        } else if (curGeocells.size() == 1) {
            // Get adjacent in one direction.
            // TODO(romannurik): Watch for +/- 90 degree latitude edge case geocells.
            int nearestEdge[] = sortedEdgesDistances.get(0).getFirst();
            curGeocells.add(GeocellUtils.adjacent(curGeocells.get(0), nearestEdge));
        } else if (curGeocells.size() == 2) {
            // Get adjacents in perpendicular direction.
            int nearestEdge[] = GeocellUtils.distanceSortedEdges(Arrays.asList(curContainingGeocell), center)
                    .get(0).getFirst();
            int[] perpendicularNearestEdge = { 0, 0 };
            if (nearestEdge[0] == 0) {
                // Was vertical, perpendicular is horizontal.
                for (Tuple<int[], Double> edgeDistance : sortedEdgesDistances) {
                    if (edgeDistance.getFirst()[0] != 0) {
                        perpendicularNearestEdge = edgeDistance.getFirst();
                        break;
                    }
                }
            } else {
                // Was horizontal, perpendicular is vertical.
                for (Tuple<int[], Double> edgeDistance : sortedEdgesDistances) {
                    if (edgeDistance.getFirst()[0] == 0) {
                        perpendicularNearestEdge = edgeDistance.getFirst();
                        break;
                    }
                }
            }
            List<String> tempCells = new ArrayList<String>();
            for (String cell : curGeocells) {
                tempCells.add(GeocellUtils.adjacent(cell, perpendicularNearestEdge));
            }
            curGeocells.addAll(tempCells);
        }

        // We don't have enough items yet, keep searching.
        if (results.size() < maxResults) {
            logger.log(Level.FINE,
                    results.size() + " results found but want " + maxResults + " results, continuing search.");
            continue;
        }

        logger.log(Level.FINE, results.size() + " results found.");

        // If the currently max_results'th closest item is closer than any
        // of the next test geocells, we're done searching.
        double currentFarthestReturnableResultDist = GeocellUtils.distance(center,
                GeocellUtils.getLocation(results.get(maxResults - 1).getFirst()));
        if (closestPossibleNextResultDist >= currentFarthestReturnableResultDist) {
            logger.log(Level.FINE, "DONE next result at least " + closestPossibleNextResultDist
                    + " away, current farthest is " + currentFarthestReturnableResultDist + " dist");
            break;
        }
        logger.log(Level.FINE, "next result at least " + closestPossibleNextResultDist
                + " away, current farthest is " + currentFarthestReturnableResultDist + " dist");
    }
    // Strip the distances off and apply the max-distance filter to the final list.
    List<T> result = new ArrayList<T>();
    for (Tuple<T, Double> entry : results.subList(0, Math.min(maxResults, results.size()))) {
        if (maxDistance == 0 || entry.getSecond() < maxDistance) {
            result.add(entry.getFirst());
        } else {
            logger.info("Discarding result " + entry.getFirst() + " because distance " + entry.getSecond()
                    + "m > max distance " + maxDistance + "m");
        }
    }
    logger.log(Level.INFO, "Proximity query looked in " + searchedCells.size() + " geocells and found "
            + result.size() + " results.");
    return result;
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_Buckets.java

/** Want to end up with 3 lists:
 *  - v1 sources that don't exist in v2 (Create them)
 *  - v2 sources that don't exist in v1 (Delete them)
 *  - matching v1/v2 sources with different modified times (Update them)
 * @param to_compare/*from  www  . j ava  2 s  . c  o m*/
 * @returns a 3-tuple with "to create", "to delete", "to update"
 */
protected static Tuple3<Collection<String>, Collection<String>, Collection<String>> compareSourcesToBuckets_categorize(
        final Tuple2<Map<String, String>, Map<String, Date>> to_compare) {

    // Want to end up with 3 lists:
    // - v1 sources that don't exist in v2 (Create them)
    // - v2 sources that don't exist in v1 (Delete them)
    // - matching v1/v2 sources with different modified times (Update them)

    // (do delete first, then going to filter to_compare._1() on value==null)      
    final Set<String> v2_not_v1 = new HashSet<String>(to_compare._2().keySet());
    v2_not_v1.removeAll(to_compare._1().keySet());

    // OK not worried about deletes any more, not interested in isApproved:false

    final Set<String> to_compare_approved = to_compare._1().entrySet().stream()
            .filter(kv -> null != kv.getValue() && !kv.getValue().isEmpty()).map(kv -> kv.getKey())
            .collect(Collectors.toSet());

    final Set<String> v1_and_v2 = new HashSet<String>(to_compare_approved);
    v1_and_v2.retainAll(to_compare._2().keySet());

    final List<String> v1_and_v2_mod = v1_and_v2.stream().filter(id -> {
        try {
            final Date v1_date = parseJavaDate(to_compare._1().get(id));
            final Date v2_date = to_compare._2().get(id);
            return v1_date.getTime() > v2_date.getTime();
        } catch (Throwable e) {
            return false; // (just ignore)
        }
    }).collect(Collectors.toList());

    final Set<String> v1_not_v2 = new HashSet<String>(to_compare_approved);
    v1_not_v2.removeAll(to_compare._2().keySet());

    return Tuples._3T(v1_not_v2, v2_not_v1, v1_and_v2_mod);
}

From source file:com.aurel.track.admin.customize.category.filter.execute.loadItems.LoadItemLinksUtil.java

/**
 * Expands a filter's result set with its full hierarchy context and MS Project links:
 * ancestors, all descendants, and directly linked items.
 *
 * @param baseWorkItemBeanList the items matched by the filter
 * @param archived archived-state flag passed through to the hierarchy/link loaders
 * @param deleted deleted-state flag passed through to the hierarchy/link loaders
 * @param allItemIDSet in/out accumulator: on return contains the original items plus
 *        all ancestors, descendants and linked items found
 * @return only the newly discovered linked item IDs (those not already in allItemIDSet
 *         before the link lookup)
 */
private static Set<Integer> loadAncestorDescendantAndDirectLinkedItems(List<TWorkItemBean> baseWorkItemBeanList,
        Integer archived, Integer deleted, Set<Integer> allItemIDSet) {
    Set<Integer> originalItemIDSet = GeneralUtils.createIntegerSetFromBeanList(baseWorkItemBeanList);
    LOGGER.debug("Number of items in filter " + originalItemIDSet.size());
    Set<Integer> ancestorWorkItemIDsSet = getParentHierarchy(baseWorkItemBeanList, archived, deleted);
    LOGGER.debug("Number of ascendent items " + ancestorWorkItemIDsSet.size());
    allItemIDSet.addAll(originalItemIDSet);
    allItemIDSet.addAll(ancestorWorkItemIDsSet);
    // Descendants are resolved for originals AND ancestors together.
    Set<Integer> descendantItemIDSet = ItemBL.getChildHierarchy(
            GeneralUtils.createIntArrFromIntegerCollection(allItemIDSet), PARENT_CHILD_EXPRESSION.ALL_CHILDREN,
            archived, deleted, null);
    LOGGER.debug("Total number of descendent items " + descendantItemIDSet.size());
    allItemIDSet.addAll(descendantItemIDSet);
    //gather the MSProject link types
    MsProjectLinkType msProjectLinkType = MsProjectLinkType.getInstance();
    List<Integer> msProjectLinkTypes = LinkTypeBL.getLinkTypesByPluginClass(msProjectLinkType);
    //although Msproject link is unidirectional, we have to load also the predecessors in order to avoid moving back the successor items
    Map<Integer, SortedSet<Integer>> linkDependencyMap = ItemLinkBL.loadByWorkItemsAndLinkType(
            GeneralUtils.createIntegerListFromCollection(allItemIDSet), msProjectLinkTypes,
            msProjectLinkType.getPossibleDirection(), true, archived, deleted);
    Set<Integer> linkedItemIDsSet = getFlatItems(linkDependencyMap);
    LOGGER.debug("Number of linked items from hierarchy " + linkedItemIDsSet.size());
    // Keep only linked items that were not already part of the hierarchy expansion.
    linkedItemIDsSet.removeAll(allItemIDSet);
    LOGGER.debug("Number of extended linked items " + linkedItemIDsSet.size());
    allItemIDSet.addAll(linkedItemIDsSet);
    return linkedItemIDsSet;
}

From source file:org.paxml.launch.LaunchModelBuilder.java

/**
 * Finds paxml resources under the given base directory matching ant-style
 * include/exclude patterns.
 *
 * @param base     directory to search from; null means the current working directory
 * @param includes include patterns; null defaults to a match-everything pattern
 * @param excludes exclude patterns; null means exclude nothing
 * @return the included resources minus the excluded ones
 */
public static Set<PaxmlResource> findResources(String base, Set<String> includes, Set<String> excludes) {
    if (includes == null) {
        includes = new HashSet<String>(1);
        includes.add("**/*.*");
    }
    if (excludes == null) {
        // Collections.emptySet() is type-safe; the raw EMPTY_SET constant triggered
        // an unchecked assignment warning.
        excludes = Collections.emptySet();
    }
    if (base == null) {
        base = ""; // the current working dir
    }
    File f = new File(base);
    if (f.isDirectory()) {
        // The resource locator expects a file path; append a dummy file name so the
        // directory becomes the parent of the base resource.
        f = new File(f, "fake.file");
    }
    Resource baseRes = new FileSystemResource(f).getSpringResource();
    Set<PaxmlResource> include = new LinkedHashSet<PaxmlResource>(0);
    Set<PaxmlResource> exclude = new LinkedHashSet<PaxmlResource>(0);
    ResourceMatcher matcher = new ResourceMatcher(includes, excludes);
    for (String pattern : matcher.include) {
        include.addAll(ResourceLocator.findResources(pattern, baseRes));
    }
    for (String pattern : matcher.exclude) {
        exclude.addAll(ResourceLocator.findResources(pattern, baseRes));
    }
    include.removeAll(exclude);

    return include;
}

From source file:edu.umd.cs.findbugs.gui2.GUISaveState.java

/**
 * Loads the persisted GUI state from the per-user Preferences node into a fresh
 * GUISaveState and installs it as the singleton {@code instance}. Every value
 * falls back to a default when the stored preference is absent or unparseable.
 */
public static void loadInstance() {
    GUISaveState newInstance = new GUISaveState();
    newInstance.recentFiles = new ArrayList<File>();
    Preferences p = Preferences.userNodeForPackage(GUISaveState.class);

    newInstance.tabSize = p.getInt(TAB_SIZE, 4);

    newInstance.fontSize = p.getFloat(FONT_SIZE, 12.0f);

    newInstance.starterDirectoryForLoadBugs = new File(
            p.get(GUISaveState.STARTERDIRECTORY, SystemProperties.getProperty("user.dir")));

    int prevCommentsSize = p.getInt(GUISaveState.PREVCOMMENTSSIZE, 0);

    // Restore previously-entered comments, one preference key per comment.
    for (int x = 0; x < prevCommentsSize; x++) {
        String comment = p.get(GUISaveState.COMMENTKEYS[x], "");
        newInstance.previousComments.add(comment);
    }

    // Restore the recent-projects list, capped at MAXNUMRECENTPROJECTS.
    int size = Math.min(MAXNUMRECENTPROJECTS, p.getInt(GUISaveState.NUMPROJECTS, 0));
    for (int x = 0; x < size; x++) {
        newInstance.addRecentFile(new File(p.get(GUISaveState.RECENTPROJECTKEYS[x], "")));
    }

    // Restore the sort-column order; any unknown column name marks the stored
    // order as corrupted and falls back to the default order.
    int sorterSize = p.getInt(GUISaveState.SORTERTABLELENGTH, -1);
    if (sorterSize != -1) {
        ArrayList<Sortables> sortColumns = new ArrayList<Sortables>();
        String[] sortKeys = GUISaveState.generateSorterKeys(sorterSize);
        for (int x = 0; x < sorterSize; x++) {
            Sortables s = Sortables.getSortableByPrettyName(p.get(sortKeys[x], "*none*"));

            if (s == null) {
                if (MainFrame.GUI2_DEBUG) {
                    System.err.println("Sort order was corrupted, using default sort order");
                }
                newInstance.useDefault = true;
                break;
            }
            sortColumns.add(s);
        }
        if (!newInstance.useDefault) {
            // add in default columns
            Set<Sortables> missingSortColumns = new HashSet<Sortables>(Arrays.asList(DEFAULT_COLUMN_HEADERS));
            missingSortColumns.removeAll(sortColumns);
            sortColumns.addAll(missingSortColumns);
            newInstance.sortColumns = sortColumns.toArray(new Sortables[sortColumns.size()]);
        }
    } else {
        newInstance.useDefault = true;
    }

    newInstance.dockingLayout = p.getByteArray(DOCKINGLAYOUT, new byte[0]);

    // Frame bounds are stored as "x,y,width,height"; each component is parsed
    // defensively so one bad value does not discard the others.
    String boundsString = p.get(FRAME_BOUNDS, null);
    Rectangle r = new Rectangle(0, 0, 800, 650);
    if (boundsString != null) {
        String[] a = boundsString.split(",", 4);
        if (a.length > 0) {
            try {
                r.x = Math.max(0, Integer.parseInt(a[0]));
            } catch (NumberFormatException nfe) {
                assert true;
            }
        }
        if (a.length > 1) {
            try {
                r.y = Math.max(0, Integer.parseInt(a[1]));
            } catch (NumberFormatException nfe) {
                assert true;
            }
        }
        if (a.length > 2) {
            try {
                r.width = Math.max(40, Integer.parseInt(a[2]));
            } catch (NumberFormatException nfe) {
                assert true;
            }
        }
        if (a.length > 3) {
            try {
                r.height = Math.max(40, Integer.parseInt(a[3]));
            } catch (NumberFormatException nfe) {
                assert true;
            }
        }
    }
    newInstance.frameBounds = r;
    newInstance.extendedWindowState = p.getInt(EXTENDED_WINDOW_STATE, Frame.NORMAL);

    newInstance.splitMain = p.getInt(SPLIT_MAIN, 400);
    newInstance.splitSummary = p.getInt(SPLIT_SUMMARY_NEW, 400);
    newInstance.splitTop = p.getInt(SPLIT_TOP, -1);
    newInstance.splitTreeComments = p.getInt(SPLIT_TREE_COMMENTS, 250);
    newInstance.packagePrefixSegments = p.getInt(PACKAGE_PREFIX_SEGEMENTS, 3);

    // Custom plugins are stored as a space-separated list of URIs; failures to
    // register an individual plugin are deliberately ignored.
    String plugins = p.get(CUSTOM_PLUGINS, "");
    if (plugins.length() > 0) {
        for (String s : plugins.split(" ")) {
            try {
                URI u = new URI(s);
                Plugin.addCustomPlugin(u);
                newInstance.customPlugins.add(u);
            } catch (PluginException e) {
                assert true;
            } catch (URISyntaxException e) {
                assert true;
            }
        }
    }

    String enabledPluginsString = p.get(ENABLED_PLUGINS, "");
    String disabledPluginsString = p.get(DISABLED_PLUGINS, "");
    newInstance.enabledPlugins = new ArrayList<String>(Arrays.asList(enabledPluginsString.split(",")));
    newInstance.disabledPlugins = new ArrayList<String>(Arrays.asList(disabledPluginsString.split(",")));

    instance = newInstance;
}

From source file:it.units.malelab.ege.util.Utils.java

/**
 * Checks whether {@code tree} is a valid derivation tree for {@code grammar}:
 * non-null, rooted at the grammar's starting symbol, and (via innerValidate)
 * consistent with the grammar's production rules.
 */
public static <T> boolean validate(Node<T> tree, Grammar<T> grammar) {
    // Reject a missing tree or one whose root is not the starting symbol.
    if (tree == null || !tree.getContent().equals(grammar.getStartingSymbol())) {
        return false;
    }
    // Every symbol on the right-hand side of any production...
    Set<T> terminalSymbols = new LinkedHashSet<>();
    for (List<List<T>> productions : grammar.getRules().values()) {
        for (List<T> rhs : productions) {
            terminalSymbols.addAll(rhs);
        }
    }
    // ...minus the non-terminals (rule left-hand sides) leaves the terminals.
    terminalSymbols.removeAll(grammar.getRules().keySet());
    return innerValidate(tree, grammar, terminalSymbols);
}

From source file:io.sloeber.core.managers.InternalPackageManager.java

/**
 * This method removes the json files from disk and removes memory references to
 * these files or their content.
 *
 * @param packageUrlsToRemove the json package URLs to forget and whose local files to delete
 */
public static void removePackageURLs(Set<String> packageUrlsToRemove) {
    // remove the files from memory
    Set<String> activeUrls = new HashSet<>(Arrays.asList(ConfigurationPreferences.getJsonURLList()));

    activeUrls.removeAll(packageUrlsToRemove);

    // BUG FIX: toArray(null) throws NullPointerException per the Collection.toArray(T[])
    // contract; pass a typed zero-length array instead.
    ConfigurationPreferences.setJsonURLs(activeUrls.toArray(new String[0]));

    // remove the files from disk
    for (String curJson : packageUrlsToRemove) {
        File localFile = getLocalFileName(curJson, true);
        if (localFile != null && localFile.exists()) {
            localFile.delete();
        }
    }

    // reload the indices (this will remove all potential remaining references);
    // existing files do not need to be refreshed as they have been refreshed at startup
    loadJsons(false);

}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

/** Want to end up with 3 lists:
 *  - v1 objects that don't exist in v2 (Create them)
 *  - v2 objects that don't exist in v1 (Delete them)
 *  - matching v1/v2 objects with different modified times (Update them)
 * @param to_compare tuple of (v1 id -&gt; modified-date string, v2 id -&gt; modified date)
 * @returns a 3-tuple with "to create", "to delete", "to update" - NOTE: none of the _ids here include the "v1_"
 */
protected static Tuple3<Collection<String>, Collection<String>, Collection<String>> compareJarsToLibraryBeans_categorize(
        final Tuple2<Map<String, String>, Map<String, Date>> to_compare) {

    // Want to end up with 3 lists:
    // - v1 sources that don't exist in v2 (Create them)
    // - v2 sources that don't exist in v1 (Delete them)
    // - matching v1/v2 sources with different modified times (Update them)

    // (do delete first, then going to filter to_compare._1() on value==null)      
    final Set<String> v2_not_v1 = new HashSet<String>(to_compare._2().keySet());
    v2_not_v1.removeAll(to_compare._1().keySet());

    // OK not worried about deletes any more, not interested in isApproved:false

    // Keep only v1 entries whose modified-date string is present and non-empty.
    final Set<String> to_compare_approved = to_compare._1().entrySet().stream()
            .filter(kv -> null != kv.getValue() && !kv.getValue().isEmpty()).map(kv -> kv.getKey())
            .collect(Collectors.toSet());

    // Ids present on both sides are update candidates...
    final Set<String> v1_and_v2 = new HashSet<String>(to_compare_approved);
    v1_and_v2.retainAll(to_compare._2().keySet());

    // ...but only if the v1 timestamp is strictly newer; unparseable dates are skipped.
    final List<String> v1_and_v2_mod = v1_and_v2.stream().filter(id -> {
        try {
            final Date v1_date = parseJavaDate(to_compare._1().get(id));
            final Date v2_date = to_compare._2().get(id);
            return v1_date.getTime() > v2_date.getTime();
        } catch (Exception e) {
            return false; // (just ignore)
        }
    }).collect(Collectors.toList());

    // Creates: approved v1 ids with no v2 counterpart.
    final Set<String> v1_not_v2 = new HashSet<String>(to_compare_approved);
    v1_not_v2.removeAll(to_compare._2().keySet());

    return Tuples._3T(v1_not_v2, v2_not_v1, v1_and_v2_mod);
}

From source file:com.centeractive.ws.legacy.SchemaUtils.java

/**
 * Extracts namespaces - used in tool integrations for mapping.
 */
public static Collection<String> extractNamespaces(SchemaTypeSystem schemaTypes, boolean removeDefault) {
    Set<String> namespaces = new HashSet<String>();
    // Collect the namespace URI of every global type in the type system.
    for (SchemaType globalType : schemaTypes.globalTypes()) {
        namespaces.add(globalType.getName().getNamespaceURI());
    }

    if (removeDefault) {
        // Drop the well-known default schema and SOAP namespaces, keeping only
        // user-defined ones.
        namespaces.removeAll(defaultSchemas.keySet());
        namespaces.remove(Constants.SOAP11_ENVELOPE_NS);
        namespaces.remove(Constants.SOAP_ENCODING_NS);
    }

    return namespaces;
}

From source file:com.puppycrawl.tools.checkstyle.XDocsPagesTest.java

/**
 * Asserts that the xdoc "Properties" sub-section for a module documents exactly
 * the set of properties the module class actually exposes: global/undocumented
 * properties are removed from the expected set first, then the sub-section (if
 * present) must account for every remaining property.
 */
private static void validatePropertySection(String fileName, String sectionName, Node subSection,
        Object instance) {
    final Set<String> properties = getProperties(instance.getClass());
    final Class<?> clss = instance.getClass();

    // remove global properties that don't need documentation
    if (hasParentModule(sectionName)) {
        properties.removeAll(CHECK_PROPERTIES);
    } else if (AbstractFileSetCheck.class.isAssignableFrom(clss)) {
        properties.removeAll(FILESET_PROPERTIES);

        // override
        properties.add("fileExtensions");
    }

    // remove undocumented properties
    // (iterate over a copy so removal does not invalidate the iterator)
    for (String p : new HashSet<>(properties)) {
        if (UNDOCUMENTED_PROPERTIES.contains(clss.getSimpleName() + "." + p)) {
            properties.remove(p);
        }
    }

    final Check check;

    if (Check.class.isAssignableFrom(clss)) {
        check = (Check) instance;

        // "tokens" only needs documenting when the acceptable token set differs
        // from the default/required sets.
        if (!Arrays.equals(check.getAcceptableTokens(), check.getDefaultTokens())
                || !Arrays.equals(check.getAcceptableTokens(), check.getRequiredTokens())) {
            properties.add("tokens");
        }
    } else {
        check = null;
    }

    if (subSection != null) {
        Assert.assertTrue(fileName + " section '" + sectionName + "' should have no properties to show",
                !properties.isEmpty());

        validatePropertySectionProperties(fileName, sectionName, subSection, check, properties);
    }

    // After validation, every expected property must have been accounted for.
    Assert.assertTrue(fileName + " section '" + sectionName + "' should show properties: " + properties,
            properties.isEmpty());
}