Example usage for java.util HashSet removeAll

List of usage examples for java.util HashSet removeAll

Introduction

On this page you can find example usage of java.util.HashSet.removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
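
Before the real-world usages below, here is a minimal standalone example of the basic semantics: removeAll mutates the receiving set and returns true if the set changed as a result of the call.

import java.util.Arrays;
import java.util.HashSet;

public class RemoveAllDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>(Arrays.asList("a", "b", "c"));

        // Removes every element that is also in the argument collection.
        boolean changed = set.removeAll(Arrays.asList("b", "c", "x"));

        System.out.println(changed); // true, the set was modified
        System.out.println(set);     // [a]

        // Returns false when nothing was removed.
        System.out.println(set.removeAll(Arrays.asList("y", "z"))); // false
    }
}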

Usage

From source file:org.apache.fluo.recipes.core.export.it.ExportTestBase.java

protected void diff(Map<String, Set<String>> fr, Map<String, Set<String>> er) {
    HashSet<String> allKeys = new HashSet<>(fr.keySet());
    allKeys.addAll(er.keySet());

    for (String k : allKeys) {
        Set<String> s1 = fr.getOrDefault(k, Collections.emptySet());
        Set<String> s2 = er.getOrDefault(k, Collections.emptySet());

        HashSet<String> sub1 = new HashSet<>(s1);
        sub1.removeAll(s2);

        HashSet<String> sub2 = new HashSet<>(s2);
        sub2.removeAll(s1);

        if (sub1.size() > 0 || sub2.size() > 0) {
            System.out.println(k + " " + sub1 + " " + sub2);
        }

    }
}
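
The diff above computes both one-sided differences, i.e. a symmetric difference split into its two halves. A minimal standalone sketch of the same pattern (the helper name is illustrative, not from the source):

import java.util.HashSet;
import java.util.Set;

// Hypothetical helper: elements that are in exactly one of the two sets,
// reported as two one-sided differences.
static <T> void printTwoWayDiff(Set<T> left, Set<T> right) {
    Set<T> onlyLeft = new HashSet<>(left);   // copy so the input is untouched
    onlyLeft.removeAll(right);

    Set<T> onlyRight = new HashSet<>(right);
    onlyRight.removeAll(left);

    if (!onlyLeft.isEmpty() || !onlyRight.isEmpty()) {
        System.out.println("only in left: " + onlyLeft + ", only in right: " + onlyRight);
    }
}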

From source file:licenseUtil.LicensingObject.java

public HashSet<String> getNonFixedHeaders() {
    HashSet<String> result = new HashSet<>();
    result.addAll(keySet());
    result.removeAll(ColumnHeader.HEADER_VALUES);
    return result;
}
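
Note that the parameter type is Collection<?>, so the argument need not be a set; HEADER_VALUES could just as well be a List. One caveat worth knowing: HashSet inherits AbstractSet.removeAll, which iterates the argument and calls remove only when this set is larger than the argument; otherwise it iterates the set and calls c.contains(e) per element, a linear scan each time when c is a List. A small sketch with hypothetical values:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

public class RemoveAllWithList {
    public static void main(String[] args) {
        // Hypothetical header names, for illustration only.
        HashSet<String> headers = new HashSet<>(List.of("id", "name", "license", "notes"));
        List<String> fixed = new ArrayList<>(List.of("id", "name"));

        headers.removeAll(fixed);     // a List argument is fine: the parameter is Collection<?>
        System.out.println(headers);  // [license, notes], in hash order

        // Caveat: when this set is NOT larger than the argument,
        // AbstractSet.removeAll calls c.contains(e) for each element, which
        // is O(n) per call on a List. Wrapping a large List in a HashSet
        // first avoids the O(n*m) behavior:
        // headers.removeAll(new HashSet<>(largeList));
    }
}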

From source file:org.apache.bookkeeper.client.DefaultEnsemblePlacementPolicy.java

@Override
public Set<BookieSocketAddress> onClusterChanged(Set<BookieSocketAddress> writableBookies,
        Set<BookieSocketAddress> readOnlyBookies) {
    rwLock.writeLock().lock();
    try {
        HashSet<BookieSocketAddress> deadBookies = new HashSet<BookieSocketAddress>(knownBookies);
        deadBookies.removeAll(writableBookies);
        // readonly bookies should not be treated as dead bookies
        deadBookies.removeAll(readOnlyBookies);
        if (this.isWeighted) {
            for (BookieSocketAddress b : deadBookies) {
                this.bookieInfoMap.remove(b);
            }
            @SuppressWarnings("unchecked")
            Collection<BookieSocketAddress> newBookies = CollectionUtils.subtract(writableBookies,
                    knownBookies);
            for (BookieSocketAddress b : newBookies) {
                this.bookieInfoMap.put(b, new BookieInfo());
            }
            if (deadBookies.size() > 0 || newBookies.size() > 0) {
                this.weightedSelection.updateMap(this.bookieInfoMap);
            }
        }
        knownBookies = writableBookies;
        return deadBookies;
    } finally {
        rwLock.writeLock().unlock();
    }
}
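
The CollectionUtils.subtract call above comes from Apache Commons Collections; for set inputs, the same result can be had with plain JDK collections by copying and calling removeAll, exactly as the deadBookies computation in the same method does. A minimal sketch (the helper name is hypothetical):

import java.util.HashSet;
import java.util.Set;

// Hypothetical helper: a plain-JDK equivalent of CollectionUtils.subtract(a, b)
// for set inputs. Copy first, because removeAll mutates its receiver.
static <T> Set<T> subtract(Set<T> a, Set<T> b) {
    Set<T> result = new HashSet<>(a);
    result.removeAll(b);
    return result;
}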

From source file:com.flipkart.flux.resource.StateMachineResource.java

private FsmGraph getGraphData(Long fsmId) throws IOException {
    StateMachine stateMachine = stateMachinesDAO.findById(fsmId);

    if (stateMachine == null) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
    }
    final FsmGraph fsmGraph = new FsmGraph();

    Map<String, Event> stateMachineEvents = eventsDAO.findBySMInstanceId(fsmId).stream()
            .collect(Collectors.<Event, String, Event>toMap(Event::getName, (event -> event)));
    Set<String> allOutputEventNames = new HashSet<>();

    final RAMContext ramContext = new RAMContext(System.currentTimeMillis(), null, stateMachine);
    /* After this operation, we'll have nodes for each state and its corresponding output event along with the output event's dependencies mapped out*/
    for (State state : stateMachine.getStates()) {
        if (state.getOutputEvent() != null) {
            EventDefinition eventDefinition = objectMapper.readValue(state.getOutputEvent(),
                    EventDefinition.class);
            final Event outputEvent = stateMachineEvents.get(eventDefinition.getName());
            final FsmGraphVertex vertex = new FsmGraphVertex(state.getId(), getDisplayName(state.getName()));
            fsmGraph.addVertex(vertex, new FsmGraphEdge(getDisplayName(outputEvent.getName()),
                    outputEvent.getStatus().name(), outputEvent.getEventSource()));
            final Set<State> dependantStates = ramContext.getDependantStates(outputEvent.getName());
            dependantStates.forEach((aState) -> fsmGraph.addOutgoingEdge(vertex, aState.getId()));
            allOutputEventNames.add(outputEvent.getName()); // we collect all output event names and use them below.
        } else {
            fsmGraph.addVertex(new FsmGraphVertex(state.getId(), this.getDisplayName(state.getName())), null);
        }
    }

    /* Handle states with no dependencies, i.e the states that can be triggered as soon as we execute the state machine */
    final Set<State> initialStates = ramContext.getInitialStates(Collections.emptySet());// hackety hack.  We're fooling the context to give us only events that depend on nothing
    if (!initialStates.isEmpty()) {
        final FsmGraphEdge initEdge = new FsmGraphEdge(TRIGGER, Event.EventStatus.triggered.name(), TRIGGER);
        initialStates.forEach((state) -> {
            initEdge.addOutgoingVertex(state.getId());
        });
        fsmGraph.addInitStateEdge(initEdge);
    }
    /* Now we handle events that were not "output-ed" by any state, which means that they were given to the workflow at the time of invocation or supplied externally*/
    final HashSet<String> eventsGivenOnWorkflowTrigger = new HashSet<>(stateMachineEvents.keySet());
    eventsGivenOnWorkflowTrigger.removeAll(allOutputEventNames);
    eventsGivenOnWorkflowTrigger.forEach((workflowTriggeredEventName) -> {
        final Event correspondingEvent = stateMachineEvents.get(workflowTriggeredEventName);
        final FsmGraphEdge initEdge = new FsmGraphEdge(this.getDisplayName(workflowTriggeredEventName),
                correspondingEvent.getStatus().name(), correspondingEvent.getEventSource());
        final Set<State> dependantStates = ramContext.getDependantStates(workflowTriggeredEventName);
        dependantStates.forEach((state) -> initEdge.addOutgoingVertex(state.getId()));
        fsmGraph.addInitStateEdge(initEdge);
    });
    return fsmGraph;
}

From source file:org.jasig.portlet.emailpreview.controller.EditPreferencesController.java

private Set<String> filterNonUserProtocols(Set<String> protocols) {
    HashSet<String> filteredSet = new HashSet<>(protocols);
    filteredSet.removeAll(nonUserProtocols);
    return filteredSet;
}
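
Note the copy before the removeAll: the method builds a new HashSet from the input so the caller's set is left untouched, since removeAll modifies the receiver in place. A standalone sketch of the same defensive-copy pattern (protocol names are illustrative):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DefensiveCopyDemo {
    public static void main(String[] args) {
        // Hypothetical protocol names, for illustration only.
        Set<String> protocols = new HashSet<>(List.of("imap", "imaps", "pop3"));

        // removeAll mutates its receiver, so filter on a copy:
        Set<String> filtered = new HashSet<>(protocols);
        filtered.removeAll(List.of("pop3"));   // hypothetical nonUserProtocols

        System.out.println(protocols); // unchanged: still contains pop3
        System.out.println(filtered);  // [imap, imaps], in hash order
    }
}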

From source file:gobblin.source.extractor.filebased.FileBasedSource.java

/**
 * This method takes the snapshot seen in the previous run and compares it to the list
 * of files currently in the source. It then decides which files it needs to pull
 * and distributes those files across the workunits. The comparison is done between
 * the names of the files currently in the source and the names retrieved from the
 * previous state.
 * @param state is the source state
 * @return a list of workunits for the framework to run
 */
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    initLogger(state);
    try {
        initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
        Throwables.propagate(e);
    }

    log.info("Getting work units");
    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
    String entityName = state.getProp(ConfigurationKeys.SOURCE_ENTITY);

    // Override extract table name
    String extractTableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);

    // If extract table name is not found then consider entity name as extract table name
    if (Strings.isNullOrEmpty(extractTableName)) {
        extractTableName = entityName;
    }

    TableType tableType = TableType
            .valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
    List<WorkUnitState> previousWorkunits = Lists.newArrayList(state.getPreviousWorkUnitStates());
    Set<String> prevFsSnapshot = Sets.newHashSet();

    // Get list of files seen in the previous run
    if (!previousWorkunits.isEmpty()) {
        if (previousWorkunits.get(0).getWorkunit().contains(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT)) {
            prevFsSnapshot = previousWorkunits.get(0).getWorkunit()
                    .getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT);
        } else if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED,
                ConfigurationKeys.DEFAULT_SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED)) {
            // If a previous job exists, there should be a snapshot property.  If not, we need to fail so that we
            // don't accidentally read files that have already been processed.
            throw new RuntimeException(String.format("No '%s' found on state of prior job",
                    ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT));
        }
    }

    List<WorkUnit> workUnits = Lists.newArrayList();
    List<WorkUnit> previousWorkUnitsForRetry = this.getPreviousWorkUnitsForRetry(state);
    log.info("Total number of work units from the previous failed runs: " + previousWorkUnitsForRetry.size());
    for (WorkUnit previousWorkUnitForRetry : previousWorkUnitsForRetry) {
        prevFsSnapshot.addAll(
                previousWorkUnitForRetry.getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
        workUnits.add(previousWorkUnitForRetry);
    }

    // Get list of files that need to be pulled
    List<String> currentFsSnapshot = this.getcurrentFsSnapshot(state);
    HashSet<String> filesWithTimeToPull = new HashSet<>(currentFsSnapshot);
    filesWithTimeToPull.removeAll(prevFsSnapshot);
    List<String> filesToPull = new ArrayList<>();
    Iterator<String> it = filesWithTimeToPull.iterator();
    while (it.hasNext()) {
        String[] filesWithoutTimeToPull = it.next().split(this.splitPattern);
        filesToPull.add(filesWithoutTimeToPull[0]);
    }

    if (!filesToPull.isEmpty()) {
        logFilesToPull(filesToPull);

        int numPartitions = state.contains(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS)
                && state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS) <= filesToPull.size()
                        ? state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS)
                        : filesToPull.size();
        if (numPartitions <= 0) {
            throw new IllegalArgumentException("The number of partitions should be positive");
        }

        int filesPerPartition = filesToPull.size() % numPartitions == 0 ? filesToPull.size() / numPartitions
                : filesToPull.size() / numPartitions + 1;

        // Distribute the files across the workunits
        for (int fileOffset = 0; fileOffset < filesToPull.size(); fileOffset += filesPerPartition) {
            SourceState partitionState = new SourceState();
            partitionState.addAll(state);

            // Eventually these setters should be integrated with framework support for generalized watermark handling
            partitionState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT,
                    StringUtils.join(currentFsSnapshot, ","));

            List<String> partitionFilesToPull = filesToPull.subList(fileOffset,
                    fileOffset + filesPerPartition > filesToPull.size() ? filesToPull.size()
                            : fileOffset + filesPerPartition);
            partitionState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
                    StringUtils.join(partitionFilesToPull, ","));
            if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_PRESERVE_FILE_NAME, false)) {
                if (partitionFilesToPull.size() != 1) {
                    throw new RuntimeException(
                            "Cannot preserve the file name if a workunit is given multiple files");
                }
                partitionState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR,
                        partitionState.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
            }

            // Use extract table name to create extract
            Extract extract = partitionState.createExtract(tableType, nameSpaceName, extractTableName);
            workUnits.add(partitionState.createWorkUnit(extract));
        }

        log.info("Total number of work units for the current run: "
                + (workUnits.size() - previousWorkUnitsForRetry.size()));
    }

    return workUnits;
}

From source file:org.fao.geonet.kernel.search.AbstractLanguageSearchOrderIntegrationTest.java

private void assertContainsOnly(String[] titles, String... expectedValues) {
    assertEquals(expectedValues.length, titles.length);
    final List<String> titlesList = Arrays.asList(titles);
    final List<String> expectedList = Arrays.asList(expectedValues);

    HashSet<String> extras = new HashSet<>(titlesList);
    extras.removeAll(expectedList);
    HashSet<String> missing = new HashSet<>(expectedList);
    missing.removeAll(titlesList);

    if (!extras.isEmpty() || !missing.isEmpty()) {
        throw new AssertionError("Following strings should not be in results: " + extras
                + "\nFollowing strings should have been in" + " results: " + missing);
    }
}

From source file:org.entrystore.rest.util.jdil.JDIL.java

public JSONObject exportGraphToJDIL(Graph graph, Resource root) {
    try {
        HashMap<Resource, JSONObject> res2Jdil = new HashMap<Resource, JSONObject>();
        HashSet<Resource> notRoots = new HashSet<Resource>();

        for (Statement statement : graph) {
            JSONObject subj = getOrCreateSubject(statement.getSubject(), res2Jdil);
            String predicate = namespaces.abbreviate(statement.getPredicate().stringValue());
            notRoots.add(statement.getPredicate());
            Value value = statement.getObject();

            if (value instanceof Resource) {
                /*
                 * Create a new JDIL value to accumulate to the subject.
                 */
                JSONObject JDILValueObject = getOrCreateObject((Resource) value, res2Jdil);

                subj.accumulate(predicate, JDILValueObject);
                notRoots.add((Resource) value);

            } else {
                Literal lit = (Literal) value;
                String language = lit.getLanguage();
                URI datatype = lit.getDatatype();
                JSONObject object = new JSONObject();
                object.accumulate("@value", value.stringValue());
                if (language != null) {
                    object.accumulate("@language", language);
                } else if (datatype != null) {
                    object.accumulate("@datatype", datatype.stringValue());
                }
                subj.accumulate(predicate, object);
            }
        }
        if (root != null) {
            JSONObject obj = res2Jdil.get(root);
            cutLoops(obj, new HashSet());
            return obj;
        }
        HashSet<Resource> roots = new HashSet<Resource>(res2Jdil.keySet());
        roots.removeAll(notRoots);
        if (roots.size() == 1) {
            JSONObject obj = res2Jdil.get(roots.iterator().next());
            cutLoops(obj, new HashSet());
            return obj;
        }
    } catch (JSONException jse) {
        log.error(jse.getMessage());
    }
    return null;
}

From source file:com.redhat.rhn.manager.monitoring.ModifyFilterCommand.java

/**
 * Update the probe states for which the filter matches to <code>values</code>.
 * The values must be one of the constants in {@link MonitoringConstants#PROBE_STATES}
 * @param values the new probe state values on which the filter matches
 */
public void updateStates(String[] values) {
    if (values != null) {
        HashSet<String> valueSet = new HashSet<>(Arrays.asList(values));
        if (PROBE_STATE_SET.equals(valueSet)) {
            values = null;
        }
        valueSet.removeAll(PROBE_STATE_SET);
        if (valueSet.size() > 0) {
            throw new IllegalArgumentException(
                    "The state values must be one of " + PROBE_STATE_SET + ", but also contained " + valueSet);
        }
    }
    updateCriteria(MatchType.STATE, values);
}
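
The pattern here is validation by subtraction: remove every allowed value from the input set, and whatever is left over is invalid. A minimal standalone sketch (the allowed values are illustrative, not the real MonitoringConstants):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class StateValidator {
    // Hypothetical allowed values; the real ones live in MonitoringConstants.
    static final Set<String> ALLOWED = new HashSet<>(Arrays.asList("OK", "WARN", "CRITICAL"));

    static void validate(String[] values) {
        Set<String> leftovers = new HashSet<>(Arrays.asList(values));
        leftovers.removeAll(ALLOWED);  // strip everything that is permitted
        if (!leftovers.isEmpty()) {    // anything left over is invalid
            throw new IllegalArgumentException("Unknown values: " + leftovers);
        }
    }

    public static void main(String[] args) {
        validate(new String[] { "OK", "WARN" });   // passes
        validate(new String[] { "OK", "BROKEN" }); // throws: [BROKEN]
    }
}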

From source file:org.apache.activemq.transport.discovery.http.HTTPDiscoveryAgent.java

private void update() {
    // Register all our services...
    synchronized (registeredServices) {
        for (String service : registeredServices) {
            doRegister(service);
        }
    }

    // Find new registered services...
    DiscoveryListener discoveryListener = this.discoveryListener.get();
    if (discoveryListener != null) {
        Set<String> activeServices = doLookup(updateInterval * 3);
        // If there is an error talking to the central server, then
        // activeServices == null
        if (activeServices != null) {
            synchronized (discoveredServices) {

                HashSet<String> removedServices = new HashSet<String>(discoveredServices.keySet());
                removedServices.removeAll(activeServices);

                HashSet<String> addedServices = new HashSet<String>(activeServices);
                addedServices.removeAll(discoveredServices.keySet());
                addedServices.removeAll(removedServices);

                for (String service : addedServices) {
                    SimpleDiscoveryEvent e = new SimpleDiscoveryEvent(service);
                    discoveredServices.put(service, e);
                    discoveryListener.onServiceAdd(e);
                }

                for (String service : removedServices) {
                    SimpleDiscoveryEvent e = discoveredServices.remove(service);
                    if (e != null) {
                        e.removed.set(true);
                    }
                    discoveryListener.onServiceRemove(e);
                }
            }
        }
    }
}
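
The update method above is a compact change-detection idiom: one removeAll against the current snapshot yields the removed entries, and one against the previous snapshot yields the added ones. A standalone sketch of the same idiom (the helper name is illustrative):

import java.util.HashSet;
import java.util.Set;

// Hypothetical snapshot diff, mirroring the idiom in update() above.
static void diffSnapshots(Set<String> previous, Set<String> current) {
    Set<String> removed = new HashSet<>(previous); // in previous but not in current
    removed.removeAll(current);

    Set<String> added = new HashSet<>(current);    // in current but not in previous
    added.removeAll(previous);

    System.out.println("added: " + added + ", removed: " + removed);
}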