Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usage for java.util Set removeAll, drawn from real-world open-source projects.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
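
Before the real-world examples below, here is a quick self-contained sketch of the contract (the set contents are illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RemoveAllExample {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue"));
        List<String> toRemove = Arrays.asList("green", "blue", "yellow");

        // Removes every element also contained in toRemove; elements of
        // toRemove that are absent from the set ("yellow") are simply ignored.
        boolean changed = colors.removeAll(toRemove);

        System.out.println(changed); // true, because the set was modified
        System.out.println(colors);  // [red]

        // Since removeAll is an optional operation, an unmodifiable set
        // throws UnsupportedOperationException:
        // Set.of("a").removeAll(List.of("a"));
    }
}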

Usage

From source file:de.saly.elasticsearch.importer.imap.mailsource.ParallelPollingIMAPMailSource.java
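
Here removeAll subtracts the set of UIDs still present on the IMAP server from the set of locally indexed UIDs, leaving exactly the messages that were expunged on the server and must be deleted locally.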

@SuppressWarnings({ "rawtypes", "unchecked" })
protected void fetch(final Folder folder) throws MessagingException, IOException {

    if ((folder.getType() & Folder.HOLDS_MESSAGES) == 0) {
        logger.warn("Folder {} cannot hold messages", folder.getFullName());
        return;

    }

    final int messageCount = folder.getMessageCount();

    final UIDFolder uidfolder = (UIDFolder) folder;
    final long servervalidity = uidfolder.getUIDValidity();
    final State riverState = stateManager.getRiverState(folder);
    final Long localvalidity = riverState.getUidValidity();

    logger.info("Fetch mails from folder {} ({})", folder.getURLName().toString(), messageCount);

    logger.debug("Server uid validity: {}, Local uid validity: {}", servervalidity, localvalidity);

    if (localvalidity == null || localvalidity.longValue() != servervalidity) {
        logger.debug("UIDValidity fail, full resync " + localvalidity + "!=" + servervalidity);

        if (localvalidity != null) {
            mailDestination.clearDataForFolder(folder);
        }

        final ProcessResult result = process(messageCount, 1, folder.getFullName());

        riverState.setLastCount(result.getProcessedCount());

        if (result.getProcessedCount() > 0) {
            riverState.setLastIndexed(new Date());
            riverState.setLastTook(result.getTook());
        }

        riverState.setLastSchedule(new Date());

        if (result.getProcessedCount() > 0 && result.getHighestUid() > 0) {
            riverState.setLastUid(result.getHighestUid());
        }

        riverState.setUidValidity(servervalidity);
        stateManager.setRiverState(riverState);

        logger.info("Initiailly processed {} mails for folder {}", result.getProcessedCount(),
                folder.getFullName());
        logger.debug("Processed result {}", result.toString());

    } else {

        if (messageCount == 0) {
            logger.debug("Folder {} is empty", folder.getFullName());
        } else {

            if (withFlagSync) {
                // detect flag change
                final Message[] flagMessages = folder.getMessages();
                folder.fetch(flagMessages, IMAPUtils.FETCH_PROFILE_FLAGS_UID);

                for (final Message message : flagMessages) {
                    try {

                        final long uid = ((UIDFolder) message.getFolder()).getUID(message);

                        final String id = uid + "::" + message.getFolder().getURLName();

                        final int storedHashcode = mailDestination.getFlaghashcode(id);

                        if (storedHashcode == -1) {
                            // New mail which is not indexed yet
                            continue;
                        }

                        final int flagHashcode = message.getFlags().hashCode();

                        if (flagHashcode != storedHashcode) {
                            // flags change for this message, must update
                            mailDestination.onMessage(message);

                            if (logger.isDebugEnabled()) {
                                logger.debug("Update " + id + " because of flag change");
                            }
                        }
                    } catch (final Exception e) {
                        logger.error("Error detecting flagchanges for message "
                                + ((MimeMessage) message).getMessageID(), e);
                        stateManager.onError("Error detecting flagchanges", message, e);
                    }
                }
            }

            long highestUID = riverState.getLastUid(); // this uid is already processed

            logger.debug("highestUID: {}", highestUID);

            if (highestUID < 1) {
                logger.error("highestUID: {} not valid, set it to 1", highestUID);
                highestUID = 1;
            }

            Message[] msgsnew = uidfolder.getMessagesByUID(highestUID, UIDFolder.LASTUID);

            if (msgsnew.length > 0) {

                logger.debug("lastuid: {}", uidfolder.getUID(msgsnew[msgsnew.length - 1]));

                // msgsnew.length is always >= 1 here
                if (highestUID > 1 && uidfolder.getUID(msgsnew[msgsnew.length - 1]) <= highestUID) {
                    msgsnew = (Message[]) ArrayUtils.remove(msgsnew, msgsnew.length - 1);
                }

                if (msgsnew.length > 0) {

                    logger.info("{} new messages in folder {}", msgsnew.length, folder.getFullName());

                    final int start = msgsnew[0].getMessageNumber();

                    final ProcessResult result = process(messageCount, start, folder.getFullName());

                    riverState.setLastCount(result.getProcessedCount());

                    if (result.getProcessedCount() > 0) {
                        riverState.setLastIndexed(new Date());
                        riverState.setLastTook(result.getTook());
                    }

                    riverState.setLastSchedule(new Date());

                    if (result.getProcessedCount() > 0 && result.getHighestUid() > 0) {
                        riverState.setLastUid(result.getHighestUid());
                    }

                    riverState.setUidValidity(servervalidity);
                    stateManager.setRiverState(riverState);

                    logger.info("Not initiailly processed {} mails for folder {}", result.getProcessedCount(),
                            folder.getFullName());
                    logger.debug("Processed result {}", result.toString());
                } else {
                    logger.debug("no new messages");
                }
            } else {
                logger.debug("no new messages");
            }

        }
        // check for expunged/deleted messages
        final Set<Long> serverMailSet = new HashSet<Long>();

        final long oldmailUid = riverState.getLastUid();
        logger.debug("oldmailuid {}", oldmailUid);

        final Message[] msgsold = uidfolder.getMessagesByUID(1, oldmailUid);

        folder.fetch(msgsold, IMAPUtils.FETCH_PROFILE_UID);

        for (final Message m : msgsold) {
            try {
                final long uid = uidfolder.getUID(m);
                serverMailSet.add(uid);

            } catch (final Exception e) {
                stateManager.onError("Unable to handle old message ", m, e);
                logger.error("Unable to handle old message due to {}", e, e.toString());

                IMAPUtils.open(folder);
            }
        }

        if (deleteExpungedMessages) {

            final Set localMailSet = new HashSet(mailDestination.getCurrentlyStoredMessageUids(folder));

            logger.debug("Check now " + localMailSet.size() + " server mails for expunge");

            localMailSet.removeAll(serverMailSet);
            // localMailSet now contains the UIDs that are no longer on the server

            logger.info(localMailSet.size()
                    + " messages will be deleted locally because they were expunged on the server.");

            mailDestination.onMessageDeletes(localMailSet, folder);

        }

    }

}

From source file:com.bdaum.zoom.gps.internal.operations.GeotagOperation.java
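
Here removeAll strips the previously derived location keywords from an asset's keyword set before the newly extracted ones are added.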

protected boolean updateKeywords(Asset asset, Location newLocation, Meta meta, List<String> oldKeywords) {
    if (gpsConfiguration.includeNames) {
        Set<String> keywords = new HashSet<String>(Arrays.asList(asset.getKeyword()));
        List<String> newKeywords = new ArrayList<String>(3);
        Utilities.extractKeywords(newLocation, newKeywords);
        addKeywordsToCat(meta, newKeywords);
        if (oldKeywords != null)
            keywords.removeAll(oldKeywords);
        keywords.addAll(newKeywords);
        String[] kws = keywords.toArray(new String[keywords.size()]);
        Arrays.sort(kws, Utilities.KEYWORDCOMPARATOR);
        asset.setKeyword(kws);
        return true;
    }
    return false;
}

From source file:com.microsoft.tfs.core.clients.workitem.internal.metadata.ConstantSet.java
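
Here removeAll filters already-visited parent IDs out of each new batch of root IDs, which prevents revisiting nodes when traversing a constant graph that may contain cycles.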

private void populate(final DBConnection connection, final int[] startingRootIds, final boolean oneLevel,
        final boolean twoPlusLevels, final boolean leaf, final boolean interior) {
    if (log.isDebugEnabled()) {
        final StringBuffer sb = new StringBuffer("["); //$NON-NLS-1$
        for (int i = 0; i < startingRootIds.length; i++) {
            sb.append(startingRootIds[i]);
            if (i < startingRootIds.length - 1) {
                sb.append(","); //$NON-NLS-1$
            }
        }
        sb.append("]"); //$NON-NLS-1$

        log.debug(MessageFormat.format(
                "populate ConstantSet startingRootIds={0} oneLevel={1} twoPlusLevels={2} leaf={3} interior={4}", //$NON-NLS-1$
                sb.toString(), oneLevel, twoPlusLevels, leaf, interior));
    }

    if (!oneLevel && !twoPlusLevels) {
        /*
         * singleton case: each root id is a singleton constant
         */

        final StringBuffer sb = new StringBuffer("("); //$NON-NLS-1$
        for (int i = 0; i < startingRootIds.length; i++) {
            sb.append(startingRootIds[i]);
            if (i < (startingRootIds.length - 1)) {
                sb.append(","); //$NON-NLS-1$
            }
        }
        sb.append(")"); //$NON-NLS-1$

        final String SQL = "select ConstID, String, DisplayName from Constants where ConstID in " //$NON-NLS-1$
                + sb.toString();

        ++queryCount;
        connection.createStatement(SQL).executeQuery(new ResultHandler() {
            @Override
            public void handleRow(final ResultSet rset) throws SQLException {
                final int constId = rset.getInt(1);
                final String string = rset.getString(2);
                final String displayName = rset.getString(3);
                values.add(displayName != null ? displayName : string);
                constIds.add(Integer.valueOf(constId));
            }
        });
    } else {
        /*
         * We keep track of all parent IDs. This is because cycles can exist
         * in the graph and we only want to visit each parent once.
         */
        final Set<Integer> allParentIds = new HashSet<Integer>();

        /*
         * create an initial set of root IDs
         */
        Set<Integer> rootIds = new HashSet<Integer>();
        for (int i = 0; i < startingRootIds.length; i++) {
            rootIds.add(Integer.valueOf(startingRootIds[i]));
        }
        allParentIds.addAll(rootIds);

        rootIds = query(connection, rootIds,
                (oneLevel && leaf),     // do we want first-level leaf nodes?
                (oneLevel && interior), // do we want first-level non-leaf nodes?
                twoPlusLevels);         // do we want to process deeper levels?

        rootIds.removeAll(allParentIds);
        allParentIds.addAll(rootIds);

        while (rootIds.size() > 0) {
            rootIds = query(connection, rootIds, leaf, interior, true);

            rootIds.removeAll(allParentIds);
            allParentIds.addAll(rootIds);
        }
    }
}

From source file:org.onosproject.sse.SseTopologyViewWebSocket.java
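
Here removeAll splits the selected intents into two traffic classes: the hovered path intents become the primary set, and removing them from the full selection leaves the secondary set.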

private void processHoverExtendedSelection(long sid, String hover) {
    Set<Host> hoverSelHosts = new HashSet<>(selectedHosts);
    Set<Device> hoverSelDevices = new HashSet<>(selectedDevices);
    addHover(hoverSelHosts, hoverSelDevices, hover);

    List<Intent> primary = selectedIntents == null ? new ArrayList<>()
            : intentFilter.findPathIntents(hoverSelHosts, hoverSelDevices, selectedIntents);
    Set<Intent> secondary = new HashSet<>(selectedIntents);
    secondary.removeAll(primary);

    // Send a message to highlight all links of all monitored intents.
    sendMessage(trafficMessage(sid, new TrafficClass("primary", primary),
            new TrafficClass("secondary", secondary)));
}

From source file:dk.netarkivet.archive.arcrepository.bitpreservation.FileBasedActiveBitPreservation.java
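
Here removeAll computes two set differences between the replica's file listing and the admin data: files registered in the admin data but missing from the replica, and files found in the replica but unknown to the admin data.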

/**
 * This method takes as input the name of a replica for which we wish to
 * retrieve the list of files, either through a FileListJob or a 
 * GetAllFilenamesMessage. It also reads in the known files in the
 * arcrepository from the AdminData directory specified in the Setting
 * DIRS_ARCREPOSITORY_ADMIN. The two file lists are compared and a
 * subdirectory missingFiles is created with two unsorted files:
 * 'missingba.txt' containing missing files, i.e. those registered in the
 * admin data but not found in the replica, and 'missingadmindata.txt'
 * containing extra files, i.e. those found in the replica but not in the
 * arcrepository admin data.
 *
 * TODO The second file is never used in the current implementation.
 *
 * FIXME: It is unclear whether the decision about which files are missing
 * would be better placed in getMissingFiles, so that this method only runs
 * the batch job.
 *
 * @param replica the replica to search for missing files
 *
 * @throws ArgumentNotValid If the given directory does not contain a file
 *                          filelistOutput/sorted.txt, or the argument
 *                          replica is null.
 * @throws PermissionDenied If the output directory cannot be created.
 */
public void findMissingFiles(Replica replica) throws ArgumentNotValid, PermissionDenied {
    ArgumentNotValid.checkNotNull(replica, "Replica replica");
    runFileListJob(replica);
    log.trace("Finding missing files in directory '" + WorkFiles.getPreservationDir(replica) + "'");
    admin.synchronize();

    // Create set of file names from replica data
    Set<String> filesInReplica = new HashSet<String>(WorkFiles.getLines(replica, WorkFiles.FILES_ON_BA));

    // Get set of files in arcrepository
    Set<String> arcrepNameSet = admin.getAllFileNames();

    // Find difference set 1 (the files missing from the replica).
    Set<String> extraFilesInAdminData = new HashSet<String>(arcrepNameSet);
    extraFilesInAdminData.removeAll(filesInReplica);

    // Log result
    if (extraFilesInAdminData.size() > 0) {
        log.warn("The " + extraFilesInAdminData.size() + " files '"
                + new ArrayList<String>(extraFilesInAdminData).subList(0,
                        Math.min(extraFilesInAdminData.size(), MAX_LIST_SIZE))
                + "' are not present in the replica listing in '"
                + WorkFiles.getPreservationDir(replica).getAbsolutePath() + "'");
    }

    // Write output data
    WorkFiles.write(replica, WorkFiles.MISSING_FILES_BA, extraFilesInAdminData);

    // Find difference set 2 (the files missing in admin.data).
    Set<String> extraFilesInRep = new HashSet<String>(filesInReplica);
    extraFilesInRep.removeAll(arcrepNameSet);

    // Log result
    if (extraFilesInRep.size() > 0) {
        log.warn("The " + extraFilesInRep.size() + " files '"
                + new ArrayList<String>(extraFilesInRep).subList(0,
                        Math.min(extraFilesInRep.size(), MAX_LIST_SIZE))
                + "' have been found in the replica listing in '"
                + WorkFiles.getPreservationDir(replica).getAbsolutePath()
                + "' though they are not known by the " + "system.");
    }

    // Write output data
    WorkFiles.write(replica, WorkFiles.MISSING_FILES_ADMINDATA, extraFilesInRep);
    log.trace("Finished finding missing files.");
}

From source file:com.spotify.styx.docker.KubernetesDockerRunner.java
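
Here removeAll subtracts the workflow instances that have an associated pod from the set of instances marked RUNNING; whatever remains has lost its pod and receives a runError event.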

void examineRunningWFISandAssociatedPods(PodList podList) {
    final Set<WorkflowInstance> runningWorkflowInstances = stateManager.activeStates().values().stream()
            .filter(runState -> runState.state().equals(RUNNING)).map(RunState::workflowInstance)
            .collect(toSet());

    final Set<WorkflowInstance> workflowInstancesForPods = podList.getItems().stream()
            .filter(pod -> pod.getMetadata().getAnnotations().containsKey(STYX_WORKFLOW_INSTANCE_ANNOTATION))
            .map(pod -> WorkflowInstance
                    .parseKey(pod.getMetadata().getAnnotations().get(STYX_WORKFLOW_INSTANCE_ANNOTATION)))
            .collect(toSet());

    runningWorkflowInstances.removeAll(workflowInstancesForPods);
    runningWorkflowInstances.forEach(workflowInstance -> stateManager
            .receiveIgnoreClosed(Event.runError(workflowInstance, "No pod associated with this instance")));
}

From source file:com.evolveum.midpoint.model.impl.validator.ResourceValidatorImpl.java
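
Here removeAll drops every object type that has a schemaHandling section from the set of synchronized object types, leaving the types whose synchronization entries lack a schema-handling definition.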

private void checkSchemaHandlingExistenceForSynchronizationObjectTypes(ResourceValidationContext ctx) {
    ResourceType resource = ctx.resourceObject.asObjectable();
    Set<ObjectTypeRecord> synchronizationFor = new HashSet<>(
            ObjectTypeRecord.extractFrom(resource.getSynchronization()));
    Collection<ObjectTypeRecord> schemaHandlingFor = ObjectTypeRecord.extractFrom(resource.getSchemaHandling());
    synchronizationFor.removeAll(schemaHandlingFor);
    if (!synchronizationFor.isEmpty()) {
        ctx.validationResult.add(Issue.Severity.INFO, CAT_SCHEMA_HANDLING, C_NO_SCHEMA_HANDLING_DEFINITION,
                getString(ctx.bundle, CLASS_DOT + C_NO_SCHEMA_HANDLING_DEFINITION,
                        ObjectTypeRecord.asFormattedList(synchronizationFor)),
                ctx.resourceRef, ITEM_PATH_SCHEMA_HANDLING);
    }
}

From source file:com.t3.persistence.PackedFile.java
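
Here removeAll subtracts the pending deletions (removedFileSet) from the union of newly added paths and the entries already present in the zip file.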

/**
 * Get all of the path names for this packed file.
 *
 * @return All the path names. Changing this set does not affect the packed file. Changes to the
 * file made after this method is called are not reflected in the returned set and do not cause a
 * ConcurrentModificationException. Directories in the packed file are also included in the set.
 * @throws IOException Problem with the zip file.
 */
public Set<String> getPaths() throws IOException {
    Set<String> paths = new HashSet<String>(addedFileSet);
    paths.add(CONTENT_FILE);
    paths.add(PROPERTY_FILE);
    if (file.exists()) {
        ZipFile zf = getZipFile();
        Enumeration<? extends ZipEntry> e = zf.entries();
        while (e.hasMoreElements()) {
            paths.add(e.nextElement().getName());
        }
    }
    paths.removeAll(removedFileSet);
    return paths;
}

From source file:dk.netarkivet.archive.arcrepository.bitpreservation.FileBasedActiveBitPreservation.java
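
Here removeAll is used three times: subtracting the admin data's checksum lines from the replica listing yields the files with wrong checksums, and additionally subtracting the UPLOAD_COMPLETED lines yields the files in a wrong state.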

/**
 * This method finds out which files in a given bitarchive are
 * misrepresented in the admin data: Either having the wrong checksum or not
 * being marked as uploaded when it actually is. <p/> It uses the admindata
 * file from the DIRS_ARCREPOSITORY_ADMIN directory, as well as the files
 * output by a runChecksumJob. The names of the erroneous files are written to output files.
 *
 * FIXME: It is unclear whether the decision about which files are changed
 * would be better placed in getChangedFiles, so that this method only runs
 * the batch job.
 *
 * @param replica the bitarchive replica the checksumjob came from
 *
 * @throws IOFailure        On file or network trouble.
 * @throws PermissionDenied if the output directory cannot be created
 * @throws ArgumentNotValid if argument replica is null
 */
public void findChangedFiles(Replica replica) throws IOFailure, PermissionDenied, ArgumentNotValid {
    ArgumentNotValid.checkNotNull(replica, "Replica replica");
    runChecksumJob(replica);
    admin.synchronize();

    // Create set of checksums from bitarchive data
    Set<String> replicaChecksumSet = new HashSet<String>(
            WorkFiles.getLines(replica, WorkFiles.CHECKSUMS_ON_BA));

    // Get set of files in arcrepository
    Set<String> arcrepChecksumSet = new HashSet<String>();
    for (String fileName : admin.getAllFileNames()) {
        arcrepChecksumSet.add(ChecksumJob.makeLine(fileName, admin.getCheckSum(fileName)));
    }

    // Get set of completed files in arcrepository
    // Note that these files use the format <filename>##<checksum> to
    // conform to the checksum output.
    Set<String> arcrepCompletedChecksumSet = new HashSet<String>();
    for (String fileName : admin.getAllFileNames(replica, ReplicaStoreState.UPLOAD_COMPLETED)) {
        arcrepCompletedChecksumSet.add(ChecksumJob.makeLine(fileName, admin.getCheckSum(fileName)));
    }

    // Find files where checksums differ
    Set<String> wrongChecksums = new HashSet<String>(replicaChecksumSet);
    wrongChecksums.removeAll(arcrepChecksumSet);

    // Find files where state is wrong
    Set<String> wrongStates = new HashSet<String>(replicaChecksumSet);
    wrongStates.removeAll(wrongChecksums);
    wrongStates.removeAll(arcrepCompletedChecksumSet);

    // Remove files unknown in admin data (note: these are not ignored;
    // they will be handled by the missing-files operations)
    for (String checksum : new ArrayList<String>(wrongChecksums)) {
        Map.Entry<String, String> entry = ChecksumJob.parseLine(checksum);
        if (!admin.hasEntry(entry.getKey())) {
            wrongChecksums.remove(checksum);
            wrongStates.remove(checksum);
        }
    }

    // Log result
    if (wrongChecksums.size() > 0) {
        log.warn("The " + wrongChecksums.size() + " files '"
                + new ArrayList<String>(wrongChecksums).subList(0,
                        Math.min(wrongChecksums.size(), MAX_LIST_SIZE))
                + "' have wrong checksum in the bitarchive listing in '"
                + WorkFiles.getPreservationDir(replica).getAbsolutePath() + "'");
    }
    if (wrongStates.size() > 0) {
        log.warn("The " + wrongStates.size() + " files '"
                + new ArrayList<String>(wrongStates).subList(0, Math.min(wrongStates.size(), MAX_LIST_SIZE))
                + "' have wrong states in the bitarchive listing in '"
                + WorkFiles.getPreservationDir(replica).getAbsolutePath() + "'");
    }

    // Collect all names of files with the wrong checksum
    Set<String> wrongChecksumFilenames = new HashSet<String>();
    for (String checksum : wrongChecksums) {
        Map.Entry<String, String> entry = ChecksumJob.parseLine(checksum);
        wrongChecksumFilenames.add(entry.getKey());
    }

    // Collect all names of files with the wrong state
    Set<String> wrongStateFilenames = new HashSet<String>();
    for (String checksum : wrongStates) {
        Map.Entry<String, String> entry = ChecksumJob.parseLine(checksum);
        wrongStateFilenames.add(entry.getKey());
    }

    // Write output data to the files.
    WorkFiles.write(replica, WorkFiles.WRONG_FILES, wrongChecksumFilenames);
    WorkFiles.write(replica, WorkFiles.WRONG_STATES, wrongStateFilenames);
}

From source file:com.evolveum.midpoint.model.impl.validator.ResourceValidatorImpl.java
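
Here removeAll is the mirror image of the earlier midPoint example: object types covered by synchronization are removed from the schema-handling set, flagging handled types that have no synchronization definition.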

private void checkSynchronizationExistenceForSchemaHandlingObjectTypes(ResourceValidationContext ctx) {
    ResourceType resource = ctx.resourceObject.asObjectable();
    Set<ObjectTypeRecord> schemaHandlingFor = new HashSet<>(
            ObjectTypeRecord.extractFrom(resource.getSchemaHandling()));
    Collection<ObjectTypeRecord> synchronizationFor = ObjectTypeRecord
            .extractFrom(resource.getSynchronization());
    schemaHandlingFor.removeAll(synchronizationFor);
    if (!schemaHandlingFor.isEmpty()) {
        ctx.validationResult.add(Issue.Severity.INFO, CAT_SYNCHRONIZATION, C_NO_SYNCHRONIZATION_DEFINITION,
                getString(ctx.bundle, CLASS_DOT + C_NO_SYNCHRONIZATION_DEFINITION,
                        ObjectTypeRecord.asFormattedList(schemaHandlingFor)),
                ctx.resourceRef, ITEM_PATH_SYNCHRONIZATION);
    }
}