Example usage for java.util Set removeAll

Introduction

This page collects example usages of java.util Set removeAll, drawn from the open-source projects listed below.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
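
As a quick illustration of that contract, here is a minimal, self-contained sketch: removeAll performs an in-place set difference on the receiver, returns true if the set changed, and leaves the argument collection untouched.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(List.of("red", "green", "blue"));
        // In-place set difference: matching elements are removed;
        // "purple" is simply ignored because it was never in the set.
        boolean changed = colors.removeAll(List.of("green", "blue", "purple"));
        System.out.println(changed); // true - the set was modified
        System.out.println(colors);  // [red]
    }
}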

Usage

From source file:biz.netcentric.cq.tools.actool.validators.impl.AceBeanValidatorImpl.java

@Override
public boolean validateRestrictions(final AceBean tmpAceBean, final AccessControlManager aclManager)
        throws InvalidRepGlobException, InvalidRestrictionsException {
    boolean valid = true;

    final List<Restriction> restrictions = tmpAceBean.getRestrictions();
    if (restrictions.isEmpty()) {
        return true;
    }

    final String principal = tmpAceBean.getPrincipalName();

    final Set<String> restrictionNamesFromAceBean = new HashSet<String>();
    for (Restriction restriction : restrictions) {
        restrictionNamesFromAceBean.add(restriction.getName());
    }

    final Set<String> allowedRestrictionNames = getSupportedRestrictions(aclManager);

    if (!allowedRestrictionNames.containsAll(restrictionNamesFromAceBean)) {
        restrictionNamesFromAceBean.removeAll(allowedRestrictionNames);
        valid = false;
        final String errorMessage = getBeanDescription(this.currentBeanCounter, principal)
                + ",  this repository doesn't support following restriction(s): " + restrictionNamesFromAceBean;
        throw new InvalidRestrictionsException(errorMessage);
    }

    return valid;
}
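
The containsAll-then-removeAll pairing above is a tidy validation idiom: the cheap membership check guards the common case, and the destructive removeAll runs only on the failure path, leaving exactly the unsupported names for the error message. A condensed sketch of the idiom, with hypothetical names:

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

final class RestrictionCheck {
    // Hypothetical helper: fails fast, naming exactly the unsupported elements.
    static void requireAllSupported(Collection<String> requested, Set<String> supported) {
        Set<String> unsupported = new HashSet<>(requested); // copy so the caller's collection survives
        unsupported.removeAll(supported);                   // keep only the offending names
        if (!unsupported.isEmpty()) {
            throw new IllegalArgumentException("Unsupported restriction(s): " + unsupported);
        }
    }
}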

From source file:edu.txstate.dmlab.clusteringwiki.rest.ClusterController.java

/**
 * Query transfer - get a similar query if the current query has 
 * not already been executed
 * @param query  Executed query
 * @param analyzedQuery  Analyzed executed query string terms
 * @param userId  User id for logged in user
 * @param allUserId  User id for "all" user
 * @param loggedIn  Whether user is logged in
 * @param search  Search results collection
 * @param service  Service used to execute search
 * @param numResults  Number of results retrieved
 * @param clusteringAlgo  Clustering algorithm used to cluster results
 * @return the matching query (existing or newly transferred), or null if no stored query is similar enough
 */
protected Query transfer(String query, String analyzedQuery, Integer userId, Integer allUserId,
        boolean loggedIn, ICWSearchResultCol search, String service, Integer numResults,
        Integer clusteringAlgo) {

    //query for logged in user
    Query q = queryDao.selectExistingUserQuery(userId, service, numResults, query);

    if (q != null)
        return q;

    List<Query> matches = queryDao.selectUserQueryMatchingSearch(query, analyzedQuery, allUserId,
            ApplicationSettings.getTermSimQueryResultsLimit());

    //find query with largest similarity
    double sim = 0.0D;
    Query qPrime = null;
    ISimilarityCalculator calc = new JaccardSimilarityCalculator();
    for (Query a : matches) {
        double currentSim = calc.computeSimilarity(analyzedQuery, a.getParsedText());
        if (Double.compare(currentSim, sim) > 0) {
            qPrime = a;
            sim = currentSim;
        }
    }
    //make sure it is similar enough
    if (Double.compare(sim, ApplicationSettings.getTermSimThreshold()) < 0 || qPrime == null)
        return null;

    //check the result similarity between the top k results received and the query found
    List<String> responseUrls = search.getTopKResponseUrls(ApplicationSettings.getTopKQueryUrls());
    Set<String> responseUrlsSet = new HashSet<String>(responseUrls);
    Set<String> queryUrlsSet = qPrime.retrieveTopKQueryResponseUrlsSet();
    Set<String> intersection = new HashSet<String>(responseUrlsSet);
    intersection.removeAll(queryUrlsSet);
    Set<String> union = responseUrlsSet;
    union.addAll(queryUrlsSet);
    sim = intersection.size() / (double) union.size();

    //make sure it is similar enough
    if (Double.compare(sim, ApplicationSettings.getResultSimThreshold()) < 0 || qPrime == null)
        return null;

    //found q' that is similar enough to q
    //save q and copy preferences from q' to q
    if (loggedIn) {
        List<String> urls = search.getTopKResponseUrls(ApplicationSettings.getTopKQueryUrls());
        //save new queries
        q = new Query(userId, service, numResults, query, null, urls);
        q.setParsedText(analyzedQuery);
        queryDao.saveQuery(q);
        Query qAll = new Query(allUserId, service, numResults, query, null, urls);
        qAll.setParsedText(analyzedQuery);
        queryDao.saveQuery(qAll);
        //associate new edits
        Integer queryId = q.getId();
        List<ClusterEdit> edits = clusterEditDao.selectClusterEditsForUserQuery(qPrime.getId(), clusteringAlgo,
                qPrime.getUserId().equals(allUserId));
        for (ClusterEdit ePrime : edits) {
            ClusterEdit e = new ClusterEdit();
            e.setCardinality(ePrime.getCardinality());
            e.setClusteringAlgo(ePrime.getClusteringAlgo());
            e.setQueryId(queryId);
            e.setPath1(ePrime.getPath1());
            e.setPath2(ePrime.getPath2());
            e.setPath3(ePrime.getPath3());
            e.setPath4(ePrime.getPath4());
            e.setPath5(ePrime.getPath5());
            clusterEditDao.saveClusterEdit(e);
        }
    } else {
        q = qPrime;
    }
    return q;
}
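
One caveat worth flagging: removeAll yields the set difference (elements of the receiver absent from the argument), so the variable named intersection above actually ends up holding responseUrlsSet minus queryUrlsSet; a true Jaccard similarity needs retainAll for the intersection. A minimal sketch of the three set operations side by side:

import java.util.HashSet;
import java.util.Set;

final class SetOps {
    public static void main(String[] args) {
        Set<Integer> a = new HashSet<>(Set.of(1, 2, 3));
        Set<Integer> b = Set.of(2, 3, 4);

        Set<Integer> difference = new HashSet<>(a);
        difference.removeAll(b);                      // a minus b = [1]

        Set<Integer> intersection = new HashSet<>(a);
        intersection.retainAll(b);                    // a intersect b = [2, 3]

        Set<Integer> union = new HashSet<>(a);
        union.addAll(b);                              // a union b = [1, 2, 3, 4]

        // Jaccard similarity = |intersection| / |union| = 2 / 4 = 0.5
        System.out.println(intersection.size() / (double) union.size());
    }
}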

From source file:io.pravega.controller.server.eventProcessor.ControllerEventProcessors.java

private CompletableFuture<Void> handleOrphanedReaders(
        final EventProcessorGroup<? extends ControllerEvent> group, final Supplier<Set<String>> processes) {
    return withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
        try {
            return group.getProcesses();
        } catch (CheckpointStoreException e) {
            if (e.getType().equals(CheckpointStoreException.Type.NoNode)) {
                return Collections.<String>emptySet();
            }
            throw new CompletionException(e);
        }
    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor)
            .thenComposeAsync(groupProcesses -> withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
                try {
                    return new ImmutablePair<>(processes.get(), groupProcesses);
                } catch (Exception e) {
                    log.error(String.format("Error fetching current processes%s", group.toString()), e);
                    throw new CompletionException(e);
                }
            }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor)).thenComposeAsync(pair -> {
                Set<String> activeProcesses = pair.getLeft();
                Set<String> registeredProcesses = pair.getRight();

                if (registeredProcesses == null || registeredProcesses.isEmpty()) {
                    return CompletableFuture.completedFuture(null);
                }

                if (activeProcesses != null) {
                    registeredProcesses.removeAll(activeProcesses);
                }

                List<CompletableFuture<Void>> futureList = new ArrayList<>();
                for (String process : registeredProcesses) {
                    futureList.add(withRetriesAsync(() -> CompletableFuture.runAsync(() -> {
                        try {
                            group.notifyProcessFailure(process);
                        } catch (CheckpointStoreException e) {
                            log.error(String.format(
                                    "Error notifying failure of process=%s in event processor group %s",
                                    process, group.toString()), e);
                            throw new CompletionException(e);
                        }
                    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor));
                }

                return FutureHelpers.allOf(futureList);
            });
}
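
Stripped of the retry and future plumbing, the heart of this method is a single set difference: readers registered in the checkpoint store but absent from the live-process list are exactly "registered minus active", and each leftover entry gets a failure notification. A reduced sketch with hypothetical names:

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

final class OrphanSweep {
    // Hypothetical reduction of the logic above, without retries or CompletableFutures.
    static void notifyOrphans(Set<String> registered, Set<String> active, Consumer<String> notifyFailure) {
        Set<String> orphaned = new HashSet<>(registered); // copy: the store's view stays intact
        orphaned.removeAll(active);                       // registered minus active = orphaned readers
        orphaned.forEach(notifyFailure);
    }
}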

From source file:dk.netarkivet.harvester.indexserver.distribute.IndexRequestServer.java

/**
 * Method that handles the processing of an indexRequestMessage. Returns the requested index immediately, if already
 * available, otherwise proceeds with the index generation of the requested index. Must be run in its own thread,
 * because it blocks while the index is generated.
 *
 * @param irMsg A message requesting an index
 * @see #visit(IndexRequestMessage)
 */
private void doProcessIndexRequestMessage(final IndexRequestMessage irMsg) {
    final boolean mustReturnIndex = irMsg.mustReturnIndex();
    try {
        checkMessage(irMsg);
        RequestType type = irMsg.getRequestType();
        Set<Long> jobIDs = irMsg.getRequestedJobs();

        if (log.isInfoEnabled()) {
            log.info("Request received for an index of type '{}' for the {} jobs [{}]", type, jobIDs.size(),
                    StringUtils.conjoin(",", jobIDs));
        }
        FileBasedCache<Set<Long>> handler = handlers.get(type);

        // Here we need to make sure that we don't accidentally process more than
        // one message at a time before the whole process is over
        List<Long> sortedList = new ArrayList<Long>(jobIDs);
        String allIDsString = StringUtils.conjoin("-", sortedList);
        String checksum = ChecksumCalculator.calculateMd5(allIDsString.getBytes());
        log.debug(
                "Waiting to enter the synchronization zone for the indexing job of size {} with checksum '{}'",
                jobIDs.size(), checksum);
        // Begin synchronization
        synchronized (checksum.intern()) {
            log.debug("The indexing job of size {} with checksum '{}' is now in the synchronization zone",
                    jobIDs.size(), checksum);
            Set<Long> foundIDs = handler.cache(jobIDs);
            irMsg.setFoundJobs(foundIDs);
            if (foundIDs.equals(jobIDs)) {
                if (log.isInfoEnabled()) {
                    log.info("Retrieved successfully index of type '{}' for the {} jobs [{}]", type,
                            jobIDs.size(), StringUtils.conjoin(",", jobIDs));
                }
                File cacheFile = handler.getCacheFile(jobIDs);
                if (mustReturnIndex) { // return index now!
                    packageResultFiles(irMsg, cacheFile);
                }
            } else if (satisfactoryTresholdReached(foundIDs, jobIDs)) {
                log.info(
                        "Data for full index w/ {} jobs not available. Only found data for {} jobs - "
                                + "but satisfactoryTreshold reached, so assuming presence of all data",
                        jobIDs.size(), foundIDs.size());
                // Make sure that the index of the data available is generated
                Set<Long> theFoundIDs = handler.cache(foundIDs);
                // theFoundIDs should be identical to foundIDs
                // Let's make sure of that
                Set<Long> diffSet = new HashSet<Long>(foundIDs);
                diffSet.removeAll(theFoundIDs);

                // Make a copy of the index available, and give it the name of
                // the index cache file wanted.
                File cacheFileWanted = handler.getCacheFile(jobIDs);
                File cacheFileCreated = handler.getCacheFile(foundIDs);

                log.info("Satisfactory threshold reached - copying index {} '{}' to full index: {}",
                        (cacheFileCreated.isDirectory() ? "dir" : "file"), cacheFileCreated.getAbsolutePath(),
                        cacheFileWanted.getAbsolutePath());
                if (cacheFileCreated.isDirectory()) {
                    // create destination cacheFileWanted, and
                    // copy all files in cacheFileCreated to cacheFileWanted.
                    cacheFileWanted.mkdirs();
                    FileUtils.copyDirectory(cacheFileCreated, cacheFileWanted);
                } else {
                    FileUtils.copyFile(cacheFileCreated, cacheFileWanted);
                }

                // TODO This delete-operation commented out, because it is deemed too dangerous,
                // as the cachedir represented by cacheFileCreated may still be used

                // log.info("Deleting the temporary index "
                // + cacheFileCreated.getAbsolutePath());
                // FileUtils.removeRecursively(cacheFileCreated);
                log.info("We keep the index '{}', as we don't know if anybody is using it",
                        cacheFileCreated.getAbsolutePath());

                // Information needed by recipient to store index in local cache
                irMsg.setFoundJobs(jobIDs);
                if (mustReturnIndex) { // return index now.
                    packageResultFiles(irMsg, cacheFileWanted);
                }
            } else {
                Set<Long> missingJobIds = new HashSet<Long>(jobIDs);
                missingJobIds.removeAll(foundIDs);
                log.warn("Failed generating index of type '{}' for the jobs [{}]. Missing data for jobs [{}].",
                        type, StringUtils.conjoin(",", jobIDs), StringUtils.conjoin(",", missingJobIds));
            }

        } // End of synchronization block
    } catch (Throwable t) {
        log.warn("Unable to generate index for jobs [" + StringUtils.conjoin(",", irMsg.getRequestedJobs())
                + "]", t);
        irMsg.setNotOk(t);
    } finally {
        // Remove job from currentJobs Set
        synchronized (currentJobs) {
            currentJobs.remove(irMsg.getID());
        }
        // delete stored message
        deleteStoredMessage(irMsg);
        String state = "failed";
        if (irMsg.isOk()) {
            state = "successful";
        }
        if (mustReturnIndex) {
            log.info("Sending {} reply for IndexRequestMessage back to sender '{}'.", state,
                    irMsg.getReplyTo());
            JMSConnectionFactory.getInstance().reply(irMsg);
        } else {
            log.info("Sending {} IndexReadyMessage to Scheduler for harvest {}", state, irMsg.getHarvestId());
            boolean isindexready = true;
            if (state.equalsIgnoreCase("failed")) {
                isindexready = false;
            }
            IndexReadyMessage irm = new IndexReadyMessage(irMsg.getHarvestId(), isindexready,
                    irMsg.getReplyTo(), Channels.getTheIndexServer());
            JMSConnectionFactory.getInstance().send(irm);
        }
    }
}
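
Note that both difference computations above (diffSet and missingJobIds) copy into a fresh HashSet before calling removeAll, because removeAll is destructive and the original sets are still needed afterwards. When a non-mutating difference reads better, a stream filter produces the same result; a small sketch:

import java.util.Set;
import java.util.stream.Collectors;

final class NonMutatingDiff {
    static Set<Long> missing(Set<Long> wanted, Set<Long> found) {
        // Equivalent to new HashSet<>(wanted) followed by removeAll(found),
        // but leaves both inputs untouched and allocates only the result.
        return wanted.stream()
                .filter(id -> !found.contains(id))
                .collect(Collectors.toSet());
    }
}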

From source file:com.streamsets.pipeline.stage.origin.lib.DataFormatParser.java

public List<Stage.ConfigIssue> init(Stage.Context context, String configPrefix) {
    List<Stage.ConfigIssue> issues = new ArrayList<>();
    final String prefix = configPrefix + DATA_FORMAT_CONFIG_PREFIX;
    switch (dataFormat) {
    case JSON:
        if (dataFormatConfig.jsonMaxObjectLen < 1) {
            issues.add(context.createConfigIssue(DataFormat.JSON.name(), prefix + "maxJsonObjectLen",
                    ParserErrors.PARSER_04));
        }
        break;
    case TEXT:
        if (dataFormatConfig.textMaxLineLen < 1) {
            issues.add(context.createConfigIssue(DataFormat.TEXT.name(), prefix + "maxLogLineLength",
                    ParserErrors.PARSER_04));
        }
        break;
    case DELIMITED:
        if (dataFormatConfig.csvMaxObjectLen < 1) {
            issues.add(context.createConfigIssue(DataFormat.DELIMITED.name(), prefix + "csvMaxObjectLen",
                    ParserErrors.PARSER_04));
        }
        break;
    case XML:
        if (messageConfig != null && messageConfig.produceSingleRecordPerMessage) {
            issues.add(context.createConfigIssue(parentName,
                    configPrefix + "messageConfig.produceSingleRecordPerMessage", ParserErrors.PARSER_06));
        }
        if (dataFormatConfig.xmlMaxObjectLen < 1) {
            issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                    prefix + "maxXmlObjectLen", ParserErrors.PARSER_04));
        }
        if (StringUtils.isNotBlank(dataFormatConfig.xmlRecordElement)) {
            String invalidXPathError = XPathValidatorUtil
                    .getXPathValidationError(dataFormatConfig.xmlRecordElement);
            if (StringUtils.isNotBlank(invalidXPathError)) {
                issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                        prefix + "xmlRecordElement", ParserErrors.PARSER_02, dataFormatConfig.xmlRecordElement,
                        invalidXPathError));
            } else {
                final Set<String> nsPrefixes = XPathValidatorUtil
                        .getNamespacePrefixes(dataFormatConfig.xmlRecordElement);
                nsPrefixes.removeAll(dataFormatConfig.xPathNamespaceContext.keySet());
                if (!nsPrefixes.isEmpty()) {
                    issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                            prefix + "xPathNamespaceContext", ParserErrors.PARSER_09,
                            StringUtils.join(nsPrefixes, ", ")));
                }
            }
        }
        break;
    case SDC_JSON:
        break;
    case LOG:
        logDataFormatValidator = new LogDataFormatValidator(dataFormatConfig.logMode,
                dataFormatConfig.logMaxObjectLen, dataFormatConfig.retainOriginalLine,
                dataFormatConfig.customLogFormat, dataFormatConfig.regex,
                dataFormatConfig.grokPatternDefinition, dataFormatConfig.grokPattern,
                dataFormatConfig.enableLog4jCustomLogFormat, dataFormatConfig.log4jCustomLogFormat,
                dataFormatConfig.onParseError, dataFormatConfig.maxStackTraceLines, DataFormat.LOG.name(),
                getFieldPathToGroupMap(dataFormatConfig.fieldPathsToGroupName));
        logDataFormatValidator.validateLogFormatConfig(context, prefix, issues);
        break;
    case AVRO:
        if (dataFormatConfig.avroSchemaSource == OriginAvroSchemaSource.INLINE
                && isEmpty(dataFormatConfig.avroSchema)) {
            issues.add(context.createConfigIssue(DataFormat.AVRO.name(), prefix + "avroSchema",
                    ParserErrors.PARSER_07, dataFormatConfig.avroSchema));
        }
        break;
    case PROTOBUF:
        if (dataFormatConfig.protoDescriptorFile == null || dataFormatConfig.protoDescriptorFile.isEmpty()) {
            issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                    prefix + "protoDescriptorFile", DataFormatErrors.DATA_FORMAT_07));
        } else {
            File file = new File(context.getResourcesDirectory(), dataFormatConfig.protoDescriptorFile);
            if (!file.exists()) {
                issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                        prefix + "protoDescriptorFile", DataFormatErrors.DATA_FORMAT_09,
                        file.getAbsolutePath()));
            }
            if (dataFormatConfig.messageType == null || dataFormatConfig.messageType.isEmpty()) {
                issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                        prefix + "messageType", DataFormatErrors.DATA_FORMAT_08));
            }
        }
        break;
    case WHOLE_FILE:
        if (dataFormatConfig.wholeFileMaxObjectLen < 1) {
            issues.add(context.createConfigIssue(DataFormat.XML.name(), prefix + "maxWholeFileObjectLen",
                    ParserErrors.PARSER_04));
        }
        break;
    case BINARY:
        if (dataFormatConfig.binaryMaxObjectLen < 1) {
            issues.add(context.createConfigIssue(DataFormatGroups.DATA_FORMAT.name(),
                    prefix + "binaryMaxObjectLen", ParserErrors.PARSER_04));
        }
        break;
    case DATAGRAM:
        if (dataFormatConfig.datagramMode == DatagramMode.COLLECTD) {
            dataFormatConfig.checkCollectdParserConfigs(context, prefix, issues);
        } else if (dataFormatConfig.datagramMode == DatagramMode.NETFLOW) {
            NetflowDataParserFactory.validateConfigs(context, issues, DataFormatGroups.DATA_FORMAT.name(),
                    prefix, dataFormatConfig.maxTemplateCacheSizeDatagram,
                    dataFormatConfig.templateCacheTimeoutMsDatagram, "maxTemplateCacheSizeDatagram",
                    "templateCacheTimeoutMsDatagram");
        }
        break;
    case NETFLOW:
        NetflowDataParserFactory.validateConfigs(context, issues, DataFormatGroups.DATA_FORMAT.name(), prefix,
                dataFormatConfig.maxTemplateCacheSize, dataFormatConfig.templateCacheTimeoutMs);
        break;
    case SYSLOG:
        // nothing to validate
        break;
    case EXCEL:
        // nothing to validate
        break;
    default:
        issues.add(context.createConfigIssue(parentName, configPrefix + "dataFormat", ParserErrors.PARSER_05,
                dataFormat));
    }

    DataParserFactoryBuilder builder = new DataParserFactoryBuilder(context, dataFormat.getParserFormat())
            .setCharset(Charset.defaultCharset());
    if (dataFormatConfig.charset == null) {
        messageCharset = StandardCharsets.UTF_8;
    } else {
        try {
            messageCharset = Charset.forName(dataFormatConfig.charset);
        } catch (UnsupportedCharsetException ex) {
            // setting it to a valid one so the parser factory can be configured and tested for more errors
            messageCharset = StandardCharsets.UTF_8;
            issues.add(context.createConfigIssue(parentName, "charset", ParserErrors.PARSER_01,
                    dataFormatConfig.charset));
        }
    }
    builder.setCharset(messageCharset).setRemoveCtrlChars(dataFormatConfig.removeCtrlChars);

    switch (dataFormat) {
    case TEXT:
        builder.setMaxDataLen(dataFormatConfig.textMaxLineLen)
                .setConfig(TextDataParserFactory.USE_CUSTOM_DELIMITER_KEY, dataFormatConfig.useCustomDelimiter)
                .setConfig(TextDataParserFactory.CUSTOM_DELIMITER_KEY, dataFormatConfig.customDelimiter)
                .setConfig(TextDataParserFactory.INCLUDE_CUSTOM_DELIMITER_IN_TEXT_KEY,
                        dataFormatConfig.includeCustomDelimiterInTheText);
        break;
    case JSON:
        builder.setMode(dataFormatConfig.jsonContent);
        builder.setMaxDataLen(dataFormatConfig.jsonMaxObjectLen);
        break;
    case DELIMITED:
        builder.setMaxDataLen(dataFormatConfig.csvMaxObjectLen).setMode(dataFormatConfig.csvFileFormat)
                .setMode(dataFormatConfig.csvHeader).setMode(dataFormatConfig.csvRecordType)
                .setConfig(DelimitedDataConstants.DELIMITER_CONFIG, dataFormatConfig.csvCustomDelimiter)
                .setConfig(DelimitedDataConstants.ESCAPE_CONFIG, dataFormatConfig.csvCustomEscape)
                .setConfig(DelimitedDataConstants.QUOTE_CONFIG, dataFormatConfig.csvCustomQuote)
                .setConfig(DelimitedDataConstants.PARSE_NULL, dataFormatConfig.parseNull)
                .setConfig(DelimitedDataConstants.NULL_CONSTANT, dataFormatConfig.nullConstant)
                .setConfig(DelimitedDataConstants.COMMENT_ALLOWED_CONFIG, dataFormatConfig.csvEnableComments)
                .setConfig(DelimitedDataConstants.COMMENT_MARKER_CONFIG, dataFormatConfig.csvCommentMarker)
                .setConfig(DelimitedDataConstants.IGNORE_EMPTY_LINES_CONFIG,
                        dataFormatConfig.csvIgnoreEmptyLines)
                .setConfig(DelimitedDataConstants.ALLOW_EXTRA_COLUMNS, dataFormatConfig.csvAllowExtraColumns)
                .setConfig(DelimitedDataConstants.EXTRA_COLUMN_PREFIX, dataFormatConfig.csvExtraColumnPrefix);
        break;
    case XML:
        builder.setMaxDataLen(dataFormatConfig.xmlMaxObjectLen);
        builder.setConfig(XmlDataParserFactory.RECORD_ELEMENT_KEY, dataFormatConfig.xmlRecordElement);
        builder.setConfig(XmlDataParserFactory.INCLUDE_FIELD_XPATH_ATTRIBUTES_KEY,
                dataFormatConfig.includeFieldXpathAttributes);
        builder.setConfig(XmlDataParserFactory.RECORD_ELEMENT_XPATH_NAMESPACES_KEY,
                dataFormatConfig.xPathNamespaceContext);
        builder.setConfig(XmlDataParserFactory.USE_FIELD_ATTRIBUTES, dataFormatConfig.outputFieldAttributes);
        break;
    case SDC_JSON:
        builder.setMaxDataLen(-1);
        break;
    case LOG:
        logDataFormatValidator.populateBuilder(builder);
        break;
    case AVRO:
        builder.setMaxDataLen(Integer.MAX_VALUE).setConfig(SCHEMA_KEY, dataFormatConfig.avroSchema)
                .setConfig(SUBJECT_KEY, dataFormatConfig.subject)
                .setConfig(SCHEMA_ID_KEY, dataFormatConfig.schemaId)
                .setConfig(SCHEMA_SOURCE_KEY, dataFormatConfig.avroSchemaSource)
                .setConfig(SCHEMA_REPO_URLS_KEY, dataFormatConfig.schemaRegistryUrls);
        break;
    case PROTOBUF:
        builder.setConfig(ProtobufConstants.PROTO_DESCRIPTOR_FILE_KEY, dataFormatConfig.protoDescriptorFile)
                .setConfig(ProtobufConstants.MESSAGE_TYPE_KEY, dataFormatConfig.messageType)
                .setConfig(ProtobufConstants.DELIMITED_KEY, dataFormatConfig.isDelimited).setMaxDataLen(-1);
        break;
    case WHOLE_FILE:
        builder.setMaxDataLen(dataFormatConfig.wholeFileMaxObjectLen);
        break;
    case BINARY:
        builder.setMaxDataLen(dataFormatConfig.binaryMaxObjectLen);
        break;
    case DATAGRAM:
        dataFormatConfig.buildDatagramParser(builder);
        break;
    case SYSLOG:
        builder.setMaxDataLen(-1);
        break;
    case NETFLOW:
        builder.setMaxDataLen(-1)
                .setConfig(NetflowDataParserFactory.OUTPUT_VALUES_MODE_KEY,
                        dataFormatConfig.netflowOutputValuesMode)
                .setConfig(NetflowDataParserFactory.MAX_TEMPLATE_CACHE_SIZE_KEY,
                        dataFormatConfig.maxTemplateCacheSize)
                .setConfig(NetflowDataParserFactory.TEMPLATE_CACHE_TIMEOUT_MS_KEY,
                        dataFormatConfig.templateCacheTimeoutMs);
        break;
    case EXCEL:
        builder.setMaxDataLen(-1).setConfig(WorkbookParserConstants.HEADER, dataFormatConfig.excelHeader);
        break;
    default:
        throw new IllegalStateException(Utils.format("Unknown data format: {}", dataFormat));
    }
    try {
        parserFactory = builder.build();
    } catch (Exception ex) {
        issues.add(context.createConfigIssue(null, null, DataFormatErrors.DATA_FORMAT_06, ex.toString(), ex));
    }
    return issues;
}
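
The XML branch above subtracts a Map's key view from a Set: removing xPathNamespaceContext.keySet() from the prefixes referenced by the XPath leaves exactly the undeclared ones. A compact sketch of that check, with hypothetical names:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class PrefixCheck {
    static Set<String> undeclaredPrefixes(Set<String> usedPrefixes, Map<String, String> declared) {
        Set<String> undeclared = new HashSet<>(usedPrefixes);
        undeclared.removeAll(declared.keySet()); // a Map's keySet participates directly as a Set view
        return undeclared;                       // empty means every prefix is declared
    }
}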

From source file:mitm.common.cache.RateCounterImpl.java

private synchronized void checkValidity(String key) {
    Set<SubKeyItem> items = getKeyedItems(key, false);

    if (logger.isDebugEnabled()) {
        logger.debug("key {}, #items {}", key, CollectionUtils.getSize(items));
    }

    if (items != null) {
        List<SubKeyItem> itemsToRemove = new LinkedList<SubKeyItem>();

        for (SubKeyItem item : items) {
            if (!item.isValid()) {
                itemsToRemove.add(item);
            }
        }

        items.removeAll(itemsToRemove);

        /*
         * If there are no more items left associated with the key we can remove the
         * key
         */
        if (items.size() == 0) {
            keyMap.remove(key);
        }
    }
}
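
Collecting expired items into a scratch list and then calling removeAll is the classic pre-Java-8 way to filter a collection without risking a ConcurrentModificationException. Since Java 8, Collection.removeIf expresses the same thing in one pass; a sketch assuming the element type exposes an isValid() check the way SubKeyItem does:

import java.util.Set;

final class ExpirySweep {
    interface Validity { boolean isValid(); } // hypothetical stand-in for SubKeyItem

    static <T extends Validity> void dropInvalid(Set<T> items) {
        // One pass, in place: no scratch list, no removeAll.
        items.removeIf(item -> !item.isValid());
    }
}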

From source file:net.solarnetwork.central.dras.biz.dao.DaoBizSupport.java

/**
 * Helper method for maintaining the membership relationship in a group.
 *
 * @param membership the command
 * @param access API for updating the group membership
 * @return the EffectiveCollection representing the new membership status
 */
protected <T extends Entity<Long>, E extends Member> EffectiveCollection<T, E> maintainGroupMembership(
        MembershipCommand membership, MembershipMaintenance<T, E> access) {
    Effective eff;
    boolean newEffective = false;
    if (membership.getEffectiveId() != null) {
        eff = effectiveDao.get(membership.getEffectiveId());
    } else {
        eff = createEffective(null);
        newEffective = true;
    }
    Set<E> members = access.getMembers(membership.getParentId(), eff);
    Set<E> newMembers = new HashSet<E>(members.size() + membership.getGroup().size());
    Set<E> actionMembers = new LinkedHashSet<E>(membership.getGroup().size());
    for (Long memberId : membership.getGroup()) {
        actionMembers.add(access.createMember(memberId));
    }

    switch (membership.getMode()) {
    case Append:
        newMembers.addAll(members);
        newMembers.addAll(actionMembers);
        break;

    case Delete:
        newMembers.addAll(members);
        newMembers.removeAll(actionMembers);
        break;

    case Replace:
        newMembers.addAll(actionMembers);

    }

    if (newMembers.equals(members)) {
        // no change, clean up and return
        if (newEffective) {
            effectiveDao.delete(eff);
            eff = null; // TODO: or get the actual Effective from userGroupDao?
        }
    } else {
        Set<Long> ids = new HashSet<Long>(newMembers.size());
        for (Member m : newMembers) {
            ids.add(m.getId());
        }
        access.assignMembers(membership.getParentId(), ids, eff);
    }

    T userGroup = access.getDao().get(membership.getParentId());
    EffectiveCollection<T, E> result = new EffectiveCollection<T, E>(eff, userGroup, newMembers);
    return result;
}

From source file:alfio.manager.system.ConfigurationManager.java

private Map<ConfigurationKeys.SettingCategory, List<Configuration>> groupByCategory(
        Map<ConfigurationKeys.SettingCategory, List<Configuration>> all,
        Map<ConfigurationKeys.SettingCategory, List<Configuration>> existing) {
    return all.entrySet().stream().map(e -> {
        Set<Configuration> entries = new TreeSet<>();
        ConfigurationKeys.SettingCategory key = e.getKey();
        entries.addAll(e.getValue());
        if (existing.containsKey(key)) {
            List<Configuration> configurations = existing.get(key);
            entries.removeAll(configurations);
            entries.addAll(configurations);
        }
        return Pair.of(key, new ArrayList<>(entries));
    }).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
}
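
The removeAll/addAll pair here is a deliberate replace-by-equality trick: a Set silently ignores addAll for elements it already considers equal, so defaults that collide with an existing configuration must be evicted first for the stored values to win. A reduced sketch with a hypothetical element type whose equality is keyed on name only:

import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;

final class OverrideMerge {
    // Hypothetical element: equality and ordering by key only, so two Settings
    // with the same key but different values look "equal" to the Set.
    record Setting(String key, String value) implements Comparable<Setting> {
        public int compareTo(Setting o) { return key.compareTo(o.key); }
        public boolean equals(Object o) { return o instanceof Setting s && key.equals(s.key); }
        public int hashCode() { return Objects.hash(key); }
    }

    static Set<Setting> merge(Set<Setting> defaults, Set<Setting> saved) {
        Set<Setting> merged = new TreeSet<>(defaults);
        merged.removeAll(saved); // evict defaults whose key collides with a saved entry...
        merged.addAll(saved);    // ...so addAll actually inserts the saved value
        return merged;
    }
}

With a default Setting("timezone", "UTC") and a saved Setting("timezone", "CET"), merge keeps the CET entry; without the removeAll, addAll would be a no-op and the default would survive.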

From source file:com.aurel.track.item.ItemBL.java

/**
 * Compute the dates/planned values/expenses for all summary items
 */
public static void computeSummaryItems() {
    List<TWorkItemBean> workItemBeansWithParent = workItemDAO.loadAllWithParent();
    Set<Integer> workItemIDs = new HashSet<Integer>();
    Set<Integer> parentIDs = new HashSet<Integer>();
    for (TWorkItemBean workItemBean : workItemBeansWithParent) {
        workItemIDs.add(workItemBean.getObjectID());
        parentIDs.add(workItemBean.getSuperiorworkitem());
    }
    //get only the top level parents
    parentIDs.removeAll(workItemIDs);
    List<TWorkItemBean> topLevelParents = null;
    if (!parentIDs.isEmpty()) {
        LOGGER.debug("Computing summary date / planned value / remaining plan / totl expense for a number of "
                + parentIDs.size() + " top level workItems...");
        topLevelParents = loadByWorkItemKeys(GeneralUtils.createIntArrFromSet(parentIDs));
        workItemBeansWithParent.addAll(topLevelParents);
    }
    Map<Integer, TWorkItemBean> workItemBeansMap = GeneralUtils.createMapFromList(workItemBeansWithParent);
    Map<Integer, List<Integer>> parentToChildrenMap = getParentToChildrenMap(workItemBeansWithParent);
    if (topLevelParents != null) {
        for (TWorkItemBean workItemBean : topLevelParents) {
            LOGGER.debug("Computing summary date and planned value for workItem " + workItemBean.getObjectID());
            Map<Integer, Double> hoursPerWorkdayForProject = new HashMap<Integer, Double>();
            //compute planned works
            ComputedValueBL.computeBottomUpValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.TIME, TComputedValuesBean.COMPUTEDVALUETYPE.PLAN,
                    hoursPerWorkdayForProject);
            //compute planned costs
            ComputedValueBL.computeBottomUpValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.COST, TComputedValuesBean.COMPUTEDVALUETYPE.PLAN,
                    hoursPerWorkdayForProject);
            //compute work expenses
            ComputedValueBL.computeBottomUpValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.TIME, TComputedValuesBean.COMPUTEDVALUETYPE.EXPENSE,
                    hoursPerWorkdayForProject);
            //compute cost expenses
            ComputedValueBL.computeBottomUpValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.COST, TComputedValuesBean.COMPUTEDVALUETYPE.EXPENSE,
                    hoursPerWorkdayForProject);
            ComputedValueBL.computeBottomUpPersonValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.TIME, TComputedValuesBean.COMPUTEDVALUETYPE.EXPENSE,
                    hoursPerWorkdayForProject);
            ComputedValueBL.computeBottomUpPersonValues(workItemBean, parentToChildrenMap, workItemBeansMap,
                    TComputedValuesBean.EFFORTTYPE.COST, TComputedValuesBean.COMPUTEDVALUETYPE.EXPENSE,
                    hoursPerWorkdayForProject);
            RemainingPlanBL.computeBottomUpRemainingPlannedValues(workItemBean, parentToChildrenMap,
                    workItemBeansMap, hoursPerWorkdayForProject);
        }
    }
    LOGGER.debug("Summary date and planned values computed");
}
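
The removeAll call in the middle of this method does the structural work: every parent ID that also appears among the loaded work item IDs is an interior node, so subtracting workItemIDs leaves only the roots of the hierarchy. A stripped-down sketch of the idea:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class RootFinder {
    // childToParent maps each item to its parent; roots are parents that
    // never appear as children themselves.
    static Set<Integer> rootIds(Map<Integer, Integer> childToParent) {
        Set<Integer> parents = new HashSet<>(childToParent.values());
        parents.removeAll(childToParent.keySet()); // drop parents that are also children
        return parents;
    }
}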

From source file:com.adobe.acs.commons.mcp.impl.processes.asset.UrlAssetImport.java

protected Set<FileOrRendition> extractFilesAndFolders(List<Map<String, CompositeVariant>> fileData) {
    Set<FileOrRendition> allFiles = fileData.stream().peek(this::extractFolder).map(this::extractFile)
            .filter(t -> t != null).collect(Collectors.toSet());

    // Remove renditions from the data set and file them with their original renditions
    Set<FileOrRendition> renditions = allFiles.stream().filter(FileOrRendition::isRendition)
            .collect(Collectors.toSet());
    allFiles.removeAll(renditions);
    renditions.forEach(r -> {
        Optional<FileOrRendition> asset = findOriginalRendition(allFiles, r);
        if (asset.isPresent()) {
            asset.get().addRendition(r);
        } else {
            unmatchedRenditions.add(r.getProperties());
        }
    });
    return allFiles;
}
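
Filtering the renditions into their own set and then calling allFiles.removeAll(renditions) walks the data twice. Collectors.partitioningBy performs the same split in a single pass; a sketch where FileLike is a hypothetical stand-in for any type with an isRendition() test:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

final class SplitByPredicate {
    interface FileLike { boolean isRendition(); } // hypothetical stand-in

    static <T extends FileLike> Map<Boolean, Set<T>> split(Set<T> all) {
        // Key true holds renditions, key false holds primary files.
        return all.stream()
                .collect(Collectors.partitioningBy(FileLike::isRendition, Collectors.toSet()));
    }
}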