Example usage for java.util LinkedHashSet addAll

List of usage examples for java.util LinkedHashSet addAll

Introduction

On this page you can find example usage for java.util LinkedHashSet addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
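
Before the full examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed on this page) of how addAll behaves on a LinkedHashSet: new elements are appended in encounter order, elements already present are ignored, and the boolean return value reports whether the set changed.

import java.util.Arrays;
import java.util.LinkedHashSet;

public class AddAllDemo {
    public static void main(String[] args) {
        // LinkedHashSet keeps insertion order; addAll appends only elements
        // that are not already present and reports whether the set changed.
        LinkedHashSet<String> colors = new LinkedHashSet<>(Arrays.asList("red", "green"));
        boolean changed = colors.addAll(Arrays.asList("green", "blue"));
        System.out.println(changed); // true ("blue" was added)
        System.out.println(colors);  // [red, green, blue]
    }
}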

Usage

From source file: edu.internet2.middleware.psp.Psp.java

/**
 * Execute a {@link CalcRequest} and update the {@link CalcResponse}.
 *
 * The psp context argument allows for the caching of references during bulk requests.
 * 
 * @param calcRequest the SPML calc request
 * @param calcResponse the SPML calc response
 * @param pspContext the psp context
 */
public void execute(CalcRequest calcRequest, CalcResponse calcResponse, PspContext pspContext) {

    try {
        // Set the response id.
        calcResponse.setId(calcRequest.getId());

        // provisioning context
        // PspContext pspContext = new PspContext();
        pspContext.setProvisioningServiceProvider(this);
        pspContext.setProvisioningRequest(calcRequest);
        pspContext.setAttributes(null);

        // attribute request context
        BaseSAMLProfileRequestContext attributeRequestContext = new BaseSAMLProfileRequestContext();
        attributeRequestContext.setPrincipalName(calcRequest.getId());

        // get targets specified in request before building the context
        Map<String, List<Pso>> map = getTargetAndObjectDefinitions(calcRequest);

        // determine attribute resolver requested attributes
        LinkedHashSet<String> attributeIds = new LinkedHashSet<String>();
        for (String targetId : map.keySet()) {
            for (Pso psoDefinition : map.get(targetId)) {
                attributeIds.addAll(psoDefinition.getSourceIds(calcRequest.getReturnData()));
            }
        }
        attributeRequestContext.setRequestedAttributes(attributeIds);

        // resolve attributes
        LOG.debug("PSP '{}' - Calc {} Resolving attributes '{}'.",
                new Object[] { getId(), calcRequest, attributeIds });
        Map<String, BaseAttribute<?>> attributes = getAttributeAuthority()
                .getAttributes(attributeRequestContext);
        LOG.debug("PSP '{}' - Calc {} Resolved attributes '{}'.",
                new Object[] { getId(), calcRequest, attributes.keySet() });
        pspContext.setAttributes(attributes);

        // get PSOs based on attributes in psp context
        for (String targetId : map.keySet()) {
            for (Pso psoDefinition : map.get(targetId)) {
                for (PSO pso : psoDefinition.getPSO(pspContext)) {
                    calcResponse.addPSO(pso);
                }
            }
        }

        if (calcResponse.getPSOs().isEmpty()) {
            fail(calcResponse, ErrorCode.NO_SUCH_IDENTIFIER, "Unable to calculate provisioned object.");
            return;
        }

    } catch (PspException e) {
        fail(calcResponse, ErrorCode.CUSTOM_ERROR, e);
    } catch (AttributeRequestException e) {
        fail(calcResponse, ErrorCode.CUSTOM_ERROR, e);
    } catch (Spml2Exception e) {
        fail(calcResponse, ErrorCode.CUSTOM_ERROR, e);
    }
}

From source file: org.codice.alliance.catalog.transformer.mgmp.MgmpTransformer.java

@Override
public LinkedHashSet<Path> buildPaths() {
    LinkedHashSet<Path> paths = Stream.of(MgmpConstants.RESOURCE_ORIGINATOR_SECURITY_PATH,
            MgmpConstants.RESOURCE_SECURITY_RELEASABILITY_PATH, MgmpConstants.LANGUAGE_PATH,
            MgmpConstants.CLOUD_COVERAGE_PATH, MgmpConstants.FORMAT_PATH, GmdConstants.FORMAT_VERSION_PATH,

            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_NUMBER_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_DATUM_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_DATUM_NUMBER_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_ELLIPSOID_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_ELLIPSOID_NUMBER_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_PROJECTION_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_PROJECTION_NUMBER_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_GRID_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_GRID_NUMBER_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_GRID_WKT_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_PROJECTION_WKT_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_ELLIPSOID_WKT_TYPE_PATH,
            MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_DATUM_WKT_TYPE_PATH,

            MgmpConstants.ISR_COVERAGE_CATEGORY_PATH, MgmpConstants.ISR_COVERAGE_COMMENT_PATH,
            MgmpConstants.ISR_IMAGE_COMMENT_PATH, MgmpConstants.ISR_IMAGE_DESCRIPTION_PATH,
            MgmpConstants.ISR_MD_IMAGE_COMMENT_PATH, MgmpConstants.ISR_MD_IMAGE_DESCRIPTION_PATH,
            MgmpConstants.ISR_VIDEO_COMMENT_PATH, MgmpConstants.ISR_VIDEO_DESCRIPTION_PATH,

            MgmpConstants.METADATA_ORIGINATOR_SECURITY_PATH, MgmpConstants.METADATA_RELEASABILITY_PATH,
            MgmpConstants.RESOURCE_SECURITY_RELEASABILITY_PATH, MgmpConstants.NIIRS_PATH,
            MgmpConstants.NIIRS_RATING_PATH, MgmpConstants.RESOURCE_SECURITY_PATH,
            MgmpConstants.METADATA_SECURITY_PATH, MgmpConstants.MGMP_SPATIAL_REFERENCE_SYSTEM_CRS_WKT_TYPE_PATH)
            .map(this::toPath).collect(Collectors.toCollection(LinkedHashSet::new));

    paths.addAll(super.buildPaths());
    return paths;
}
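
The pattern above collects this transformer's own paths into a LinkedHashSet via Collectors.toCollection(LinkedHashSet::new) and then calls addAll with the superclass's paths, so the subclass's paths keep their positions and the parent's paths are appended after them. A minimal String-based sketch of that ordering behaviour (the real code uses the transformer's Path type; the values here are illustrative only):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OrderedMergeSketch {
    public static void main(String[] args) {
        // Collect this class's entries in a fixed order...
        LinkedHashSet<String> paths = Stream.of("mgmp/a", "mgmp/b")
                .collect(Collectors.toCollection(LinkedHashSet::new));
        // ...then append the parent's entries; an entry that is already
        // present keeps its original (earlier) position, new entries go last.
        paths.addAll(Arrays.asList("gmd/x", "mgmp/a"));
        System.out.println(paths); // [mgmp/a, mgmp/b, gmd/x]
    }
}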

From source file: org.alfresco.repo.security.permissions.impl.IntersectPermissionServiceImpl.java

/**
 * The key for a cache object is built from all the known Authorities (which can change dynamically, so they must all be
 * used), the NodeRef ID and the permission reference itself. This gives a unique key for each permission test.
 */
Serializable generateKey(Set<String> auths, NodeRef nodeRef, PermissionReference perm, CacheType type) {
    LinkedHashSet<Serializable> key = new LinkedHashSet<Serializable>();
    key.add(perm.toString());
    // We will just have to key our dynamic sets by username. We wrap it so as not to be confused with a static set
    if (auths instanceof AuthorityServiceImpl.UserAuthoritySet) {
        key.add((Serializable) Collections
                .singleton(((AuthorityServiceImpl.UserAuthoritySet) auths).getUsername()));
    } else {
        key.addAll(auths);
    }
    key.add(nodeRef);
    // Ensure some concept of node version or transaction is included in the key so we can track without cache replication 
    NodeRef.Status nodeStatus = nodeService.getNodeStatus(nodeRef);
    key.add(nodeStatus == null ? "null" : nodeStatus.getChangeTxnId());
    key.add(type);
    return key;
}

From source file: org.ncic.bioinfo.sparkseq.algorithms.utils.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge no longer verifies that sample names are unique, even if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check that before using simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.size() == 0)
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    List<VariantContext> VCs = new ArrayList<>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }

    if (VCs.size() == 0) // everything was filtered out and filteredAreUncalled is set
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final LinkedHashSet<Allele> alleles = new LinkedHashSet<>();
    final Set<String> filters = new HashSet<>();
    final Map<String, Object> attributes = new LinkedHashMap<>();
    final Set<String> inconsistentAttributes = new HashSet<>();
    final Set<String> variantSources = new HashSet<>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    boolean anyVCHadFiltersApplied = false;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches
    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());
        anyVCHadFiltersApplied |= vc.filtersWereApplied();

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // let's see if the string contains a "," separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                final List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (final String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            final String key = p.getKey();
            final Object value = p.getValue();
            // only output annotations that have the same value in every input VC
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(value) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    attributes.put(key, value);
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC,AF attributes from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was filtered and every VC is variant
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    if (anyVCHadFiltersApplied) {
        builder.filters(filters.isEmpty() ? filters : new TreeSet<>(filters));
    }
    builder.attributes(new TreeMap<>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}

From source file: org.codehaus.mojo.jsimport.AbstractImportMojo.java

/**
 * Build up the dependency graph and global symbol table by parsing the project's dependencies.
 *
 * @param scope compile or test.
 * @param fileDependencyGraphModificationTime the time that the dependency graph was updated. Used for file time
 *            comparisons to check the age of them.
 * @param processedFiles an insert-ordered set of files that have been processed.
 * @param targetFolder Where the target files live.
 * @param workFolder Where we can create some long lived information that may be useful to subsequent builds.
 * @param compileWorkFolder Ditto but in the case of testing it points to where the compile working folder is.
 * @return true if the dependency graph has been updated.
 * @throws MojoExecutionException if something bad happens.
 */
private boolean buildDependencyGraphForDependencies(Scope scope, long fileDependencyGraphModificationTime,
        LinkedHashSet<File> processedFiles, File targetFolder, File workFolder, File compileWorkFolder)
        throws MojoExecutionException {
    File targetJsFolder = new File(targetFolder, "js");

    boolean fileDependencyGraphUpdated = false;

    // Determine how we need to filter things both for direct filtering and transitive filtering.

    String scopeStr = (scope == Scope.COMPILE ? Artifact.SCOPE_COMPILE : Artifact.SCOPE_TEST);

    AndArtifactFilter jsArtifactFilter = new AndArtifactFilter();
    jsArtifactFilter.add(new ScopeArtifactFilter(scopeStr));
    jsArtifactFilter.add(new TypeArtifactFilter("js"));

    AndArtifactFilter wwwZipArtifactFilter = new AndArtifactFilter();
    wwwZipArtifactFilter.add(new ScopeArtifactFilter(scopeStr));
    wwwZipArtifactFilter.add(new TypeArtifactFilter("zip"));
    wwwZipArtifactFilter.add(new ArtifactFilter() {
        public boolean include(Artifact artifact) {
            return artifact.hasClassifier() && artifact.getClassifier().equals("www");
        }
    });

    // Determine the artifacts to resolve and associate their transitive dependencies.

    Map<Artifact, LinkedHashSet<Artifact>> directArtifactWithTransitives = new HashMap<Artifact, LinkedHashSet<Artifact>>(
            dependencies.size());

    Set<Artifact> directArtifacts = new HashSet<Artifact>(dependencies.size());
    LinkedHashSet<Artifact> transitiveArtifacts = new LinkedHashSet<Artifact>();

    for (Dependency dependency : dependencies) {
        // Process imports and symbols of this dependencies' transitives
        // first.
        Artifact directArtifact = artifactFactory.createDependencyArtifact(dependency.getGroupId(),
                dependency.getArtifactId(), VersionRange.createFromVersion(dependency.getVersion()),
                dependency.getType(), dependency.getClassifier(), dependency.getScope());

        if (!jsArtifactFilter.include(directArtifact) && !wwwZipArtifactFilter.include(directArtifact)) {
            continue;
        }

        Set<Artifact> artifactsToResolve = new HashSet<Artifact>(1);
        artifactsToResolve.add(directArtifact);

        ArtifactResolutionResult result;
        try {
            result = resolver.resolveTransitively(artifactsToResolve, project.getArtifact(), remoteRepositories,
                    localRepository, artifactMetadataSource);
        } catch (ArtifactResolutionException e) {
            throw new MojoExecutionException("Problem resolving dependencies", e);
        } catch (ArtifactNotFoundException e) {
            throw new MojoExecutionException("Problem resolving dependencies", e);
        }

        // Associate the transitive dependencies with the direct dependency and aggregate all transitives for
        // collection later.

        LinkedHashSet<Artifact> directTransitiveArtifacts = new LinkedHashSet<Artifact>(
                result.getArtifacts().size());
        for (Object o : result.getArtifacts()) {
            Artifact resolvedArtifact = (Artifact) o;
            if (jsArtifactFilter.include(resolvedArtifact) && //
                    !resolvedArtifact.equals(directArtifact)) {
                directTransitiveArtifacts.add(resolvedArtifact);
            }
        }

        directArtifacts.add(directArtifact);
        transitiveArtifacts.addAll(directTransitiveArtifacts);
        directArtifactWithTransitives.put(directArtifact, directTransitiveArtifacts);
    }

    // Resolve the best versions of the transitives to use by asking Maven to collect them.

    Set<Artifact> collectedArtifacts = new HashSet<Artifact>(
            directArtifacts.size() + transitiveArtifacts.size());
    Map<ArtifactId, Artifact> indexedCollectedDependencies = new HashMap<ArtifactId, Artifact>(
            collectedArtifacts.size());
    try {
        // Note that we must pass an insert-order set into the collector. The collector appears to assume that order
        // is significant, even though it is undocumented.
        LinkedHashSet<Artifact> collectableArtifacts = new LinkedHashSet<Artifact>(directArtifacts);
        collectableArtifacts.addAll(transitiveArtifacts);

        ArtifactResolutionResult resolutionResult = artifactCollector.collect(collectableArtifacts,
                project.getArtifact(), localRepository, remoteRepositories, artifactMetadataSource, null, //
                Collections.EMPTY_LIST);
        for (Object o : resolutionResult.getArtifacts()) {
            Artifact collectedArtifact = (Artifact) o;
            collectedArtifacts.add(collectedArtifact);

            // Build up an index of collected transitive dependencies so that we can refer back to them as we
            // process the direct dependencies.
            ArtifactId collectedArtifactId = new ArtifactId(collectedArtifact.getGroupId(),
                    collectedArtifact.getArtifactId());
            indexedCollectedDependencies.put(collectedArtifactId, collectedArtifact);
        }

        if (getLog().isDebugEnabled()) {
            getLog().debug("Dependencies collected: " + collectedArtifacts.toString());
        }
    } catch (ArtifactResolutionException e) {
        throw new MojoExecutionException("Cannot collect dependencies", e);
    }

    // Now go through direct artifacts and process their transitives.

    LocalRepositoryCollector localRepositoryCollector = new LocalRepositoryCollector(project, localRepository,
            new File[] {});

    for (Entry<Artifact, LinkedHashSet<Artifact>> entry : directArtifactWithTransitives.entrySet()) {
        Artifact directArtifact = entry.getKey();
        LinkedHashSet<Artifact> directArtifactTransitives = entry.getValue();

        LinkedHashSet<String> transitivesAsImports = new LinkedHashSet<String>(
                directArtifactTransitives.size());

        for (Object o : directArtifactTransitives) {
            Artifact directTransitiveArtifact = (Artifact) o;

            // Get the transitive artifact that Maven decided was the best to use.

            ArtifactId directTransitiveArtifactId = new ArtifactId(directTransitiveArtifact.getGroupId(),
                    directTransitiveArtifact.getArtifactId());
            Artifact transitiveArtifact = indexedCollectedDependencies.get(directTransitiveArtifactId);

            List<File> transitiveArtifactFiles = getArtifactFiles(transitiveArtifact, targetFolder, workFolder,
                    compileWorkFolder, localRepositoryCollector);

            // Only process this dependency if we've not done so
            // already.
            for (File transitiveArtifactFile : transitiveArtifactFiles) {
                if (!processedFiles.contains(transitiveArtifactFile)) {
                    String localRepository = localRepositoryCollector
                            .findLocalRepository(transitiveArtifactFile.getAbsolutePath());
                    if (localRepository != null) {
                        if (processFileForImportsAndSymbols(new File(localRepository), targetJsFolder,
                                transitiveArtifactFile, fileDependencyGraphModificationTime,
                                directArtifactTransitives)) {

                            processedFiles.add(transitiveArtifactFile);

                            fileDependencyGraphUpdated = true;
                        }
                    } else {
                        throw new MojoExecutionException(
                                "Problem determining local repository for transitive file: "
                                        + transitiveArtifactFile);
                    }
                }

                // Add transitives to the artifacts set of dependencies -
                // as if they were @import statements themselves.
                transitivesAsImports.add(transitiveArtifactFile.getPath());
            }
        }

        // Now deal with the pom specified dependency.
        List<File> artifactFiles = getArtifactFiles(directArtifact, targetFolder, workFolder, compileWorkFolder,
                localRepositoryCollector);
        for (File artifactFile : artifactFiles) {
            String artifactPath = artifactFile.getAbsolutePath();

            // Process imports and symbols of this dependency if we've not
            // already done so.
            if (!processedFiles.contains(artifactFile)) {
                String localRepository = localRepositoryCollector
                        .findLocalRepository(artifactFile.getAbsolutePath());
                if (localRepository != null) {
                    if (processFileForImportsAndSymbols(new File(localRepository), targetJsFolder, artifactFile,
                            fileDependencyGraphModificationTime, null)) {
                        processedFiles.add(artifactFile);

                        fileDependencyGraphUpdated = true;
                    }
                } else {
                    throw new MojoExecutionException(
                            "Problem determining local repository for file: " + artifactFile);
                }
            }

            // Add in our transitives to the dependency graph if they're not
            // already there.
            LinkedHashSet<String> existingImports = fileDependencies.get(artifactPath);
            if (existingImports.addAll(transitivesAsImports)) {
                if (getLog().isDebugEnabled()) {
                    getLog().debug("Using transitives as import: " + transitivesAsImports + " for file: "
                            + artifactPath);
                }
                fileDependencyGraphUpdated = true;
            }
        }

    }

    return fileDependencyGraphUpdated;
}

From source file: org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge no longer verifies that sample names are unique, even if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check that before using simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.isEmpty())
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    List<VariantContext> VCs = new ArrayList<>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }

    if (VCs.isEmpty()) // everything was filtered out and filteredAreUncalled is set
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final LinkedHashSet<Allele> alleles = new LinkedHashSet<>();
    final Set<String> filters = new HashSet<>();
    final Map<String, Object> attributes = new LinkedHashMap<>();
    final Set<String> inconsistentAttributes = new HashSet<>();
    final Set<String> variantSources = new HashSet<>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    boolean anyVCHadFiltersApplied = false;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches
    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());
        anyVCHadFiltersApplied |= vc.filtersWereApplied();

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // let's see if the string contains a "," separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                final List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (final String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            final String key = p.getKey();
            final Object value = p.getValue();
            // only output annotations that have the same value in every input VC
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(value) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    attributes.put(key, value);
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            if (!genotypes.isEmpty()) {
                logger.debug(String.format(
                        "Stripping PLs at %s:%d-%d due to incompatible alleles merged=%s vs. single=%s",
                        vc.getChr(), vc.getStart(), vc.getEnd(), alleles, vc.getAlleles()));
            }
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC,AF attributes from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was filtered and every VC is variant
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    if (anyVCHadFiltersApplied) {
        builder.filters(filters.isEmpty() ? filters : new TreeSet<>(filters));
    }
    builder.attributes(new TreeMap<>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}

From source file: com.gtwm.pb.model.manageSchema.DatabaseDefn.java

public void removeTable(SessionDataInfo sessionData, HttpServletRequest request, TableInfo tableToRemove,
        Connection conn) throws SQLException, DisallowedException, CantDoThatException,
        TableDependencyException, CodingErrorException, ObjectNotFoundException {
    if (!(this.authManager.getAuthenticator().loggedInUserAllowedTo(request, PrivilegeType.ADMINISTRATE))) {
        throw new DisallowedException(this.authManager.getLoggedInUser(request), PrivilegeType.ADMINISTRATE);
    }
    // Check the table doesn't have any user-added fields
    for (BaseField field : tableToRemove.getFields()) {
        if (!(field.equals(tableToRemove.getPrimaryKey()) || field.getHidden())) {
            throw new CantDoThatException("Please remove all fields before removing the table");
        }
    }
    // Check that it doesn't have any reports
    if (tableToRemove.getReports().size() > 1) {
        throw new CantDoThatException(
                "Please remove reports " + tableToRemove.getReports() + " before removing the table");
    }
    // Get a set of dependent tables. If empty, proceed with the deletion of
    // the table; otherwise, raise an exception
    LinkedHashSet<TableInfo> dependentTables = new LinkedHashSet<TableInfo>();
    this.getDependentTables(tableToRemove, dependentTables, request);
    if (dependentTables.size() > 0) {
        LinkedHashSet<BaseReportInfo> dependentReports = new LinkedHashSet<BaseReportInfo>();
        for (TableInfo dependentTable : dependentTables) {
            dependentReports.addAll(dependentTable.getReports());
        }
        throw new TableDependencyException(
                "Unable to remove table - other tables are linked to it, that need to be removed first",
                dependentTables, dependentReports);
    }
    // No dependencies exist so remove the table & its default report:
    BaseReportInfo defaultReport = tableToRemove.getDefaultReport();
    this.removeReportWithoutChecks(sessionData, request, defaultReport, conn);
    // Remove any privileges on the table
    this.getAuthManager().removePrivilegesOnTable(request, tableToRemove);
    this.tableCache.remove(tableToRemove.getInternalTableName());
    // Delete from persistent store
    HibernateUtil.currentSession().delete(tableToRemove);
    try {
        // Delete the table from the relational database.
        // The CASCADE is to drop the related sequence.
        // TODO: replace this with a specific sequence drop
        PreparedStatement statement = conn
                .prepareStatement("DROP TABLE " + tableToRemove.getInternalTableName() + " CASCADE");
        statement.execute();
        statement.close();
    } catch (SQLException sqlex) {
        String errorCode = sqlex.getSQLState();
        if (errorCode.equals("42P01")) {
            logger.warn("Can't delete table " + tableToRemove + " from relational database, it's not there");
            // TODO: review why we're swallowing this error
        } else {
            throw new SQLException(sqlex + ": error code " + errorCode, sqlex);
        }
    }
    this.authManager.getCompanyForLoggedInUser(request).removeTable(tableToRemove);
    UsageLogger usageLogger = new UsageLogger(this.relationalDataSource);
    AppUserInfo user = this.authManager.getUserByUserName(request, request.getRemoteUser());
    usageLogger.logTableSchemaChange(user, tableToRemove, AppAction.REMOVE_TABLE, "");
    UsageLogger.startLoggingThread(usageLogger);
}

From source file: org.sakaiproject.content.tool.ListItem.java

/**
 * Asks the Server Configuration Service to get a list of available roles with the prefix "resources.enabled.roles".
 * We should expect language strings for these to be defined in the bundles.
 * @return a set of role ids that can be used
 */
public Set<String> availableRoleIds() {
    String[] configStrings = ServerConfigurationService.getStrings("resources.enabled.roles");

    LinkedHashSet<String> availableIds = new LinkedHashSet<String>();

    if (configStrings != null) {
        availableIds.addAll(Arrays.asList(configStrings));
    } else {
        // By default just include the public
        availableIds.add(PUBVIEW_ROLE);
    }

    return availableIds;
}
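
A side note on the idiom above: when the source is an array, java.util.Collections.addAll can append the elements to the set directly, without the intermediate Arrays.asList wrapper. A minimal sketch (the role names are illustrative, not from the Sakai configuration):

import java.util.Collections;
import java.util.LinkedHashSet;

public class ConfigRolesSketch {
    public static void main(String[] args) {
        String[] configStrings = { "instructor", "ta", "student" }; // hypothetical config values
        LinkedHashSet<String> availableIds = new LinkedHashSet<>();
        // Equivalent to availableIds.addAll(Arrays.asList(configStrings)),
        // but without creating the intermediate List.
        Collections.addAll(availableIds, configStrings);
        System.out.println(availableIds); // [instructor, ta, student]
    }
}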

From source file: org.osaf.cosmo.atom.provider.ItemCollectionAdapter.java

private NoteItem processEntryUpdate(ContentProcessor processor, Entry entry, NoteItem item)
        throws ValidationException, ProcessorException {
    EventStamp es = StampUtils.getEventStamp(item);
    Date oldstart = es != null && es.isRecurring() ? es.getStartDate() : null;

    processor.processContent(entry.getContent(), item);

    // oldstart will have a value if the item has an EventStamp
    // and the EventStamp is recurring
    if (oldstart != null) {
        es = StampUtils.getEventStamp(item);
        // Case 1: EventStamp was removed from recurring event, so we
        // have to remove all modifications (a modification doesn't make
        // sense if there is no recurring event)
        if (es == null) {
            LinkedHashSet<ContentItem> updates = new LinkedHashSet<ContentItem>();
            for (NoteItem mod : item.getModifications()) {
                mod.setIsActive(false);
                updates.add(mod);
            }
            updates.add(item);

            // Update item and remove modifications in one atomic service call
            contentService.updateContentItems(item.getParents(), updates);
        }
        // Case 2: Start date may have changed on master event.
        else {
            Date newstart = es.getStartDate();

            // If changed, we have to update all the recurrenceIds
            // for any modifications
            if (newstart != null && !newstart.equals(oldstart)) {
                long delta = newstart.getTime() - oldstart.getTime();
                if (log.isDebugEnabled())
                    log.debug("master event start date changed; " + "adjusting modifications by " + delta
                            + " milliseconds");

                LinkedHashSet<ContentItem> updates = new LinkedHashSet<ContentItem>();
                HashSet<NoteItem> copies = new HashSet<NoteItem>();
                HashSet<NoteItem> removals = new HashSet<NoteItem>();

                // copy each modification and update the copy's uid
                // with the new start date
                Iterator<NoteItem> mi = item.getModifications().iterator();
                while (mi.hasNext()) {
                    NoteItem mod = mi.next();

                    // ignore modifications without event stamp
                    if (StampUtils.getEventExceptionStamp(mod) == null)
                        continue;

                    mod.setIsActive(false);
                    removals.add(mod);

                    NoteItem copy = (NoteItem) mod.copy();
                    copy.setModifies(item);

                    EventExceptionStamp ees = StampUtils.getEventExceptionStamp(copy);

                    DateTime oldRid = (DateTime) ees.getRecurrenceId();
                    java.util.Date newRidTime = new java.util.Date(oldRid.getTime() + delta);
                    DateTime newRid = (DateTime) Dates.getInstance(newRidTime, Value.DATE_TIME);
                    if (oldRid.isUtc())
                        newRid.setUtc(true);
                    else
                        newRid.setTimeZone(oldRid.getTimeZone());

                    copy.setUid(new ModificationUid(item, newRid).toString());
                    ees.setRecurrenceId(newRid);

                    // If the modification's dtstart is missing, then
                    // we have to adjust dtstart to be equal to the
                    // recurrenceId.
                    if (isDtStartMissing(StampUtils.getBaseEventStamp(mod))) {
                        ees.setStartDate(ees.getRecurrenceId());
                    }

                    copies.add(copy);
                }

                // add removals first
                updates.addAll(removals);
                // then additions
                updates.addAll(copies);
                // then updates
                updates.add(item);

                // Update everything in one atomic service call
                contentService.updateContentItems(item.getParents(), updates);
            } else {
                // otherwise use simple update
                item = (NoteItem) contentService.updateContent((ContentItem) item);
            }
        }
    } else {
        // use simple update
        item = (NoteItem) contentService.updateContent((ContentItem) item);
    }

    return item;
}

From source file: com.sonicle.webtop.calendar.CalendarManager.java

public LinkedHashSet<String> calculateAvailabilitySpans(int minRange, UserProfileId pid, DateTime fromDate,
        DateTime toDate, DateTimeZone userTz, boolean busy) throws WTException {
    CalendarDAO calDao = CalendarDAO.getInstance();
    EventDAO evtDao = EventDAO.getInstance();
    LinkedHashSet<String> hours = new LinkedHashSet<>();
    Connection con = null;

    //TODO: review this method

    try {
        con = WT.getConnection(SERVICE_ID);

        // Lists desired calendars by profile
        final List<VVEventInstance> veis = new ArrayList<>();
        for (OCalendar ocal : calDao.selectByProfile(con, pid.getDomainId(), pid.getUserId())) {
            for (VVEvent ve : evtDao.viewByCalendarRangeCondition(con, ocal.getCalendarId(), fromDate, toDate,
                    null)) {
                veis.add(new VVEventInstance(ve));
            }
            for (VVEvent ve : evtDao.viewRecurringByCalendarRangeCondition(con, ocal.getCalendarId(), fromDate,
                    toDate, null)) {
                veis.add(new VVEventInstance(ve));
            }
        }

        DateTime startDt, endDt;
        for (VVEventInstance vei : veis) {
            if (vei.getBusy() != busy)
                continue; // Skip events whose busy flag does not match the requested value

            if (vei.getRecurrenceId() == null) {
                startDt = vei.getStartDate().withZone(userTz);
                endDt = vei.getEndDate().withZone(userTz);
                hours.addAll(generateTimeSpans(minRange, startDt.toLocalDate(), endDt.toLocalDate(),
                        startDt.toLocalTime(), endDt.toLocalTime(), userTz));
            } else {
                final List<VVEventInstance> instances = calculateRecurringInstances(con,
                        new VVEventInstanceMapper(vei), fromDate, toDate, userTz);
                for (VVEventInstance instance : instances) {
                    startDt = instance.getStartDate().withZone(userTz);
                    endDt = instance.getEndDate().withZone(userTz);
                    hours.addAll(generateTimeSpans(minRange, startDt.toLocalDate(), endDt.toLocalDate(),
                            startDt.toLocalTime(), endDt.toLocalTime(), userTz));
                }
            }
        }

    } catch (SQLException | DAOException ex) {
        throw wrapException(ex);
    } finally {
        DbUtils.closeQuietly(con);
    }
    return hours;
}