Example usage for java.util SortedSet addAll

List of usage examples for java.util SortedSet addAll

Introduction

On this page you can find example usage of java.util SortedSet addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
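
Before the project-sourced examples below, here is a minimal, self-contained sketch of the call. The class name and element values are made up purely for illustration:

import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetAddAllDemo {
    public static void main(String[] args) {
        // TreeSet is the usual SortedSet implementation; natural (alphabetical) ordering applies here.
        SortedSet<String> names = new TreeSet<String>();
        List<String> newNames = Arrays.asList("carol", "alice", "bob", "alice");

        // addAll returns true if the set changed; duplicate elements are silently ignored.
        boolean changed = names.addAll(newNames);

        System.out.println(changed); // true
        System.out.println(names); // [alice, bob, carol] -- sorted, no duplicates
    }
}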

Usage

From source file:pt.ist.expenditureTrackingSystem.presentationTier.actions.organization.OrganizationAction.java

public ActionForward listCPVReferences(final ActionMapping mapping, final ActionForm form,
        final HttpServletRequest request, final HttpServletResponse response) {
    final Set<CPVReference> cvpReferences = Bennu.getInstance().getCPVReferencesSet();
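    // Copy the unordered reference set into a TreeSet so the CPV references come back sorted by description.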
    final SortedSet<CPVReference> sortedCPVReferences = new TreeSet<CPVReference>(
            CPVReference.COMPARATOR_BY_DESCRIPTION);
    sortedCPVReferences.addAll(cvpReferences);
    request.setAttribute("cvpReferences", sortedCPVReferences);
    return forward("/expenditureTrackingOrganization/listCPVReferences.jsp");
}

From source file:org.alfresco.repo.search.impl.solr.facet.SolrFacetServiceImpl.java

@Override
public List<SolrFacetProperties> getFacets() {
    // Sort the facets into display order
    final SolrFacetComparator comparator = new SolrFacetComparator(getFacetOrder());

    SortedSet<SolrFacetProperties> result = new TreeSet<>(comparator);

    final NodeRef facetsRoot = getFacetsRoot();
    if (facetsRoot != null) {
        for (ChildAssociationRef ref : nodeService.getChildAssocs(facetsRoot)) {
            // MNT-13812 Check that child has facetField type
            if (nodeService.getType(ref.getChildRef()).equals(SolrFacetModel.TYPE_FACET_FIELD)) {
                result.add(getFacetProperties(ref.getChildRef()));
            }
        }
    }

    // add the default filters
    result.addAll(defaultFacetsMap.values());

    return new ArrayList<>(result);
}

From source file:net.sourceforge.fenixedu.domain.organizationalStructure.Unit.java

public SortedSet<Function> getOrderedFunctions() {
    SortedSet<Function> functions = new TreeSet<Function>(Function.COMPARATOR_BY_ORDER);
    functions.addAll(getFunctionsSet());

    return functions;
}

From source file:net.sourceforge.fenixedu.domain.organizationalStructure.Unit.java

public SortedSet<Function> getOrderedActiveFunctions() {
    SortedSet<Function> functions = new TreeSet<Function>(Function.COMPARATOR_BY_ORDER);
    functions.addAll(getActiveFunctions());

    return functions;
}

From source file:org.codehaus.mojo.license.api.DefaultThirdPartyTool.java

/**
 * {@inheritDoc}
 */
public void mergeLicenses(LicenseMap licenseMap, String mainLicense, Set<String> licenses) {

    if (licenses.isEmpty()) {

        // nothing to merge; can this really happen?
        return;
    }

    SortedSet<MavenProject> mainSet = licenseMap.get(mainLicense);
    if (mainSet == null) {
        if (isVerbose()) {
            getLogger().warn("No license [" + mainLicense + "] found, will create it.");
        }
        mainSet = new TreeSet<MavenProject>(projectComparator);
        licenseMap.put(mainLicense, mainSet);
    }
    for (String license : licenses) {
        SortedSet<MavenProject> set = licenseMap.get(license);
        if (set == null) {
            if (isVerbose()) {
                getLogger().warn("No license [" + license + "] found, skip the merge to [" + mainLicense + "]");
            }
            continue;
        }
        if (isVerbose()) {
            getLogger().info("Merge license [" + license + "] to [" + mainLicense + "] (" + set.size()
                    + " dependencies).");
        }
        mainSet.addAll(set);
        set.clear();
        licenseMap.remove(license);
    }
}

From source file:com.github.FraggedNoob.GitLabTransfer.GitlabRelatedData.java

/**
 * Obtains the notes for a project issue.
 * @param i The Gitlab issue to pull from
 * @param notes A SortedSet of notes ordered by IID. An empty set means either an error
 * occurred or the issue has no notes; see the return value.
 *
 * @return false indicates a problem; note that true may simply mean the issue has no notes.
 */
public boolean getIssueNotes(final GitlabIssue i, SortedSet<GitlabNote> notes) {

    if (!createApi()) {
        return false;
    }

    notes.clear();
    List<GitlabNote> noteList;

    try {
        noteList = api.getNotes(i);
    } catch (IOException e) {
        System.out.printf("Error getting notes from issue IID=%d.\n", i.getIid());
        e.printStackTrace();
        return false;
    }

    /*
     * Uniquely sort the list of notes by IID
     */
    try {
        notes.addAll(noteList);
    } catch (Exception e) {
        System.out.println("Error parsing notes - bad data.");
        e.printStackTrace();
        return false;
    }

    return true;
}

From source file:org.eclipse.winery.repository.client.WineryRepositoryClient.java

/**
 * {@inheritDoc}
 */
@Override
public SortedSet<String> getNamespaces() {
    SortedSet<String> res = new TreeSet<String>();
    for (WebResource wr : this.repositoryResources) {
        WebResource namespacesResource = wr.path("admin").path("namespaces");

        // this could be parsed using JAXB
        // (http://jersey.java.net/nonav/documentation/latest/json.html),
        // but we are short in time, so we do a quick hack
        String nsList = namespacesResource.accept(MediaType.APPLICATION_JSON).get(String.class);
        LOGGER.trace(nsList);
        List<String> nsListList;
        try {
            nsListList = this.mapper.readValue(nsList, new TypeReference<List<String>>() {
            });
        } catch (Exception e) {
            LOGGER.error(e.getMessage(), e);
            continue;
        }
        res.addAll(nsListList);
    }
    return res;
}

From source file:org.torproject.ernie.db.SanitizedBridgesWriter.java

/**
 * Rewrite all network statuses that might contain references to server
 * descriptors we added or updated in this execution. This applies to
 * all statuses that have been published up to 24 hours after any added
 * or updated server descriptor.
 */
public void finishWriting() {

    /* Prepare parsing and formatting timestamps. */
    SimpleDateFormat dateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    SimpleDateFormat statusFileFormat = new SimpleDateFormat("yyyyMMdd-HHmmss");
    statusFileFormat.setTimeZone(TimeZone.getTimeZone("UTC"));

    /* Iterate over publication timestamps of previously sanitized
     * descriptors. For every publication timestamp, we want to re-write
     * the network statuses that we published up to 24 hours after that
     * descriptor. We keep the timestamp of the last re-written network
     * status in order to make sure we re-write any network status at
     * most once. */
    String lastDescriptorPublishedPlus24Hours = "1970-01-01 00:00:00";
    for (String published : this.descriptorPublicationTimes) {
        if (published.compareTo(lastDescriptorPublishedPlus24Hours) <= 0) {
            continue;
        }
        // find statuses 24 hours after published
        SortedSet<File> statusesToRewrite = new TreeSet<File>();
        long publishedTime;
        try {
            publishedTime = dateTimeFormat.parse(published).getTime();
        } catch (ParseException e) {
            this.logger.log(Level.WARNING,
                    "Could not parse publication " + "timestamp '" + published + "'. Skipping.", e);
            continue;
        }
        String[] dayOne = dateFormat.format(publishedTime).split("-");

        File publishedDayOne = new File(
                this.sanitizedBridgesDir + "/" + dayOne[0] + "/" + dayOne[1] + "/statuses/" + dayOne[2]);
        if (publishedDayOne.exists()) {
            statusesToRewrite.addAll(Arrays.asList(publishedDayOne.listFiles()));
        }
        long plus24Hours = publishedTime + 24L * 60L * 60L * 1000L;
        lastDescriptorPublishedPlus24Hours = dateFormat.format(plus24Hours);
        String[] dayTwo = dateFormat.format(plus24Hours).split("-");
        File publishedDayTwo = new File(
                this.sanitizedBridgesDir + "/" + dayTwo[0] + "/" + dayTwo[1] + "/statuses/" + dayTwo[2]);
        if (publishedDayTwo.exists()) {
            statusesToRewrite.addAll(Arrays.asList(publishedDayTwo.listFiles()));
        }
        for (File status : statusesToRewrite) {
            String statusPublished = status.getName().substring(0, 15);
            long statusTime;
            try {
                statusTime = statusFileFormat.parse(statusPublished).getTime();
            } catch (ParseException e) {
                this.logger.log(Level.WARNING, "Could not parse network " + "status publication timestamp '"
                        + published + "'. Skipping.", e);
                continue;
            }
            if (statusTime < publishedTime || statusTime > plus24Hours) {
                continue;
            }
            this.rewriteNetworkStatus(status, dateTimeFormat.format(statusTime));
        }
    }

    /* Write descriptor mappings to disk. */
    try {
        BufferedWriter bw = new BufferedWriter(new FileWriter(this.bridgeDescriptorMappingsFile));
        for (DescriptorMapping mapping : this.bridgeDescriptorMappings.values()) {
            bw.write(mapping.toString() + "\n");
        }
        bw.close();
    } catch (IOException e) {
        this.logger.log(Level.WARNING, "Could not write descriptor " + "mappings to disk.", e);
    }
}

From source file:org.cloudata.core.tabletserver.DiskSSTable.java

public SortedSet<MapFileIndexRecord> getMapFileIndex() {
    lock.obtainReadLock();
    try {
        SortedSet<MapFileIndexRecord> result = new TreeSet<MapFileIndexRecord>();
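        // Merge the index records from every map file into one sorted result set.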
        for (List<TabletMapFile> eachMapFiles : mapFiles.values()) {
            for (TabletMapFile tabletMapFile : eachMapFiles) {
                SortedSet<MapFileIndexRecord> indexRecords = tabletMapFile.getMapFileIndexRecords();
                result.addAll(indexRecords);
            }
        }

        return result;
    } finally {
        lock.releaseReadLock();
    }
}