Example usage for java.util Set addAll

List of usage examples for java.util Set addAll

Introduction

On this page you can find example usage for the java.util.Set.addAll method.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
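
The contract worth noting: addAll returns true only if the set actually changed, and elements already present are skipped. A minimal, self-contained sketch:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SetAddAllDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>(Arrays.asList("red", "green"));

        // "green" is already present, so only "blue" is added
        boolean changed = colors.addAll(Arrays.asList("green", "blue"));
        System.out.println(changed); // true (the set gained "blue")

        // Adding only duplicates leaves the set untouched and returns false
        System.out.println(colors.addAll(Arrays.asList("red", "green"))); // false
    }
}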

Usage

From source file:annis.CommonHelper.java

public static Set<String> getOrderingTypes(SaltProject p) {
    Set<String> result = new TreeSet<String>();

    for (SCorpusGraph corpusGraph : p.getSCorpusGraphs()) {
        for (SDocument doc : corpusGraph.getSDocuments()) {
            SDocumentGraph g = doc.getSDocumentGraph();
            if (g != null) {
                EList<SOrderRelation> orderRelations = g.getSOrderRelations();
                if (orderRelations != null) {
                    for (SOrderRelation rel : orderRelations) {
                        if (rel.getSTypes() != null) {
                            result.addAll(rel.getSTypes());
                        }
                    }
                }
            }
        }
    }

    return result;
}

From source file:act.installer.brenda.BrendaChebiOntology.java

/**
 * This method processes "is subtype of" relationships to produce a mapping between each application and its main
 * application, used subsequently (outside of this) to compute each ontology's main application.
 * @param isSubtypeOfRelationships map {chebi id -> subtype's chebi ids}
 * @param applicationChebiId main application's chebi id
 * @return a map {application's chebi id -> related main application's chebi ids}
 */
public static Map<String, Set<String>> getApplicationToMainApplicationsMap(
        Map<String, Set<String>> isSubtypeOfRelationships, String applicationChebiId) {

    // Compute the set of main applications. These are the ontologies that are subtypes of the ontology 'application'.
    Set<String> mainApplicationsChebiId = isSubtypeOfRelationships.get(applicationChebiId);

    // Compute the initial list of applications to visit from the set of main applications.
    ArrayList<String> applicationsToVisit = new ArrayList<>(mainApplicationsChebiId);

    // For each main application, map it to a set containing only itself.
    Map<String, Set<String>> applicationToMainApplicationsMap = applicationsToVisit.stream()
            .collect(Collectors.toMap(e -> e, Collections::singleton));

    // Then visit all applications in BFS fashion, appending newly discovered applications to applicationsToVisit
    // and propagating/merging the set of main applications as we progress down the relationship graph.
    int currentIndex = 0;
    while (currentIndex < applicationsToVisit.size()) {

        String currentApplication = applicationsToVisit.get(currentIndex);
        Set<String> subApplications = isSubtypeOfRelationships.get(currentApplication);

        if (subApplications != null) {
            // add all sub-applications to the set of applications to visit
            applicationsToVisit.addAll(subApplications);
            for (String subApplication : subApplications) {
                Set<String> mainApplicationsSet = applicationToMainApplicationsMap.get(subApplication);
                if (mainApplicationsSet == null) {
                    mainApplicationsSet = new HashSet<>();
                    applicationToMainApplicationsMap.put(subApplication, mainApplicationsSet);
                }
                mainApplicationsSet.addAll(applicationToMainApplicationsMap.get(currentApplication));
            }
        }
        currentIndex++;
    }

    return applicationToMainApplicationsMap;
}
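
To see how the BFS propagates main applications down the relationship graph, here is a sketch with hypothetical CHEBI ids (the ids and the demo class below are assumptions, not part of the Brenda data):

import java.util.*;

public class MainApplicationsDemo {
    public static void main(String[] args) {
        // Hypothetical graph: 'app' has main applications A1 and A2;
        // A1 has subtype S1, and S1 has subtype S2.
        Map<String, Set<String>> isSubtypeOf = new HashMap<>();
        isSubtypeOf.put("app", new HashSet<>(Arrays.asList("A1", "A2")));
        isSubtypeOf.put("A1", new HashSet<>(Arrays.asList("S1")));
        isSubtypeOf.put("S1", new HashSet<>(Arrays.asList("S2")));

        Map<String, Set<String>> result =
                BrendaChebiOntology.getApplicationToMainApplicationsMap(isSubtypeOf, "app");
        // Expected: A1 -> [A1], A2 -> [A2], S1 -> [A1], S2 -> [A1]
        System.out.println(result);
    }
}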

From source file:eu.itesla_project.modules.validation.OfflineValidationTool.java

private static void writeAttributesFiles(Set<RuleId> rulesIds,
        Map<String, Map<RuleId, Map<HistoDbAttributeId, Object>>> valuesPerRulePerCase, Path outputDir)
        throws IOException {
    for (RuleId ruleId : rulesIds) {
        Path attributesFile = outputDir.resolve("attributes_" + ruleId.toString() + ".csv");

        System.out.println("writing " + attributesFile + "...");

        try (BufferedWriter writer = Files.newBufferedWriter(attributesFile, StandardCharsets.UTF_8)) {
            writer.write("base case");

            Set<HistoDbAttributeId> allAttributeIds = new LinkedHashSet<>();
            for (Map<RuleId, Map<HistoDbAttributeId, Object>> valuesPerRule : valuesPerRulePerCase.values()) {
                Map<HistoDbAttributeId, Object> values = valuesPerRule.get(ruleId);
                if (values != null) {
                    allAttributeIds.addAll(values.keySet());
                }
            }

            for (HistoDbAttributeId attributeId : allAttributeIds) {
                writer.write(CSV_SEPARATOR);
                writer.write(attributeId.toString());
            }
            writer.newLine();

            for (Map.Entry<String, Map<RuleId, Map<HistoDbAttributeId, Object>>> e : valuesPerRulePerCase
                    .entrySet()) {
                String baseCaseName = e.getKey();
                Map<RuleId, Map<HistoDbAttributeId, Object>> valuesPerRule = e.getValue();
                writer.write(baseCaseName);

                // A base case may have no values for this rule; guard against null
                Map<HistoDbAttributeId, Object> values = valuesPerRule.get(ruleId);
                for (HistoDbAttributeId attributeId : allAttributeIds) {
                    writer.write(CSV_SEPARATOR);
                    Object value = values != null ? values.get(attributeId) : null;
                    if (value != null && !(value instanceof Float && Float.isNaN((Float) value))) {
                        writer.write(Objects.toString(value));
                    }
                }

                writer.newLine();
            }
        }
    }
}
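
The two-pass shape above (first union every case's attribute ids into a LinkedHashSet to get a stable header, then emit one row per case against that header) is a generally useful trick for writing ragged maps as CSV. A minimal sketch of the same idea, with hypothetical row data:

import java.util.*;

public class UnionHeaderDemo {
    public static void main(String[] args) {
        Map<String, Object> row1 = new LinkedHashMap<>();
        row1.put("a", 1);
        row1.put("b", 2);
        Map<String, Object> row2 = new LinkedHashMap<>();
        row2.put("b", 3);
        row2.put("c", 4);
        List<Map<String, Object>> rows = Arrays.asList(row1, row2);

        // Pass 1: the union of all keys, in first-seen order, becomes the header
        Set<String> columns = new LinkedHashSet<>();
        for (Map<String, Object> row : rows) {
            columns.addAll(row.keySet());
        }
        System.out.println(String.join(";", columns)); // a;b;c

        // Pass 2: every row is emitted against the same column set
        for (Map<String, Object> row : rows) {
            StringJoiner line = new StringJoiner(";");
            for (String col : columns) {
                line.add(Objects.toString(row.get(col), ""));
            }
            System.out.println(line); // "1;2;" then ";3;4"
        }
    }
}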

From source file:com.bluexml.xforms.demo.Util.java

public static Set<Vector<String>> getInstances(String alfrescohost, String user, String definitionName) {
    Set<Vector<String>> result = new HashSet<Vector<String>>();
    try {
        List<String> ids = getIdentifiers(alfrescohost, user, definitionName);

        for (String id : ids) {
            result.addAll(getInstancesById(alfrescohost, user, id));
        }

    } catch (Exception e) {
        e.printStackTrace();
    }
    return result;
}

From source file:expansionBlocks.ProcessCommunities.java

public static Pair<Map<Entity, Double>, Map<Entity, Double>> execute(Configuration configuration, Query query)
        throws Exception {
    Map<Set<Long>, Map<Entity, Double>> mapPathCommunities = query.getCommunities();
    HashSet<Map<Entity, Double>> initialCommunities = new HashSet<>(mapPathCommunities.values());

    Set<Map<Entity, Double>> scaledCommunities = new HashSet<>();

    AbstractCommunityScalator as = configuration.getAbstractCommunityScalator();

    for (Map<Entity, Double> community : initialCommunities) {
        Map<Entity, Double> scaledCommunity = as.scaledEmphasisArticlesInCommunity(configuration, query,
                community);
        scaledCommunities.add(scaledCommunity);
    }

    Set<Map<Entity, Double>> communitiesFusioned = getCommunitiesFromCommunitiesBasedOnSimilarity(
            scaledCommunities, configuration.getFusionThreshold());
    if (configuration.DEBUG_INFO) {
        println("Fusion communities based on similarity communities: ");
        for (Map<Entity, Double> community : communitiesFusioned) {
            println(community);

        }
    }
    println(initialCommunities.size() + " communities have been fused into " + communitiesFusioned.size());

    println("[[WARNING]] - Select best community algorithm seems to differ from select best path. You may want to double ckeck it.");
    Set<Map<Entity, Double>> selectBestCommunities = selectBestCommunities(configuration, communitiesFusioned,
            query.getTokenNames());

    if (configuration.DEBUG_INFO) {
        println("Selected best communities: ");
        for (Map<Entity, Double> community : selectBestCommunities) {
            println(StringUtilsQueryExpansion.MapDoubleValueToString(community));
        }
    }

    Map<Entity, Double> result = agregateCommunities(selectBestCommunities);

    if (configuration.DEBUG_INFO) {
        println("Agragated community(size: " + result.size() + "): ");
        println(StringUtilsQueryExpansion.MapDoubleValueToString(result));
    }

    Set<Entity> entitiesToRemove = new HashSet<>();
    /*for (Map.Entry<Entity, Double> e : result.entrySet())
     {
     Set<Category> categories = e.getKey().getCategories();
     println("Categories of \"" + e.getKey() + "\": " + categories);
     if (categories.isEmpty())
     entitiesToRemove.add(e.getKey());
     }*/

    entitiesToRemove.addAll(removableAccordingToCategories(result));

    Map<Entity, Double> filteredCommunity = new HashMap<>(result);
    for (Entity e : entitiesToRemove) {
        filteredCommunity.remove(e);
    }
    println("Based on category analisy I would suggest to remove: " + entitiesToRemove);
    println("New Community  in case of category based filtering"
            + StringUtilsQueryExpansion.MapDoubleValueToString(filteredCommunity));

    query.setCommunityAfterRemoval(filteredCommunity);
    query.setCommunity(result);
    return new Pair<>(result, filteredCommunity);

}

From source file:com.alibaba.dubbo.governance.web.governance.module.screen.Routes.java

/**
 * Adds the owners of the given service to the set of usernames.
 * 
 * @param usernames set that matching owners' usernames are added to
 * @param serviceName service name matched against the stored service name patterns
 * @param ownerDAO owner service used to look up service name patterns and usernames
 */
public static void addOwnersOfService(Set<String> usernames, String serviceName, OwnerService ownerDAO) {
    List<String> serviceNamePatterns = ownerDAO.findAllServiceNames();
    for (String p : serviceNamePatterns) {
        if (ParseUtils.isMatchGlobPattern(p, serviceName)) {
            List<String> list = ownerDAO.findUsernamesByServiceName(p);
            usernames.addAll(list);
        }
    }
}
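
Note the design choice: instead of returning a new collection, the method accumulates into the caller-supplied usernames set, so owners repeated across several matching patterns are collapsed for free. A hedged usage sketch (the service names and the ownerService instance are assumptions):

Set<String> owners = new TreeSet<>(); // sorted, de-duplicated
Routes.addOwnersOfService(owners, "com.foo.BarService", ownerService);
Routes.addOwnersOfService(owners, "com.foo.BazService", ownerService);
// owners now holds the union of both services' owners, each username once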

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Starts up a storm job.
 * 
 * 1. Gets the storm instance from the yarn config
 * 2. Makes a mega jar consisting of:
 *    A. Underlying artefacts (system libs)
 *    B. User-supplied libraries
 * 3. Submits the mega jar to storm with the bucket id as the job name
 * 
 * @param storm_controller storm controller to submit the job through
 * @param bucket the bucket this job is started for
 * @param sub_job optional sub-job name
 * @param underlying_artefacts system artefacts whose jars are merged into the mega jar
 * @param user_lib_paths user-supplied library paths to merge in
 * @param topology the storm topology to submit
 * @param config the topology configuration
 * @param cached_jar_dir directory where the merged jar is cached
 * @return a future completing with the submission result
 */
public static CompletableFuture<BasicMessageBean> startJob(final IStormController storm_controller,
        final DataBucketBean bucket, final Optional<String> sub_job,
        final Collection<Object> underlying_artefacts, final Collection<String> user_lib_paths,
        final StormTopology topology, final Map<String, String> config, final String cached_jar_dir) {
    if (null == topology) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(StormControllerUtil.class,
                "startJob", ErrorUtils.TOPOLOGY_NULL_ERROR, bucket.full_name()));
    }

    _logger.info("Retrieved user Storm config topology: spouts=" + topology.get_spouts_size() + " bolts="
            + topology.get_bolts_size() + " configs=" + config.toString());

    final Set<String> jars_to_merge = new TreeSet<String>();

    final CompletableFuture<String> jar_future = Lambdas.get(() -> {
        if (RemoteStormController.class.isAssignableFrom(storm_controller.getClass())) {
            // (This is only necessary in the remote case)

            jars_to_merge.addAll(underlying_artefacts.stream()
                    .map(artefact -> LiveInjector.findPathJar(artefact.getClass(), ""))
                    .filter(f -> !f.equals("")).collect(Collectors.toSet()));

            if (jars_to_merge.isEmpty()) { // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                _logger.warn(
                        "WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                                + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjector doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib" into there
                    jars_to_merge
                            .addAll(FileUtils
                                    .listFiles(new File(globals.local_root_dir() + "/lib/"),
                                            new String[] { "jar" }, false)
                                    .stream().map(File::toString).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }
            //add in the user libs
            jars_to_merge.addAll(user_lib_paths);

            //create jar
            return buildOrReturnCachedStormTopologyJar(jars_to_merge, cached_jar_dir);
        } else {
            return CompletableFuture.completedFuture("/unused/dummy.jar");
        }
    });

    //submit to storm
    @SuppressWarnings("unchecked")
    final CompletableFuture<BasicMessageBean> submit_future = Lambdas.get(() -> {
        long retries = 0;
        while (retries < MAX_RETRIES) {
            try {
                _logger.debug("Trying to submit job, try: " + retries + " of " + MAX_RETRIES);
                final String jar_file_location = jar_future.get();
                return storm_controller.submitJob(bucketPathToTopologyName(bucket, sub_job), jar_file_location,
                        topology, (Map<String, Object>) (Map<String, ?>) config);
            } catch (Exception ex) {
                if (ex instanceof AlreadyAliveException) {
                    retries++;
                    //sleep 1s, was seeing about 2s of sleep required before job successfully submitted on restart
                    try {
                        Thread.sleep(1000);
                    } catch (Exception e) {
                        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                        error_future.completeExceptionally(e);
                        return error_future;
                    }
                } else {
                    retries = MAX_RETRIES; //we threw some other exception, bail out
                    final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                    error_future.completeExceptionally(ex);
                    return error_future;
                }
            }
        }
        //we maxed out our retries, throw failure
        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
        error_future.completeExceptionally(new Exception(
                "Error submitting job, ran out of retries (previous (same name) job is probably still alive)"));
        return error_future;
    });
    return submit_future;
}
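
The submit loop above is a bounded-retry pattern: retry only on AlreadyAliveException (a previous job with the same name is still shutting down), fail fast on everything else. Stripped of the Storm specifics, the shape is roughly this sketch (submit() and RetryableException are hypothetical placeholders):

static CompletableFuture<String> submitWithRetries(int maxRetries) {
    for (int retries = 0; retries < maxRetries; retries++) {
        try {
            return CompletableFuture.completedFuture(submit()); // hypothetical call
        } catch (RetryableException ex) { // hypothetical "try again" failure
            try {
                Thread.sleep(1000); // give the previous job time to die
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                final CompletableFuture<String> f = new CompletableFuture<>();
                f.completeExceptionally(ie);
                return f;
            }
        } catch (Exception ex) { // anything else: bail out immediately
            final CompletableFuture<String> f = new CompletableFuture<>();
            f.completeExceptionally(ex);
            return f;
        }
    }
    final CompletableFuture<String> f = new CompletableFuture<>();
    f.completeExceptionally(new Exception("ran out of retries"));
    return f;
}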

From source file:edu.wpi.checksims.submission.Submission.java

/**
 * Find all files matching a glob pattern in a directory, optionally recursing into subdirectories.
 *
 * @param directory Directory to search in
 * @param glob Match pattern used to identify files to include
 * @param recursive Whether to recurse into subdirectories
 * @return Set of all matching files in this directory (and its subdirectories, if recursive is set)
 */
static Set<File> getAllMatchingFiles(File directory, String glob, boolean recursive)
        throws NoSuchFileException, NotDirectoryException {
    checkNotNull(directory);
    checkNotNull(glob);
    checkArgument(!glob.isEmpty(), "Glob pattern cannot be empty");

    Set<File> allFiles = new HashSet<>();
    Logger logs = LoggerFactory.getLogger(Submission.class);

    if (recursive) {
        logs.trace("Recursively traversing directory " + directory.getName());
    }

    // Add this directory
    Collections.addAll(allFiles, getMatchingFilesFromDir(directory, glob));

    // Get subdirectories
    File[] subdirs = directory.listFiles(File::isDirectory);

    // Recursively call on all subdirectories if specified
    if (recursive) {
        for (File subdir : subdirs) {
            allFiles.addAll(getAllMatchingFiles(subdir, glob, true));
        }
    }

    return allFiles;
}
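
A short usage sketch (the path is an assumption): collect every .java file under a project tree, letting the HashSet de-duplicate any file reached twice:

Set<File> sources = Submission.getAllMatchingFiles(new File("/path/to/project"), "*.java", true);
for (File source : sources) {
    System.out.println(source.getAbsolutePath());
}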

From source file:net.sourceforge.fenixedu.domain.Professorship.java

public static List<Professorship> readByDegreeCurricularPlanAndExecutionYearAndBasic(
        DegreeCurricularPlan degreeCurricularPlan, ExecutionYear executionYear, Boolean basic) {

    Set<Professorship> professorships = new HashSet<Professorship>();
    for (CurricularCourse curricularCourse : degreeCurricularPlan.getCurricularCoursesSet()) {
        if (curricularCourse.getBasic().equals(basic)) {
            for (ExecutionCourse executionCourse : curricularCourse
                    .getExecutionCoursesByExecutionYear(executionYear)) {
                professorships.addAll(executionCourse.getProfessorshipsSet());
            }
        }
    }
    return new ArrayList<Professorship>(professorships);
}

From source file:com.t3.model.AssetManager.java

/**
 * <p>
 * Constructs a set of all assets in the given list of repositories, then builds a map of <code>MD5Key</code> and
 * <code>Asset</code> for all assets that do not appear in that set.
 * </p>
 * <p>
 * This provides the calling function with a list of all assets currently in use by the campaign that do not appear
 * in one of the listed repositories. It's entirely possible that the asset is in a different repository or in none
 * at all.
 * </p>
 * 
 * @param repos
 *            list of repositories to exclude
 * @return Map of all known assets that are NOT in the specified repositories
 */
public static Map<MD5Key, Asset> findAllAssetsNotInRepositories(List<String> repos) {
    // For performance reasons, we calculate the size of the Set in advance...
    int size = 0;
    for (String repo : repos) {
        size += assetLoader.getRepositoryMap(repo).size();
    }

    // Now create the aggregate of all repositories.
    Set<String> aggregate = new HashSet<String>(size);
    for (String repo : repos) {
        aggregate.addAll(assetLoader.getRepositoryMap(repo).keySet());
    }

    /*
     * The 'aggregate' now holds the sum total of all asset keys that are in repositories. Now we go through the
     * 'assetMap' and copy over <K,V> pairs that are NOT in 'aggregate' to our 'missing' Map.
     * 
     * Unfortunately, the repository is a Map<String, String> while the return value is going to be a Map<MD5Key,
     * Asset>, which means each individual entry needs to be checked and references copied. If both were the same
     * data type, converting both to Set<String> would allow for an addAll() and removeAll() and be done with it!
     */
    Map<MD5Key, Asset> missing = new HashMap<MD5Key, Asset>(Math.min(assetMap.size(), aggregate.size()));
    for (MD5Key key : assetMap.keySet()) {
        // 'aggregate' holds String keys, so compare against the key's string form
        if (!aggregate.contains(key.toString())) { // Not in any repository so add it.
            missing.put(key, assetMap.get(key));
        }
    }
    return missing;
}
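
The long comment above makes a point worth spelling out: when the key types on both sides do match, the whole filtering loop collapses to set algebra. A minimal sketch of that removeAll() variant, assuming both key sets were plain strings (campaignAssetKeys is a hypothetical Set<String> of the campaign's keys):

Set<String> missingKeys = new HashSet<>(campaignAssetKeys);
missingKeys.removeAll(aggregate); // keys used by the campaign but present in no repository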