Example usage for java.util TreeSet add

List of usage examples for java.util TreeSet add

Introduction

On this page you can find example usage for java.util TreeSet add.

Prototype

public boolean add(E e) 

Document

Adds the specified element to this set if it is not already present.
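
For example, a minimal sketch of that contract: add returns true when the element is inserted, and false when an equal element is already present (in which case the set is left unchanged).

    TreeSet<String> names = new TreeSet<>();
    boolean first = names.add("alice");  // true: element inserted
    boolean again = names.add("alice");  // false: already present, set unchanged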

Usage

From source file:cz.matfyz.oskopek.learnr.tools.DatasetIO.java

/**
 * A manual method of importing a dataset from a text file.
 * <p/>
 * Used for plain dataset distribution. Does not import statistics of any kind.
 * <p/>
 * <b>Warning:</b> Expects a syntactically perfect dataset according to the TXT dataset format specification! (See documentation).
 *
 * @param filename the filename from which to import
 * @return the imported dataset
 * @throws IOException if an error during read occurs
 */
public static Dataset importTXTDataset(String filename) throws IOException {
    LOGGER.debug("Import dataset from TXT: '{}'", filename);
    Dataset dataset = new Dataset();
    BufferedReader br = new BufferedReader(new FileReader(filename));

    br.readLine(); // PREAMBLE
    dataset.setName(br.readLine().split(":")[1].trim());
    dataset.setDescription(br.readLine().split(":")[1].trim());
    dataset.setAuthor(br.readLine().split(":")[1].trim());
    dataset.setCreatedDate(Long.parseLong(br.readLine().split(":")[1].trim()));
    int initWeight = Integer.parseInt(br.readLine().split(":")[1].trim());
    String[] limitsStr = br.readLine().split("/");
    Limits limits = new Limits(Integer.parseInt(limitsStr[0].split(":")[1].trim()),
            Integer.parseInt(limitsStr[1]));
    dataset.setLimits(limits);
    String answerCheckTypeStr = br.readLine().split(":")[1].trim();
    dataset.setAnswerCheckType(Dataset.AnswerCheckType.valueOf(answerCheckTypeStr));
    dataset.setGoodAnswerPenalty(Integer.parseInt(br.readLine().split(":")[1].trim()));
    dataset.setBadAnswerPenalty(Integer.parseInt(br.readLine().split(":")[1].trim()));

    String buffer;
    br.readLine(); // QUESTIONS
    TreeSet<Question> questionSet = new TreeSet<>();
    while ((buffer = br.readLine()) != null) {
        if (StringUtils.isWhitespace(buffer))
            continue;
        String[] split = buffer.split(";");
        String text = split[0].trim();

        List<Answer> answerList = new ArrayList<>();
        for (int i = 1; i < split.length; i++) {
            Answer answer = new Answer();
            answer.setValue(split[i].trim());
            answerList.add(answer);
        }
        Question q = new Question(text, new Statistics(), answerList, initWeight);

        LOGGER.debug("Reading question '{}'; weight '{}'.", q.getText(), q.getWeight());
        if (!questionSet.add(q)) {
            LOGGER.warn("Question '{}' already in dataset, adding as an answer.", q.getText());
            Iterator<Question> descIter = questionSet.descendingIterator(); // Descending iterator, because it's probably last
            while (descIter.hasNext()) {
                Question current = descIter.next();
                if (current.equals(q)) {
                    current.getAnswerList().addAll(q.getAnswerList());
                    break;
                }
            }
        }
    }
    dataset.setQuestionSet(questionSet);
    dataset.setFinishedSet(new TreeSet<Question>());

    br.close();
    return dataset;
}
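
A note on the duplicate handling above: TreeSet has no get-by-element method, so the example scans with a descending iterator to find the stored Question equal to q. A shorter alternative sketch (assuming Question's ordering is consistent with equals) uses NavigableSet.floor, which returns the stored element equal to q whenever q is present:

    Question existing = questionSet.floor(q); // the stored element equal to q, if any
    if (existing != null && existing.equals(q)) {
        existing.getAnswerList().addAll(q.getAnswerList());
    }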

From source file:com.opendoorlogistics.core.utils.strings.Strings.java

public static String getFiltered(String s, char... acceptChars) {
    TreeSet<Character> set = new TreeSet<>();
    for (char c : acceptChars) {
        set.add(c);
    }
    StringBuilder ret = new StringBuilder();
    for (int i = 0; i < s.length(); i++) {
        if (set.contains(s.charAt(i))) {
            ret.append(s.charAt(i));
        }
    }
    return ret.toString();
}
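
For illustration, a hypothetical call (assuming the enclosing Strings class) that keeps only digit characters:

    String digits = Strings.getFiltered("a1b2c3", '1', '2', '3'); // "123"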

From source file:base.Engine.java

static LinkedList<PatternInstance> massMerge(Set<TreeSet<PatternInstance>> collisions,
        LinkedList<PatternInstance> instances, ReferenceMap<PatternWrapper, PatternEntry> knownPatterns,
        Ruleset rule) {
    PatternInstance currentInstances = null;
    for (ListIterator<PatternInstance> i = instances.listIterator(); i.hasNext();) {
        currentInstances = i.next();
        boolean shouldRemove = false;
        for (TreeSet<PatternInstance> groups : collisions) {
            if (groups.contains(currentInstances)) {
                shouldRemove = true;
                break;
            }
        }
        if (shouldRemove)
            i.remove();
    }

    for (TreeSet<PatternInstance> group : collisions) {
        TreeSet<PatternInstance> runningParts = group;
        boolean stillFindingParts = true;

        while (stillFindingParts) {
            stillFindingParts = false;

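            // Note: runningParts is mutated inside these for-each loops; the
            // labeled break below exits both loops before the next iterator
            // access, so no ConcurrentModificationException can occur.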
            eachMatchLoop: for (PatternInstance part1 : runningParts)
                for (PatternInstance part2 : runningParts)
                    if (part1 != part2 && part1.collides(rule, part2)) {
                        stillFindingParts = true;
                        runningParts.remove(part1);
                        runningParts.remove(part2);
                        runningParts.add(part1.merge(knownPatterns, part2));
                        break eachMatchLoop;
                    }
        }

        for (PatternInstance part : runningParts) {
            instances.add(part);
        }
    }

    return instances;

}

From source file:com.yahoo.druid.hadoop.HiveDatasourceInputFormat.java

private static String[] getFrequentLocations(Iterable<String> hosts) {

    final CountingMap<String> counter = new CountingMap<>();
    for (String location : hosts) {
        counter.add(location, 1);
    }

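    // Design note: the TreeSet is ordered by (count descending, host ascending),
    // so iterating it yields hosts ranked by frequency; Iterables.limit(sorted, 3)
    // below then picks the three most frequent locations.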
    final TreeSet<Pair<Long, String>> sorted = Sets
            .<Pair<Long, String>>newTreeSet(new Comparator<Pair<Long, String>>() {
                @Override
                public int compare(Pair<Long, String> o1, Pair<Long, String> o2) {
                    int compare = o2.lhs.compareTo(o1.lhs); // descending
                    if (compare == 0) {
                        compare = o1.rhs.compareTo(o2.rhs); // ascending
                    }
                    return compare;
                }
            });

    for (Map.Entry<String, AtomicLong> entry : counter.entrySet()) {
        sorted.add(Pair.of(entry.getValue().get(), entry.getKey()));
    }

    // use default replication factor, if possible
    final List<String> locations = Lists.newArrayListWithCapacity(3);
    for (Pair<Long, String> frequent : Iterables.limit(sorted, 3)) {
        locations.add(frequent.rhs);
    }
    return locations.toArray(new String[locations.size()]);
}

From source file:org.opendatakit.security.server.SecurityServiceUtil.java

public static void setAuthenticationListsForSpecialUser(UserSecurityInfo userInfo,
        GrantedAuthorityName specialGroup, CallingContext cc) throws DatastoreFailureException {
    RoleHierarchy hierarchy = (RoleHierarchy) cc.getHierarchicalRoleRelationships();
    Set<GrantedAuthority> badGrants = new TreeSet<GrantedAuthority>();
    // The assigned groups are the specialGroup that this user defines
    // (i.e., anonymous or daemon) plus all directly-assigned assignable
    // permissions.
    TreeSet<GrantedAuthorityName> groups = new TreeSet<GrantedAuthorityName>();
    TreeSet<GrantedAuthorityName> authorities = new TreeSet<GrantedAuthorityName>();
    groups.add(specialGroup);
    GrantedAuthority specialAuth = new SimpleGrantedAuthority(specialGroup.name());
    try {
        Set<GrantedAuthority> auths = GrantedAuthorityHierarchyTable
                .getSubordinateGrantedAuthorities(specialAuth, cc);
        for (GrantedAuthority auth : auths) {
            GrantedAuthorityName name = mapName(auth, badGrants);
            if (name != null) {
                groups.add(name);
            }
        }
    } catch (ODKDatastoreException e) {
        e.printStackTrace();
        throw new DatastoreFailureException("Unable to retrieve granted authorities of " + specialGroup.name());
    }

    Collection<? extends GrantedAuthority> auths = hierarchy
            .getReachableGrantedAuthorities(Collections.singletonList(specialAuth));
    for (GrantedAuthority auth : auths) {
        GrantedAuthorityName name = mapName(auth, badGrants);
        if (name != null && !GrantedAuthorityName.permissionsCanBeAssigned(auth.getAuthority())) {
            authorities.add(name);
        }
    }
    userInfo.setAssignedUserGroups(groups);
    userInfo.setGrantedAuthorities(authorities);
    try {
        removeBadGrantedAuthorities(badGrants, cc);
    } catch (ODKDatastoreException e) {
        e.printStackTrace();
    }
}

From source file:com.reactive.hzdfs.dll.JarClassLoader.java

private static TreeSet<String> scanForPackages(String path) throws IOException {
    try (JarFile file = new JarFile(path)) {
        TreeSet<String> packages = new TreeSet<>(new Comparator<String>() {

            @Override
            public int compare(String o1, String o2) {
                if (o2.length() > o1.length() && o2.contains(o1))
                    return -1;
                else if (o2.length() < o1.length() && o1.contains(o2))
                    return 1;
                else
                    return o1.compareTo(o2);
            }
        });
        for (Enumeration<JarEntry> entries = file.entries(); entries.hasMoreElements();) {
            JarEntry entry = entries.nextElement();
            String name = entry.getName();

            if (name.endsWith(".class")) {
                String fqcn = ClassUtils.convertResourcePathToClassName(name);
                fqcn = StringUtils.delete(fqcn, ".class");
                packages.add(ClassUtils.getPackageName(fqcn));
            }
        }

        return packages;
    }
}

From source file:net.java.sip.communicator.impl.history.HistoryReaderImpl.java

/**
 * Used to limit the files when a start or end date exists,
 * so that only a few files need to be searched.
 *
 * @param filelist Iterator
 * @param startDate Date
 * @param endDate Date
 * @param reverseOrder reverse order of files
 * @return Vector
 */
static Vector<String> filterFilesByDate(Iterator<String> filelist, Date startDate, Date endDate,
        final boolean reverseOrder) {
    if (startDate == null && endDate == null) {
        // no filtering needed then just return the same list
        Vector<String> result = new Vector<String>();
        while (filelist.hasNext()) {
            result.add(filelist.next());
        }

        Collections.sort(result, new Comparator<String>() {

            public int compare(String o1, String o2) {
                if (reverseOrder)
                    return o2.compareTo(o1);
                else
                    return o1.compareTo(o2);
            }
        });

        return result;
    }
    // first convert all files to long
    TreeSet<Long> files = new TreeSet<Long>();
    while (filelist.hasNext()) {
        String filename = filelist.next();

        files.add(Long.parseLong(filename.substring(0, filename.length() - 4)));
    }

    TreeSet<Long> resultAsLong = new TreeSet<Long>();

    // Temporary fix of a NoSuchElementException
    if (files.size() == 0) {
        return new Vector<String>();
    }

    Long startLong;
    Long endLong;

    if (startDate == null)
        startLong = Long.MIN_VALUE;
    else
        startLong = startDate.getTime();

    if (endDate == null)
        endLong = Long.MAX_VALUE;
    else
        endLong = endDate.getTime();

    // get all records inclusive the one before the startdate
    for (Long f : files) {
        if (startLong <= f && f <= endLong) {
            resultAsLong.add(f);
        }
    }

    // get the subset before the start date, to get its last element
    // if exists
    if (!files.isEmpty() && files.first() <= startLong) {
        SortedSet<Long> setBeforeTheInterval = files.subSet(files.first(), true, startLong, true);
        if (!setBeforeTheInterval.isEmpty())
            resultAsLong.add(setBeforeTheInterval.last());
    }

    Vector<String> result = new Vector<String>();

    Iterator<Long> iter = resultAsLong.iterator();
    while (iter.hasNext()) {
        Long item = iter.next();
        result.add(item.toString() + ".xml");
    }

    Collections.sort(result, new Comparator<String>() {

        public int compare(String o1, String o2) {
            if (reverseOrder)
                return o2.compareTo(o1);
            else
                return o1.compareTo(o2);
        }
    });

    return result;
}

From source file:dk.netarkivet.harvester.harvesting.HarvestDocumentation.java

/**
 * Write harvest details to archive file(s).
 * This includes the order.xml, seeds.txt,
 * specific settings.xml for certain domains,
 * the harvestInfo.xml,
 * all available reports (subset of HeritrixFiles.HERITRIX_REPORTS),
 * and all available logs (subset of HeritrixFiles.HERITRIX_LOGS).
 *
 * @param jobID the given job Id
 * @param harvestID the id for the harvestdefinition, which created this job
 * @param crawlDir the directory where the crawljob took place
 * @param mdfw a MetadataFileWriter used to store the harvest configuration,
 *      and harvest logs and reports.
 * @param heritrixVersion the Heritrix version used by the harvest.
 * @throws ArgumentNotValid If null arguments occur
 * @return a list of files added to the archive file.
 */
private static List<File> writeHarvestDetails(long jobID, long harvestID, File crawlDir,
        MetadataFileWriter mdfw, String heritrixVersion) {
    List<File> filesAdded = new ArrayList<File>();

    // We will sort the files by URL
    TreeSet<MetadataFile> files = new TreeSet<MetadataFile>();

    // List heritrix files in the crawl directory
    File[] heritrixFiles = crawlDir.listFiles(new FileFilter() {
        @Override
        public boolean accept(File f) {
            return (f.isFile() && f.getName().matches(MetadataFile.HERITRIX_FILE_PATTERN));
        }
    });

    // Add files in the crawl directory
    for (File hf : heritrixFiles) {
        files.add(new MetadataFile(hf, harvestID, jobID, heritrixVersion));
    }
    // Generate an arcfiles-report.txt if configured to do so.
    boolean genArcFilesReport = Settings.getBoolean(HarvesterSettings.METADATA_GENERATE_ARCHIVE_FILES_REPORT);
    if (genArcFilesReport) {
        log.debug("Creating an arcfiles-report.txt");
        files.add(new MetadataFile(new ArchiveFilesReportGenerator(crawlDir).generateReport(), harvestID, jobID,
                heritrixVersion));
    } else {
        log.debug("Creation of the arcfiles-report.txt has been disabled by the setting '"
                + HarvesterSettings.METADATA_GENERATE_ARCHIVE_FILES_REPORT + "'!");
    }

    // Add log files
    File logDir = new File(crawlDir, "logs");
    if (logDir.exists()) {
        File[] heritrixLogFiles = logDir.listFiles(new FileFilter() {
            @Override
            public boolean accept(File f) {
                return (f.isFile() && f.getName().matches(MetadataFile.LOG_FILE_PATTERN));
            }
        });
        for (File logFile : heritrixLogFiles) {
            files.add(new MetadataFile(logFile, harvestID, jobID, heritrixVersion));
            log.info("Found Heritrix log file " + logFile.getName());
        }
    } else {
        log.debug("No logs dir found in crawldir: " + crawlDir.getAbsolutePath());
    }

    // Check if exists any settings directory (domain-specific settings)
    // if yes, add any settings.xml hiding in this directory.
    // TODO Delete any settings-files found in the settings directory
    File settingsDir = new File(crawlDir, "settings");
    if (settingsDir.isDirectory()) {
        Map<File, String> domainSettingsFiles = findDomainSpecificSettings(settingsDir);
        for (Map.Entry<File, String> entry : domainSettingsFiles.entrySet()) {

            File dsf = entry.getKey();
            String domain = entry.getValue();
            files.add(new MetadataFile(dsf, harvestID, jobID, heritrixVersion, domain));
        }
    } else {
        log.debug("No settings directory found in crawldir: " + crawlDir.getAbsolutePath());
    }

    // Write files in order to metadata archive file.
    for (MetadataFile mdf : files) {
        File heritrixFile = mdf.getHeritrixFile();
        String heritrixFileName = heritrixFile.getName();
        String mimeType = (heritrixFileName.endsWith(".xml") ? "text/xml" : "text/plain");
        if (mdfw.writeTo(heritrixFile, mdf.getUrl(), mimeType)) {
            filesAdded.add(heritrixFile);
        } else {
            log.warn("The Heritrix file '" + heritrixFile.getAbsolutePath()
                    + "' was not included in the metadata archivefile due to some error.");
        }
    }

    return filesAdded;
}

From source file:com.wormsim.data.GroupDistribution.java

public void sample(TreeSet<AnimalGroup> p_groups) {
    dists.forEach((k, v) -> {
        p_groups.add(new AnimalGroup(k, v.sample()));
    });
}

From source file:gedi.atac.Atac.java

public static void buildInsertionIndex(String path, GenomicRegionStorage<? extends AlignedReadsData> storage)
        throws IOException {

    DiskGenomicNumericBuilder build = new DiskGenomicNumericBuilder(path);
    int offset = 4;
    build.setReferenceSorted(true);

    TreeSet<String> refs = new TreeSet<String>();
    storage.getReferenceSequences().forEach(r -> refs.add(r.getName()));
    ConsoleProgress p = new ConsoleProgress();

    Consumer<MutableReferenceGenomicRegion<? extends AlignedReadsData>> adder = new Consumer<MutableReferenceGenomicRegion<? extends AlignedReadsData>>() {

        @Override
        public void accept(MutableReferenceGenomicRegion<? extends AlignedReadsData> mrgr) {
            try {
                int v = mrgr.getData().getTotalCountOverallInt(ReadCountMode.All);
                if (v > 0) {
                    build.addValue(mrgr.getReference().toPlusStrand(),
                            GenomicRegionPosition.Start.position(mrgr.getReference(), mrgr.getRegion(), offset),
                            v);
                    build.addValue(mrgr.getReference().toPlusStrand(),
                            GenomicRegionPosition.Stop.position(mrgr.getReference(), mrgr.getRegion(), -offset),
                            v);
                }
                p.incrementProgress();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

    };

    for (String n : refs) {
        p.init();
        p.setDescription(n + "+");
        storage.iterateMutableReferenceGenomicRegions(Chromosome.obtain(n, Strand.Plus))
                .forEachRemaining(adder);
        p.setDescription(n + "-");
        storage.iterateMutableReferenceGenomicRegions(Chromosome.obtain(n, Strand.Minus))
                .forEachRemaining(adder);
        p.finish();
    }

    build.build();
}