Example usage for java.util TreeSet remove

List of usage examples for java.util TreeSet remove

Introduction

In this page you can find the example usage for java.util TreeSet remove.

Prototype

public boolean remove(Object o) 

Source Link

Document

Removes the specified element from this set if it is present.

Usage

From source file:Main.java

/**
 * Demonstrates set algebra on TreeSet: union, intersection, difference,
 * symmetric difference, and subset/superset tests. The helper methods
 * (union, intersection, difference, symDifference, isSubset, isSuperset)
 * are defined elsewhere in this file.
 */
public static void main(String args[]) {
    TreeSet<Character> set1 = new TreeSet<Character>();
    TreeSet<Character> set2 = new TreeSet<Character>();

    set1.add('A');
    set1.add('B');
    set1.add('C');
    set1.add('D');

    set2.add('C');
    set2.add('D');
    set2.add('E');
    set2.add('F');

    System.out.println("set1: " + set1);
    System.out.println("set2: " + set2);

    System.out.println("Union: " + union(set1, set2));
    System.out.println("Intersection: " + intersection(set1, set2));
    System.out.println("Difference (set1 - set2): " + difference(set1, set2));
    System.out.println("Symmetric Difference: " + symDifference(set1, set2));

    // set3 is a copy of set1 with 'D' removed, so it is a proper subset of set1.
    TreeSet<Character> set3 = new TreeSet<Character>(set1);

    set3.remove('D');
    System.out.println("set3: " + set3);

    // BUG FIX: the original labels said "set2" although the calls below
    // actually compare set1 with set3.
    System.out.println("Is set1 a subset of set3? " + isSubset(set1, set3));
    System.out.println("Is set1 a superset of set3? " + isSuperset(set1, set3));
    System.out.println("Is set3 a subset of set1? " + isSubset(set3, set1));
    System.out.println("Is set3 a superset of set1? " + isSuperset(set3, set1));

}

From source file:Main.java

/**
 * Demonstrates TreeSet.remove(Object): removing an element that is present
 * returns true.
 */
public static void main(String[] args) {

    TreeSet<Integer> numbers = new TreeSet<Integer>();

    for (int value : new int[] { 1, 3, 17, 2 }) {
        numbers.add(value);
    }

    System.out.println("Remove 17: " + numbers.remove(17));
}

From source file:Main.java

/**
 * Demonstrates TreeSet.remove(Object): removing a present element returns
 * true, and the element is gone from the printed set afterwards.
 */
public static void main(String[] args) {
    TreeSet<Integer> tSet = new TreeSet<Integer>();

    // Integer.valueOf replaces the deprecated Integer(String) constructor
    // and reuses cached boxed values.
    tSet.add(Integer.valueOf(1));
    tSet.add(Integer.valueOf(2));
    tSet.add(Integer.valueOf(3));

    System.out.println(tSet);
    System.out.println("Was 2 removed from TreeSet ? " + tSet.remove(Integer.valueOf(2)));
    System.out.println(tSet);
}

From source file:Main.java

/**
 * Demonstrates how TreeSet.remove(Object) shrinks the set's size.
 */
public static void main(String[] args) {
    TreeSet<Integer> tSet = new TreeSet<Integer>();
    System.out.println("Size of TreeSet : " + tSet.size());

    // Integer.valueOf replaces the deprecated Integer(String) constructor
    // and reuses cached boxed values.
    tSet.add(Integer.valueOf(1));
    tSet.add(Integer.valueOf(2));
    tSet.add(Integer.valueOf(3));

    System.out.println(tSet.size());

    // remove one element from TreeSet using remove method

    tSet.remove(Integer.valueOf(1));
    System.out.println("Size of TreeSet after removal : " + tSet.size());
}

From source file:org.onosproject.drivers.microsemi.yang.utils.CeVlanMapUtils.java

/**
 * Convert an array of vlan ids in to a string representation. Consecutive
 * runs are collapsed with a colon (e.g. {1,2,3} becomes "1:3") and disjoint
 * values are separated by commas, so {1,2,3,5} becomes "1:3,5". Duplicates
 * and ordering in the input are ignored, and a leading vlan id 0 is dropped.
 *
 * @param vlanArray An array of vlan ids
 * @return A string representation delimited by commas and colons
 */
public static String vlanListAsString(Short[] vlanArray) {
    boolean colonPending = false;
    StringBuilder ceVlanMapBuilder = new StringBuilder();
    if (vlanArray.length == 0) {
        return "";
    } else if (vlanArray.length == 1 && vlanArray[0] == 0) {
        return "0";
    }

    //To ensure that there are no repeated or out-of-order elements we must convert to TreeSet
    TreeSet<Short> vlanSet = new TreeSet<>(Arrays.asList(vlanArray));

    if (vlanSet.first() == 0) {
        vlanSet.remove(vlanSet.first());
        // BUG FIX: an input consisting only of repeated zeros (e.g. {0, 0})
        // left the set empty here, so first() below threw
        // NoSuchElementException. Treat it like the single-zero case.
        if (vlanSet.isEmpty()) {
            return "0";
        }
    }
    short prev = vlanSet.first();
    for (short s : vlanSet) {
        if (s == prev) {
            // First element of the sorted set: start the output with it.
            ceVlanMapBuilder.append(Short.valueOf(s));
            continue;
        } else if (prev == (s - 1)) {
            // Still inside a consecutive run; defer output until it ends.
            colonPending = true;
        } else {
            if (colonPending) {
                // Close the run that ended at prev.
                ceVlanMapBuilder.append(":" + Short.valueOf(prev));
                colonPending = false;
            }
            ceVlanMapBuilder.append("," + Short.valueOf(s));
        }
        prev = s;
    }
    if (colonPending) {
        // The set ended while inside a run; close it.
        ceVlanMapBuilder.append(":" + Short.valueOf(prev));
    }

    return ceVlanMapBuilder.toString();
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.convincingness.sampling.Step4MTurkOutputCollector.java

/**
 * Creates .success files for updating HITs in order to require more assignments.
 *
 * @param assignmentsPerHits actual assignments per HIT
 * @param hitTypeID          type/*from   www  .  j ava2 s  .  c  o m*/
 * @param resultFile         source MTurk file
 * @throws IOException IO exception
 */

static void prepareUpdateHITsFiles(Map<String, Integer> assignmentsPerHits, String hitTypeID, File resultFile)
        throws IOException {
    TreeSet<Integer> assignmentNumbers = new TreeSet<>(assignmentsPerHits.values());

    System.out.println(assignmentsPerHits);

    // how many is required to be fully annotated
    final int fullyAnnotated = 5;

    assignmentNumbers.remove(fullyAnnotated);

    for (Integer i : assignmentNumbers) {
        // output file
        int annotationsRequired = fullyAnnotated - i;
        File file = new File(resultFile + "_requires_extra_assignments_" + annotationsRequired + ".success");
        PrintWriter pw = new PrintWriter(file, "utf-8");
        pw.println("hitid\thittypeid");

        for (Map.Entry<String, Integer> entry : assignmentsPerHits.entrySet()) {
            if (i.equals(entry.getValue())) {
                pw.println(entry.getKey() + "\t" + hitTypeID);
            }
        }

        pw.close();

        System.out.println(
                "Extra annotations required (" + annotationsRequired + "), saved to " + file.getAbsolutePath());
    }
}

From source file:base.Engine.java

/**
 * Re-groups colliding pattern instances: every instance that belongs to a
 * collision group is removed from the flat instance list, each group is then
 * merged pairwise until no two members collide any more, and the merged
 * survivors are appended back to the list.
 *
 * @param collisions    groups of instances known to collide with each other
 * @param instances     flat list of all instances; modified in place
 * @param knownPatterns cache passed through to {@code merge} when two
 *                      instances are combined
 * @param rule          ruleset that decides whether two instances collide
 * @return the same {@code instances} list, updated in place
 */
static LinkedList<PatternInstance> massMerge(Set<TreeSet<PatternInstance>> collisions,
        LinkedList<PatternInstance> instances, ReferenceMap<PatternWrapper, PatternEntry> knownPatterns,
        Ruleset rule) {
    // Phase 1: drop every instance that appears in some collision group;
    // merged replacements are added back in phase 3 below.
    PatternInstance currentInstances = null;
    for (ListIterator<PatternInstance> i = instances.listIterator(); i.hasNext();) {
        currentInstances = i.next();
        boolean shouldRemove = false;
        for (TreeSet<PatternInstance> groups : collisions) {
            if (groups.contains(currentInstances)) {
                shouldRemove = true;
                break;
            }
        }
        if (shouldRemove)
            i.remove();
    }

    // Phase 2: within each group, repeatedly merge one colliding pair per
    // scan until a full scan finds none. Mutating runningParts during the
    // for-each loops is safe only because the labeled break exits both
    // loops immediately after the mutation, before either iterator is
    // advanced again — preserve that ordering if you ever touch this.
    for (TreeSet<PatternInstance> group : collisions) {
        TreeSet<PatternInstance> runningParts = group;
        boolean stillFindingParts = true;

        while (stillFindingParts) {
            stillFindingParts = false;

            eachMatchLoop: for (PatternInstance part1 : runningParts)
                for (PatternInstance part2 : runningParts)
                    if (part1 != part2 && part1.collides(rule, part2)) {
                        stillFindingParts = true;
                        runningParts.remove(part1);
                        runningParts.remove(part2);
                        runningParts.add(part1.merge(knownPatterns, part2));
                        break eachMatchLoop;
                    }
        }

        // Phase 3: put the fully merged survivors back into the main list.
        for (PatternInstance part : runningParts) {
            instances.add(part);
        }
    }

    return instances;

}

From source file:com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

/**
 * Write out a {@link SequenceFile} that can be read by
 * {@link TotalOrderPartitioner} that contains the split points in
 * startKeys./*  w w w  .  ja  v  a  2s .  co m*/
 */
private static void writePartitions(Configuration conf, Path partitionsPath,
        List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
            ImmutableBytesWritable.class, NullWritable.class);

    try {
        for (ImmutableBytesWritable startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }
}

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormatBase.java

/**
 * Write out a {@link org.apache.hadoop.io.SequenceFile} that can be read by
 * {@link org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner} that contains the split points in
 * startKeys./*  ww  w.  j a v  a  2s.  c  o m*/
 */
@SuppressWarnings("deprecation")
private static void writePartitions(Configuration conf, Path partitionsPath,
        List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
            ImmutableBytesWritable.class, NullWritable.class);

    try {
        for (ImmutableBytesWritable startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }
}

From source file:org.opendatakit.common.security.server.SecurityServiceUtil.java

/**
 * Method to enforce an access configuration constraining only registered
 * users, authenticated users and anonymous access.
 *
 * @param users     all users, including the anonymous pseudo-user whose
 *                  assigned groups become the anonymous grant list
 * @param allGroups all granted-authority group names (not referenced in this
 *                  method body — presumably kept for interface compatibility;
 *                  TODO confirm with callers)
 * @param cc        calling context providing the datastore and current user
 * @throws DatastoreFailureException if persisting any part of the
 *                  configuration fails
 * @throws AccessDeniedException
 */
public static final void setStandardSiteAccessConfiguration(ArrayList<UserSecurityInfo> users,
        ArrayList<GrantedAuthorityName> allGroups, CallingContext cc)
        throws DatastoreFailureException, AccessDeniedException {

    // Collect the grants of the anonymous pseudo-user, skipping the
    // anonymous authority itself to avoid a self-referential hierarchy.
    List<String> anonGrantStrings = new ArrayList<String>();
    for (UserSecurityInfo i : users) {
        if (i.getType() == UserType.ANONYMOUS) {
            for (GrantedAuthorityName a : i.getAssignedUserGroups()) {
                if (anonAuth.getAuthority().equals(a.name()))
                    continue; // avoid circularity...
                anonGrantStrings.add(a.name());
            }
            break;
        }
    }

    try {
        // Re-assert the standard authority hierarchy, one role at a time.
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(siteAuth,
                SecurityServiceUtil.siteAdministratorGrants, cc);
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(administerTablesAuth,
                SecurityServiceUtil.administerTablesGrants, cc);
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(synchronizeTablesAuth,
                SecurityServiceUtil.synchronizeTablesGrants, cc);
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(dataOwnerAuth,
                SecurityServiceUtil.dataOwnerGrants, cc);
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(dataViewerAuth,
                SecurityServiceUtil.dataViewerGrants, cc);
        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(dataCollectorAuth,
                SecurityServiceUtil.dataCollectorGrants, cc);

        GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(anonAuth, anonGrantStrings, cc);

        // Anything assignable that is not one of the standard authorities
        // above is stale configuration; strip the standard ones from the
        // fetched set so only the leftovers remain.
        TreeSet<String> authorities = GrantedAuthorityHierarchyTable
                .getAllPermissionsAssignableGrantedAuthorities(cc.getDatastore(), cc.getCurrentUser());
        authorities.remove(siteAuth.getAuthority());
        authorities.remove(administerTablesAuth.getAuthority());
        authorities.remove(synchronizeTablesAuth.getAuthority());
        authorities.remove(dataOwnerAuth.getAuthority());
        authorities.remove(dataViewerAuth.getAuthority());
        authorities.remove(dataCollectorAuth.getAuthority());
        authorities.remove(anonAuth.getAuthority());

        // remove anything else from database...
        List<String> empty = Collections.emptyList();
        for (String s : authorities) {
            GrantedAuthorityHierarchyTable.assertGrantedAuthorityHierarchy(new SimpleGrantedAuthority(s), empty,
                    cc);
        }

        // Persist the users themselves, then their role memberships.
        Map<UserSecurityInfo, String> pkMap = setUsers(users, cc);
        setUsersOfGrantedAuthority(pkMap, siteAuth, cc);
        setUsersOfGrantedAuthority(pkMap, administerTablesAuth, cc);
        setUsersOfGrantedAuthority(pkMap, synchronizeTablesAuth, cc);
        setUsersOfGrantedAuthority(pkMap, dataOwnerAuth, cc);
        setUsersOfGrantedAuthority(pkMap, dataViewerAuth, cc);
        setUsersOfGrantedAuthority(pkMap, dataCollectorAuth, cc);

    } catch (ODKDatastoreException e) {
        e.printStackTrace();
        throw new DatastoreFailureException("Incomplete update");
    } finally {
        // Always bump the revision timestamps so caches notice the change;
        // failures here fall back to RELOAD_INTERVAL-based refresh.
        Datastore ds = cc.getDatastore();
        User user = cc.getCurrentUser();
        try {
            SecurityRevisionsTable.setLastRegisteredUsersRevisionDate(ds, user);
        } catch (ODKDatastoreException e) {
            // if it fails, use RELOAD_INTERVAL to force reload.
            e.printStackTrace();
        }
        try {
            SecurityRevisionsTable.setLastRoleHierarchyRevisionDate(ds, user);
        } catch (ODKDatastoreException e) {
            // if it fails, use RELOAD_INTERVAL to force reload.
            e.printStackTrace();
        }
    }
}