Example usage for java.util HashSet remove

Introduction

This page presents usage examples for java.util.HashSet.remove, drawn from real open-source projects.

Prototype

public boolean remove(Object o) 

Document

Removes the specified element from this set if it is present.
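
Note that remove returns true only if the set actually contained the element; a second removal of the same element returns false. A minimal, self-contained sketch of this contract (the class name RemoveDemo is illustrative, not taken from the projects below):

import java.util.HashSet;

public class RemoveDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        set.add("a");
        System.out.println(set.remove("a")); // true: "a" was present and is now gone
        System.out.println(set.remove("a")); // false: the set no longer contains "a"
        System.out.println(set.remove("b")); // false: "b" was never added
    }
}

Several of the examples below rely on exactly this boolean return, using remove as a combined membership test and delete.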

Usage

From source file:com.milaboratory.core.tree.SequenceTreeMapTest.java
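
A randomized test of a mutation-aware tree map. Sequences returned by the neighborhood iterator are removed from an accumulator set seeded with the expected results; asserting that the set is empty afterwards proves every expected sequence was visited.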

@Test
public void testRandomizedTest3() throws Exception {
    for (int f = 0; f < repeats * 6; ++f) {
        //System.out.println(f);
        Alphabet alphabet = getAlphabetSequence(f);

        for (byte t = 0; t < 3; ++t) {
            final Sequence seqRight = randomSequence(alphabet, 50, 100),
                    seqLeft = randomSequence(alphabet, 50, 100), spacer = randomSequence(alphabet, 200, 200),
                    goodSequence = concatenate(seqLeft, spacer, seqRight);

            SequenceTreeMap map = new SequenceTreeMap(alphabet);

            int[] mut = new int[3];
            mut[t] = 3;

            HashSet<Sequence> lErr = new HashSet<>(), rErr = new HashSet<>(), lrErr = new HashSet<>();

            Sequence seq1, seq2, mseq;

            for (int i = 0; i < 100; ++i) {
                //Left Error
                seq1 = introduceErrors(seqLeft, mut);
                mseq = concatenate(seq1, spacer, seqRight);
                lErr.add(mseq);
                map.put(mseq, mseq);

                //Right Error
                seq1 = introduceErrors(seqRight, mut);
                mseq = concatenate(seqLeft, spacer, seq1);
                rErr.add(mseq);
                map.put(mseq, mseq);

                //LR Error
                seq1 = introduceErrors(seqLeft, mut);
                seq2 = introduceErrors(seqRight, mut);
                mseq = concatenate(seq1, spacer, seq2);
                lrErr.add(mseq);
                map.put(mseq, mseq);
            }

            SequenceTreeMap.Node<Sequence> n;

            //Left run
            NeighborhoodIterator neighborhoodIterator = map.getNeighborhoodIterator(goodSequence, 1.3,
                    new double[] { 0.1, 0.1, 0.1 }, mut, new MutationGuide() {
                        @Override
                        public boolean allowMutation(Sequence ref, int position, byte type, byte code) {
                            return position < seqLeft.size() + 100;
                        }
                    });

            HashSet<Sequence> acc = new HashSet<>(lErr);

            while ((n = neighborhoodIterator.nextNode()) != null) {
                assertTrue(lErr.contains(n.object));
                assertFalse(rErr.contains(n.object));
                assertFalse(lrErr.contains(n.object));
                acc.remove(n.object);
            }
            assertTrue(acc.isEmpty());

            //Right run
            neighborhoodIterator = map.getNeighborhoodIterator(goodSequence, 1.3,
                    new double[] { 0.1, 0.1, 0.1 }, mut, new MutationGuide() {
                        @Override
                        public boolean allowMutation(Sequence ref, int position, byte type, byte code) {
                            return position > seqLeft.size() + 100;
                        }
                    });

            acc = new HashSet<>(rErr);

            while ((n = neighborhoodIterator.nextNode()) != null) {
                assertTrue(rErr.contains(n.object));
                assertFalse(lErr.contains(n.object));
                assertFalse(lrErr.contains(n.object));
                acc.remove(n.object);
            }
            assertTrue(acc.isEmpty());
        }
    }
}

From source file:org.archive.modules.fetcher.FetchHTTP.java
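
Here the boolean return of remove serves as a combined membership test and delete: an auth scheme is tried only if the server actually offered it, and whatever remains in the set afterwards is logged as unsupported.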

protected AuthScheme chooseAuthScheme(Map<String, String> challenges, String challengeHeaderKey) {
    HashSet<String> authSchemesLeftToTry = new HashSet<String>(challenges.keySet());
    for (String authSchemeName : new String[] { "digest", "basic" }) {
        if (authSchemesLeftToTry.remove(authSchemeName)) {
            AuthScheme authScheme = AUTH_SCHEME_REGISTRY.lookup(authSchemeName).create(null);
            BasicHeader challenge = new BasicHeader(challengeHeaderKey, challenges.get(authSchemeName));

            try {
                authScheme.processChallenge(challenge);
            } catch (MalformedChallengeException e) {
                logger.fine(e.getMessage() + " " + challenge);
                continue;
            }
            if (authScheme.isConnectionBased()) {
                logger.fine("Connection based " + authScheme);
                continue;
            }

            if (authScheme.getRealm() == null || authScheme.getRealm().length() <= 0) {
                logger.fine("Empty realm " + authScheme);
                continue;
            }

            return authScheme;
        }
    }

    for (String unsupportedSchemeName : authSchemesLeftToTry) {
        logger.fine("Unsupported http auth scheme: " + unsupportedSchemeName);
    }

    return null;
}

From source file:com.pinterest.arcee.aws.EC2HostInfoDAOImpl.java
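
Instance IDs are removed from the working set as AWS reports on them; any IDs still in the set after paging through the results are unknown to AWS and are treated as terminated.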

@Override
public Set<String> getTerminatedHosts(Set<String> staleIds) throws Exception {
    HashSet<String> ids = new HashSet<>(staleIds);
    HashSet<String> terminatedHosts = new HashSet<>();
    while (!ids.isEmpty()) {
        DescribeInstancesRequest request = new DescribeInstancesRequest();
        request.setInstanceIds(ids);
        try {
            do {
                DescribeInstancesResult results = ec2Client.describeInstances(request);
                List<Reservation> reservations = results.getReservations();
                for (Reservation reservation : reservations) {
                    for (Instance instance : reservation.getInstances()) {
                        int stateCode = instance.getState().getCode();
                        String id = instance.getInstanceId();
                        if (stateCode == TERMINATED_CODE || stateCode == STOPPED_CODE) {
                            LOG.info(String.format("Instance %s has already been terminated or stopped.", id));
                            terminatedHosts.add(id);
                        }

                        ids.remove(id);
                    }
                }
                if (results.getNextToken() == null || results.getNextToken().isEmpty()) {
                    break;
                }
                request = new DescribeInstancesRequest();
                request.setInstanceIds(ids);
                request.setNextToken(results.getNextToken());
            } while (true);
            LOG.debug("Cannot find the following ids in AWS: {}", ids);
            terminatedHosts.addAll(ids);
            return terminatedHosts;
        } catch (AmazonServiceException ex) {
            Collection<String> invalidHostIds = handleInvalidInstanceId(ex);
            ids.removeAll(invalidHostIds);
            // add invalid host ids to the terminated host list.
            terminatedHosts.addAll(invalidHostIds);
        } catch (AmazonClientException ex) {
            LOG.error(String.format("Got AmazonClientException, exiting with terminatedHosts %s",
                    terminatedHosts.toString()), ex);
            return terminatedHosts;
        }
    }
    return terminatedHosts;
}

From source file:org.janusgraph.diskstorage.solr.SolrIndex.java
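
When a mutation both deletes and re-adds the same field, the re-added entries are removed from the pending deletion set first, so the subsequent delete does not wipe values that the same mutation restores.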

@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations,
        KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    logger.debug("Mutating SOLR");
    try {
        for (Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            String collectionName = stores.getKey();
            String keyIdField = getKeyFieldId(collectionName);

            List<String> deleteIds = new ArrayList<String>();
            Collection<SolrInputDocument> changes = new ArrayList<SolrInputDocument>();

            for (Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                String docId = entry.getKey();
                IndexMutation mutation = entry.getValue();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());

                //Handle any deletions
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        logger.trace("Deleting entire document {}", docId);
                        deleteIds.add(docId);
                    } else {
                        HashSet<IndexEntry> fieldDeletions = Sets.newHashSet(mutation.getDeletions());
                        if (mutation.hasAdditions()) {
                            for (IndexEntry indexEntry : mutation.getAdditions()) {
                                fieldDeletions.remove(indexEntry);
                            }
                        }
                        deleteIndividualFieldsFromIndex(collectionName, keyIdField, docId, fieldDeletions);
                    }
                }

                if (mutation.hasAdditions()) {
                    int ttl = mutation.determineTTL();

                    SolrInputDocument doc = new SolrInputDocument();
                    doc.setField(keyIdField, docId);

                    boolean isNewDoc = mutation.isNew();

                    if (isNewDoc)
                        logger.trace("Adding new document {}", docId);

                    for (IndexEntry e : mutation.getAdditions()) {
                        final Object fieldValue = convertValue(e.value);
                        doc.setField(e.field, isNewDoc ? fieldValue : new HashMap<String, Object>(1) {
                            {
                                put("set", fieldValue);
                            }
                        });
                    }
                    if (ttl > 0) {
                        Preconditions.checkArgument(isNewDoc, "Solr only supports TTL on new documents [%s]",
                                docId);
                        doc.setField(ttlField, String.format("+%dSECONDS", ttl));
                    }
                    changes.add(doc);
                }
            }

            commitDeletes(collectionName, deleteIds);
            commitDocumentChanges(collectionName, changes);
        }
    } catch (IllegalArgumentException e) {
        throw new PermanentBackendException("Unable to complete query on Solr.", e);
    } catch (Exception e) {
        throw storageException(e);
    }
}

From source file:com.thinkaurelius.titan.diskstorage.solr.Solr5Index.java
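
The Solr 5 variant of the previous example uses the same idiom: additions are removed from the field-deletion set before the deletions are applied to the index.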

@Override
public void mutate(Map<String, Map<String, IndexMutation>> mutations,
        KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    logger.debug("Mutating SOLR");
    try {
        for (Map.Entry<String, Map<String, IndexMutation>> stores : mutations.entrySet()) {
            String collectionName = stores.getKey();
            String keyIdField = getKeyFieldId(collectionName);

            List<String> deleteIds = new ArrayList<>();
            Collection<SolrInputDocument> changes = new ArrayList<>();

            for (Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
                String docId = entry.getKey();
                IndexMutation mutation = entry.getValue();
                Preconditions.checkArgument(!(mutation.isNew() && mutation.isDeleted()));
                Preconditions.checkArgument(!mutation.isNew() || !mutation.hasDeletions());
                Preconditions.checkArgument(!mutation.isDeleted() || !mutation.hasAdditions());

                //Handle any deletions
                if (mutation.hasDeletions()) {
                    if (mutation.isDeleted()) {
                        logger.trace("Deleting entire document {}", docId);
                        deleteIds.add(docId);
                    } else {
                        HashSet<IndexEntry> fieldDeletions = Sets.newHashSet(mutation.getDeletions());
                        if (mutation.hasAdditions()) {
                            for (IndexEntry indexEntry : mutation.getAdditions()) {
                                fieldDeletions.remove(indexEntry);
                            }
                        }
                        deleteIndividualFieldsFromIndex(collectionName, keyIdField, docId, fieldDeletions);
                    }
                }

                if (mutation.hasAdditions()) {
                    int ttl = mutation.determineTTL();

                    SolrInputDocument doc = new SolrInputDocument();
                    doc.setField(keyIdField, docId);

                    boolean isNewDoc = mutation.isNew();

                    if (isNewDoc)
                        logger.trace("Adding new document {}", docId);

                    for (IndexEntry e : mutation.getAdditions()) {
                        final Object fieldValue = convertValue(e.value);
                        doc.setField(e.field, isNewDoc ? fieldValue : new HashMap<String, Object>(1) {
                            {
                                put("set", fieldValue);
                            }
                        });
                    }
                    if (ttl > 0) {
                        Preconditions.checkArgument(isNewDoc, "Solr only supports TTL on new documents [%s]",
                                docId);
                        doc.setField(ttlField, String.format("+%dSECONDS", ttl));
                    }
                    changes.add(doc);
                }
            }

            commitDeletes(collectionName, deleteIds);
            commitDocumentChanges(collectionName, changes);
        }
    } catch (Exception e) {
        throw storageException(e);
    }
}

From source file:azkaban.project.DirectoryYamlFlowLoader.java
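
A depth-first cycle check over flow dependencies: each node is added to the recursion stack on entry and removed on exit, so finding a parent in recStack while it is still on the current path signals a cycle.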

private void addEdges(final AzkabanNode node, final AzkabanFlow azkabanFlow, final String flowName,
        final HashSet<String> recStack, final HashSet<String> visited) {
    if (!visited.contains(node.getName())) {
        recStack.add(node.getName());
        visited.add(node.getName());
        final List<String> dependsOnList = node.getDependsOn();
        for (final String parent : dependsOnList) {
            final Edge edge = new Edge(parent, node.getName());
            if (!this.edgeMap.containsKey(flowName)) {
                this.edgeMap.put(flowName, new ArrayList<>());
            }
            this.edgeMap.get(flowName).add(edge);

            if (recStack.contains(parent)) {
                // Cycles found, including self cycle.
                edge.setError("Cycles found.");
                this.errors.add("Cycles found at " + edge.getId());
            } else {
                // Valid edge. Continue to process the parent node recursively.
                addEdges(azkabanFlow.getNode(parent), azkabanFlow, flowName, recStack, visited);
            }
        }
        recStack.remove(node.getName());
    }
}

From source file:com.android.mail.ui.AnimatedAdapter.java
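
Conversation IDs are removed from the set of pending animations as each item completes; once the set is empty, the adapter performs the next queued action and refreshes the list.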

private void updateAnimatingConversationItems(Object obj, HashSet<Long> items) {
    if (!items.isEmpty()) {
        if (obj instanceof ConversationItemView) {
            final ConversationItemView target = (ConversationItemView) obj;
            final long id = target.getConversation().id;
            items.remove(id);
            mAnimatingViews.remove(id);
            if (items.isEmpty()) {
                performAndSetNextAction(null);
                notifyDataSetChanged();
            }
        }
    }
}

From source file:com.google.dart.tools.core.DartCore.java
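
The "dart" extension is removed from the collected set so it can be re-inserted at index 0 of the result, guaranteeing it always appears first without producing a duplicate entry.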

/**
 * Return the list of known Dart-like file extensions. Dart-like extensions are defined in the
 * {@link Platform.getContentManager() content type manager} for the
 * {@link #DART_SOURCE_CONTENT_TYPE}. Note that a Dart-like extension does not include the leading
 * dot, and that the "dart" extension is always defined as a Dart-like extension.
 *
 * @return the list of known Dart-like file extensions
 */
public static String[] getDartLikeExtensions() {
    IContentType dartContentType = Platform.getContentTypeManager().getContentType(DART_SOURCE_CONTENT_TYPE);
    HashSet<String> extensionSet = new HashSet<String>();
    for (IContentType contentType : Platform.getContentTypeManager().getAllContentTypes()) {
        if (contentType.isKindOf(dartContentType)) {
            for (String extension : contentType.getFileSpecs(IContentType.FILE_EXTENSION_SPEC)) {
                extensionSet.add(extension);
            }
        }
    }
    extensionSet.remove(Extensions.DART);
    ArrayList<String> extensionList = new ArrayList<String>(extensionSet);
    extensionList.add(0, Extensions.DART);
    return extensionList.toArray(new String[extensionList.size()]);
}

From source file:com.linkedin.databus.core.TestDbusEventBufferPersistence.java
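
Expected meta-file names and session IDs are gathered into a set and removed as each directory entry is matched; an empty set at the end proves the directory holds nothing unexpected.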

@Test
public void testMetaFileCloseMult() throws Exception {
    int maxEventBufferSize = 1144;
    int maxIndividualBufferSize = 500;
    int bufNum = maxEventBufferSize / maxIndividualBufferSize;
    if (maxEventBufferSize % maxIndividualBufferSize > 0)
        bufNum++;

    DbusEventBuffer.StaticConfig config = getConfig(maxEventBufferSize, maxIndividualBufferSize, 100, 500,
            AllocationPolicy.MMAPPED_MEMORY, _mmapDirStr, true);

    // create buffer mult
    DbusEventBufferMult bufMult = createBufferMult(config);

    // Save all the files and validate the meta files.
    bufMult.close();
    for (DbusEventBuffer dbusBuf : bufMult.bufIterable()) {
        File metaFile = new File(_mmapDir, dbusBuf.metaFileName());
        // check that the meta file was saved
        Assert.assertTrue(metaFile.exists());
        validateFiles(metaFile, bufNum);
    }
    File[] entries = _mmapDir.listFiles();

    // When we create a new multi-buffer, we should get renamed files as well as new files.
    bufMult = createBufferMult(config);
    entries = _mmapDir.listFiles(); // Has session dirs and renamed meta files.
    // Create an info file for one buffer.
    DbusEventBuffer buf = bufMult.bufIterable().iterator().next();
    buf.saveBufferMetaInfo(true);
    File infoFile = new File(_mmapDir, buf.metaFileName() + ".info");
    Assert.assertTrue(infoFile.exists());

    // Create a session directory that has one file in it.
    File badSes1 = new File(_mmapDir, DbusEventBuffer.getSessionPrefix() + "m");
    badSes1.mkdir();
    badSes1.deleteOnExit();
    File junkFile = new File(badSes1.getAbsolutePath() + "/junkFile");
    junkFile.createNewFile();
    junkFile.deleteOnExit();
    // Create a directory that is empty
    File badSes2 = new File(_mmapDir, DbusEventBuffer.getSessionPrefix() + "n");
    badSes2.mkdir();
    badSes2.deleteOnExit();

    // Create a good file under mmap directory that we don't want to see removed.
    final String goodFile = "GoodFile";
    File gf = new File(_mmapDir, goodFile);
    gf.createNewFile();

    // Now close the multibuf, and see that the new files are still there.
    // We should have deleted the unused sessions and info files.
    bufMult.close();

    HashSet<String> validEntries = new HashSet<String>(bufNum);
    for (DbusEventBuffer dbusBuf : bufMult.bufIterable()) {
        File metaFile = new File(_mmapDir, dbusBuf.metaFileName());
        // check that the meta file was saved
        Assert.assertTrue(metaFile.exists());
        validateFiles(metaFile, bufNum);
        validEntries.add(metaFile.getName());
        DbusEventBufferMetaInfo mi = new DbusEventBufferMetaInfo(metaFile);
        mi.loadMetaInfo();
        validEntries.add(mi.getSessionId());
    }

    validEntries.add(goodFile);

    // Now we should be left with meta files, and session dirs and nothing else.
    entries = _mmapDir.listFiles();
    for (File f : entries) {
        Assert.assertTrue(validEntries.contains(f.getName()));
        validEntries.remove(f.getName());
    }
    Assert.assertTrue(validEntries.isEmpty());

    // And everything else should have moved to the .BAK directory
    entries = _mmapBakDir.listFiles();
    HashMap<String, File> fileHashMap = new HashMap<String, File>(entries.length);
    for (File f : entries) {
        fileHashMap.put(f.getName(), f);
    }

    Assert.assertTrue(fileHashMap.containsKey(badSes1.getName()));
    Assert.assertTrue(fileHashMap.get(badSes1.getName()).isDirectory());
    Assert.assertEquals(fileHashMap.get(badSes1.getName()).listFiles().length, 1);
    Assert.assertEquals(fileHashMap.get(badSes1.getName()).listFiles()[0].getName(), junkFile.getName());
    fileHashMap.remove(badSes1.getName());

    Assert.assertTrue(fileHashMap.containsKey(badSes2.getName()));
    Assert.assertTrue(fileHashMap.get(badSes2.getName()).isDirectory());
    Assert.assertEquals(fileHashMap.get(badSes2.getName()).listFiles().length, 0);
    fileHashMap.remove(badSes2.getName());

    // We should have the renamed meta files in the hash now.
    for (File f : entries) {
        if (f.getName().startsWith(DbusEventBuffer.getMmapMetaInfoFileNamePrefix())) {
            Assert.assertTrue(fileHashMap.containsKey(f.getName()));
            Assert.assertTrue(f.isFile());
            fileHashMap.remove(f.getName());
        }
    }

    Assert.assertTrue(fileHashMap.isEmpty());

    // One more test to make sure we create the BAK directory dynamically if it does not exist.
    FileUtils.deleteDirectory(_mmapBakDir);
    bufMult = createBufferMult(config);
    entries = _mmapDir.listFiles();
    // Create an info file for one buffer.
    buf = bufMult.bufIterable().iterator().next();
    buf.saveBufferMetaInfo(true);
    infoFile = new File(_mmapDir, buf.metaFileName() + ".info");
    Assert.assertTrue(infoFile.exists());
    bufMult.close();
    entries = _mmapBakDir.listFiles();
    fileHashMap = new HashMap<String, File>(entries.length);
    for (File f : entries) {
        fileHashMap.put(f.getName(), f);
    }
    Assert.assertTrue(fileHashMap.containsKey(infoFile.getName()));
    Assert.assertTrue(fileHashMap.get(infoFile.getName()).isFile());
}

From source file:org.apache.ambari.server.stack.StackManagerTest.java
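
Here assertTrue(expectedServices.remove(...)) checks two things at once: the discovered service was expected, and it has not been seen before, since a second remove of the same name would return false.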

@Test
public void testStackVersionInheritance_includeAllServices() {
    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
    assertNotNull(stack);
    assertEquals("HDP", stack.getName());
    assertEquals("2.1.1", stack.getVersion());
    Collection<ServiceInfo> services = stack.getServices();

    //should include all stacks in hierarchy
    assertEquals(16, services.size());
    HashSet<String> expectedServices = new HashSet<String>();
    expectedServices.add("GANGLIA");
    expectedServices.add("HBASE");
    expectedServices.add("HCATALOG");
    expectedServices.add("HDFS");
    expectedServices.add("HIVE");
    expectedServices.add("MAPREDUCE2");
    expectedServices.add("OOZIE");
    expectedServices.add("PIG");
    expectedServices.add("SQOOP");
    expectedServices.add("YARN");
    expectedServices.add("ZOOKEEPER");
    expectedServices.add("STORM");
    expectedServices.add("FLUME");
    expectedServices.add("FAKENAGIOS");
    expectedServices.add("TEZ");
    expectedServices.add("AMBARI_METRICS");

    ServiceInfo pigService = null;
    for (ServiceInfo service : services) {
        if (service.getName().equals("PIG")) {
            pigService = service;
        }
        assertTrue(expectedServices.remove(service.getName()));
    }
    assertTrue(expectedServices.isEmpty());

    // extended values
    assertNotNull(pigService);
    assertEquals("0.12.1.2.1.1", pigService.getVersion());
    assertEquals("Scripting platform for analyzing large datasets (Extended)", pigService.getComment());
    //base value
    ServiceInfo basePigService = stackManager.getStack("HDP", "2.0.5").getService("PIG");
    assertEquals("0.11.1.2.0.5.0", basePigService.getVersion());
    assertEquals(1, basePigService.getComponents().size());
    // new component added in extended version
    assertEquals(2, pigService.getComponents().size());
    // no properties in base service
    assertEquals(0, basePigService.getProperties().size());
    assertEquals(1, pigService.getProperties().size());
    assertEquals("content", pigService.getProperties().get(0).getName());
}