List of usage examples for java.util.HashSet.remove
public boolean remove(Object o)
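Removes the specified element from the set if it is present, and returns true only if the set contained the element. A minimal stand-alone sketch of this behavior (the class and variable names below are illustrative only, not taken from the examples that follow):

import java.util.Arrays;
import java.util.HashSet;

public class HashSetRemoveDemo {
    public static void main(String[] args) {
        HashSet<String> colors = new HashSet<>(Arrays.asList("red", "green", "blue"));
        System.out.println(colors.remove("green")); // true: "green" was present and has been removed
        System.out.println(colors.remove("green")); // false: "green" is no longer in the set
        System.out.println(colors);                 // e.g. [red, blue] (iteration order is not guaranteed)
    }
}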
From source file:mase.spec.SafeHybridExchanger.java
@Override
protected void mergeProcess(EvolutionState state) {
    super.mergeProcess(state);
    // add foreign individuals to the populations outside the stability threshold
    for (MetaPopulation mp : metaPops) {
        if (mp.age >= stabilityTime) {
            HashSet<MetaPopulation> current = new HashSet<MetaPopulation>();
            for (MetaPopulation mpf : metaPops) {
                if (mp != mpf && mpf.age >= stabilityTime) {
                    current.add(mpf);
                }
            }
            Iterator<Foreign> iter = mp.foreigns.iterator();
            while (iter.hasNext()) {
                Foreign next = iter.next();
                if (current.contains(next.origin)) {
                    current.remove(next.origin);
                } else {
                    iter.remove();
                }
            }
            for (MetaPopulation mpf : current) {
                mp.foreigns.add(new Foreign(mpf));
            }
        }
    }
}
From source file:com.nttec.everychan.ui.presentation.Subscriptions.java
/**
 * Checks the posts of a thread page against the words recorded earlier by
 * {@link #detectOwnPost(String, String, String, String)} and, if a matching post is found,
 * adds a subscription to it.
 * @param page the thread page to scan
 * @param startPostIndex index of the first post to check
 */
@SuppressWarnings("unchecked")
public void checkOwnPost(SerializablePage page, int startPostIndex) {
    if (page.pageModel == null || page.pageModel.type != UrlPageModel.TYPE_THREADPAGE || page.posts == null)
        return;
    String chan = page.pageModel.chanName;
    String board = page.pageModel.boardName;
    String thread = page.pageModel.threadNumber;
    Object[] tuple = waitingOwnPost;
    if (tuple != null && tuple[0].equals(chan) && tuple[1].equals(board) && tuple[2].equals(thread)) {
        waitingOwnPost = null;
        int postCount = page.posts.length - startPostIndex;
        if (postCount <= 1) {
            if (postCount == 1 && page.posts[startPostIndex] != null)
                addSubscription(chan, board, thread, page.posts[startPostIndex].number);
            return;
        }
        List<int[]> result = new ArrayList<>(postCount);
        List<String> waitingWords = (List<String>) tuple[3];
        for (int i = startPostIndex; i < page.posts.length; ++i) {
            if (page.posts[i] == null || page.posts[i].comment == null)
                continue;
            HashSet<String> postWords = new HashSet<>(commentToWordsList(htmlToComment(page.posts[i].comment)));
            //Logger.d(TAG, "checking post i=" + i + "\ncomment: " + page.posts[i].comment + "\nwords:" + postWords);
            int wordsCount = 0;
            for (String waitingWord : waitingWords)
                if (postWords.remove(waitingWord))
                    ++wordsCount;
            result.add(new int[] { i, wordsCount, postWords.size() });
            //Logger.d(TAG, "result: overlap=" + wordsCount + "; remained=" + postWords.size());
        }
        if (result.size() == 0)
            return;
        Collections.sort(result, new Comparator<int[]>() {
            @Override
            public int compare(int[] lhs, int[] rhs) {
                int result = compareInt(rhs[1], lhs[1]);
                if (result == 0)
                    result = compareInt(lhs[2], rhs[2]);
                if (result == 0)
                    result = compareInt(lhs[0], rhs[0]);
                return result;
            }

            private int compareInt(int lhs, int rhs) {
                return lhs < rhs ? -1 : (lhs == rhs ? 0 : 1);
            }
        });
        //for (int[] entry : result) Logger.d(TAG, "[" + entry[0] + ";" + entry[1] + ";" + entry[2] + "]");
        addSubscription(chan, board, thread, page.posts[result.get(0)[0]].number);
    }
}
From source file:com.predic8.membrane.annot.SpringConfigurationXSDGeneratingAnnotationProcessor.java
private Set<? extends Element> getCachedElementsAnnotatedWith(RoundEnvironment roundEnv,
        Class<? extends Annotation> annotation) {
    //FileObject o = processingEnv.getFiler().createResource(StandardLocation.CLASS_OUTPUT, "META-INF", "membrane.cache");
    if (cache == null)
        read();

    HashSet<Element> result = cache.get(annotation);
    if (result == null) {
        // update cache
        cache.put(annotation, result = new HashSet<Element>(roundEnv.getElementsAnnotatedWith(annotation)));
    } else {
        for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) {
            result.remove(e);
            result.add(e);
        }
    }
    return result;
}
From source file:org.sipfoundry.sipxconfig.phone.polycom.CodecGroupsTest.java
private void assertCodecGroup(CodecGroupType codecGroup, DeviceVersion version) throws DocumentException {
    // Initialize a phone with the codec group under test.
    PolycomModel model = new PolycomModel();
    Set<String> features = new HashSet<String>();
    features.add(String.format("%s_CodecPref", codecGroup));
    model.setSupportedFeatures(features);

    PolycomPhone phone = new PolycomPhone();
    phone.setModel(model);
    phone.setDeviceVersion(version);
    PhoneTestDriver.supplyTestData(phone, new LinkedList<User>());

    // The adaptor setting for the multi-enum setting.
    Setting codec_adaptor = phone.getSettings().getSetting("voice/codecPref/" + codecGroup);
    assertNotNull(String.format("Failed to get the '%s' codec group Setting.", codecGroup), codec_adaptor);

    // The actual multi-enum codec options setting type.
    MultiEnumSetting codec_type = (MultiEnumSetting) codec_adaptor.getType();
    Collection<String> actual_options = codec_type.getEnums().values();

    // Subsequent assert messages will be inaccurate if the real problem is duplicate entries.
    assertEquals(String.format("The '%s' codec group contains one or more duplicate entries.", codecGroup),
            (new HashSet<String>(actual_options)).size(), actual_options.size());

    // *** 1. Test the set of available codec options. ***

    // The *expected* codec options.
    HashSet<String> expected_options = new HashSet<String>(CODECGROUP_OPTION_MAP.get(codecGroup));

    // Loop through each *actual* option that the setting is offering.
    HashSet<String> unexpected_actual_options = new HashSet<String>();
    for (String actual_option : actual_options) {
        // Attempt to remove this *actual* option from the *expected* list, but record it if it
        // was not actually found.
        if (!expected_options.remove(actual_option)) {
            unexpected_actual_options.add(actual_option);
        }
    }

    // Were any of the expected options not actually found?
    String message = String.format("The following '%s' codec group is missing the following options: %s.",
            codecGroup, expected_options);
    assertTrue(message, expected_options.isEmpty());

    // Were any of the actual options unexpected?
    message = String.format("The following '%s' codec group options were not expected: %s.",
            codecGroup, unexpected_actual_options);
    assertTrue(message, unexpected_actual_options.isEmpty());

    // *** 2. Test the set and preference order of default selected codecs. ***
    String assumed_separator = "|";
    String actual_selected = trimToNull(
            join((Collection<String>) codec_adaptor.getTypedValue(), assumed_separator));
    String expected_selected = trimToNull(join(CODECGROUP_SELECTED_MAP.get(codecGroup), assumed_separator));

    assertEquals(String.format("The '%s' codec group's default selected codecs are incorrect.", codecGroup),
            expected_selected, actual_selected);
}
From source file:org.entrystore.rest.util.jdil.JDILParser.java
/**
 * Checks whether a JDIL object <code>jdil</code> contains loops or any "@id" value which also
 * occurs in <code>ancestors</code>.
 *
 * @param jdil any JSONObject which is expanded in the way described in expandJDILObject
 * @param ancestors any string set
 * @return true if any object in <code>jdil</code> has a descendant object with the same "@id"
 * value as itself or if any object in <code>jdil</code> has an "@id" value which occurs in
 * <code>ancestors</code>, false otherwise.
 */
private static Boolean hasLoops(JSONObject jdil, HashSet<String> ancestors) {
    Object tmpObj = null;
    String id = null;
    Boolean hasLoop = false;
    if (jdil.has("@id")) {
        tmpObj = jdil.opt("@id");
        if (tmpObj instanceof String) {
            id = (String) tmpObj;
            if (ancestors.contains(id)) {
                return true;
            } else {
                ancestors.add(id);
            }
        }
    }

    /* Recurse */
    Iterator keyIt = jdil.keys();
    while (keyIt.hasNext()) {
        String key = (String) keyIt.next();
        tmpObj = jdil.opt(key);
        if (tmpObj instanceof JSONObject) {
            if (hasLoops((JSONObject) tmpObj, ancestors)) {
                hasLoop = true;
                break;
            }
        } else if (tmpObj instanceof JSONArray) {
            if (hasLoops((JSONArray) tmpObj, ancestors)) {
                hasLoop = true;
                break;
            }
        }
    }

    /* If we added the @id of this object - remove it now */
    /* We know that this @id was not in ancestors before so we can safely remove it */
    if (id != null) {
        ancestors.remove(id);
    }
    return hasLoop;
}
From source file:org.calrissian.accumulorecipes.commons.iterators.support.QueryEvaluator.java
/**
 * Evaluates the criteria against an event.
 *
 * @param eventFields
 */
public boolean evaluate(Key topKey, EventFields eventFields) {
    this.modifiedQuery = null;
    boolean rewritten = false;

    // Copy the criteria
    StringBuilder q = new StringBuilder(query);

    // Copy the literals, we are going to remove elements from this set
    // when they are added to the JEXL context. This will allow us to
    // determine which items in the criteria were *NOT* in the data.
    HashSet<String> literalsCopy = new HashSet<String>(literals);

    // Loop through the event fields and add them to the JexlContext.
    for (Entry<String, Set<FieldValue>> field : eventFields.asMap().entrySet()) {
        String fName = normalizeKey(topKey, field.getKey());
        fName = removeInvalidChars(fName);
        // If this field is not part of the expression, then skip it.
        if (!literals.contains(fName)) {
            continue;
        } else {
            literalsCopy.remove(fName);
        }

        // This field may have multiple values.
        if (field.getValue().size() == 0) {
            continue;
        } else if (field.getValue().size() == 1) {
            ctx.set(fName, new String(field.getValue().iterator().next().getValue()));
        } else {
            q = rewriteQuery(q, fName, field.getValue());
            rewritten = true;
        } // End of if
    } // End of loop

    // For any literals in the criteria that were not found in the data, add them to the context
    // with a null value.
    for (String lit : literalsCopy) {
        ctx.set(lit, null);
    }

    if (log.isDebugEnabled()) {
        log.debug("Evaluating criteria: " + q.toString());
    }
    this.modifiedQuery = q.toString();

    Boolean result = null;
    if (rewritten) {
        Script script = engine.createScript(this.modifiedQuery);
        try {
            result = (Boolean) script.execute(ctx);
        } catch (Exception e) {
            log.error("Error evaluating script: " + this.modifiedQuery + " against event"
                    + eventFields.toString(), e);
        }
    } else {
        Expression expr = engine.createExpression(this.modifiedQuery);
        try {
            result = (Boolean) expr.evaluate(ctx);
        } catch (Exception e) {
            log.error("Error evaluating expression: " + this.modifiedQuery + " against event"
                    + eventFields.toString(), e);
        }
    }
    if (null != result && result) {
        return true;
    } else {
        return false;
    }
}
From source file:com.illustrationfinder.process.post.HtmlPostProcessor.java
@Override
public List<String> generateKeywords() {
    // TODO If two words are always close to each other, they should be considered as an expression and managed like one word
    if (this.url == null)
        return null;

    try {
        // Retrieve the document and store it temporarily
        try (final InputStream stream = this.url.openStream()) {
            final String rawText = IOUtils.toString(stream);

            // Retrieve useful HTML data
            final Document document = Jsoup.parse(rawText);
            String htmlTitle = document.title();
            String htmlKeywords = document.select("meta[name=keywords]").text();
            String htmlDescription = document.select("meta[name=description]").text();

            // Extract the content of the raw text
            String content = ArticleExtractor.getInstance().getText(rawText);

            // Now we apply a simple algorithm to get keywords
            //   1) We remove all punctuation marks from the title
            //   2) We remove all words with less than 4 characters
            //   3) We remove excessive spacing and tabulations
            htmlTitle = htmlTitle.toLowerCase();
            htmlTitle = htmlTitle.replaceAll(PUNCTUATION_REGEX, "");
            htmlTitle = htmlTitle.replaceAll(WORD_WITH_LESS_THAN_4_CHARACTERS_REGEX, "");
            htmlTitle = htmlTitle.replaceAll(EXCESSIVE_SPACING_REGEX, " ");

            final List<String> keywords = new ArrayList<>();
            final List<String> keywordsList = Arrays.asList(htmlTitle.split(" "));
            for (String tmp : keywordsList) {
                if (tmp.length() >= MINIMUM_WORD_LENGTH) {
                    keywords.add(tmp);
                }
            }

            // If there are enough keywords, we return
            if (keywords.size() >= MINIMUM_KEYWORDS_COUNT) {
                return keywords;
            } else {
                // Otherwise, we look for more keywords from the text by taking the most frequent words
                content = content.toLowerCase();
                content = content.replaceAll(PUNCTUATION_REGEX, "");
                content = content.replaceAll(WORD_WITH_LESS_THAN_4_CHARACTERS_REGEX, "");
                content = content.replaceAll(EXCESSIVE_SPACING_REGEX, " ");

                final Map<String, Integer> frequencies = new HashMap<>();
                final String[] words = content.split(" ");

                // Count word frequencies
                for (final String word : words) {
                    if (frequencies.containsKey(word)) {
                        frequencies.put(word, frequencies.get(word) + 1);
                    } else {
                        frequencies.put(word, 1);
                    }
                }

                // Sort the words per frequency
                final SortedMap<Integer, HashSet<String>> sortedWords = new TreeMap<>();
                for (Map.Entry<String, Integer> entry : frequencies.entrySet()) {
                    if (sortedWords.containsKey(entry.getValue())) {
                        sortedWords.get(entry.getValue()).add(entry.getKey());
                    } else {
                        final HashSet<String> set = new HashSet<>();
                        set.add(entry.getKey());
                        sortedWords.put(entry.getValue(), set);
                    }
                }

                // Add the most frequent words until we reach the minimum keywords count
                while (keywords.size() < MINIMUM_KEYWORDS_COUNT) {
                    final HashSet<String> set = sortedWords.get(sortedWords.lastKey());
                    final String keyword = set.iterator().next();
                    set.remove(keyword);
                    if (set.size() == 0) {
                        sortedWords.remove(sortedWords.lastKey());
                    }
                    if (keyword.length() > MINIMUM_WORD_LENGTH) {
                        keywords.add(keyword);
                    }
                }
                return keywords;
            }
        }
    } catch (BoilerpipeProcessingException e) {
        // TODO
        e.printStackTrace();
    } catch (IOException e) {
        // TODO
        e.printStackTrace();
    }
    return null;
}
From source file:com.android.talkback.eventprocessor.ProcessorScreen.java
private void updateWindowTitlesMap(AccessibilityEvent event) {
    switch (event.getEventType()) {
    case AccessibilityEvent.TYPE_WINDOW_STATE_CHANGED: {
        // If split screen mode is NOT available, we only need to care about a single window.
        if (!mIsSplitScreenModeAvailable) {
            mWindowTitlesMap.clear();
        }

        int windowId = getWindowId(event);
        boolean shouldAnnounceEvent = shouldAnnounceEvent(event, windowId);
        CharSequence title = getWindowTitleFromEvent(event, shouldAnnounceEvent /* useContentDescription */);
        if (title != null) {
            if (shouldAnnounceEvent) {
                // When software keyboard is shown or hidden, TYPE_WINDOW_STATE_CHANGED
                // is dispatched with text describing the visibility of the keyboard.
                speakWithFeedback(title);
            } else {
                mWindowTitlesMap.put(windowId, title);

                if (getWindowType(event) == AccessibilityWindowInfo.TYPE_SYSTEM) {
                    mSystemWindowIdsSet.add(windowId);
                }

                CharSequence eventWindowClassName = event.getClassName();
                mWindowToClassName.put(windowId, eventWindowClassName);
                mWindowToPackageName.put(windowId, event.getPackageName());
            }
        }
    }
        break;
    case AccessibilityEvent.TYPE_WINDOWS_CHANGED: {
        HashSet<Integer> windowIdsToBeRemoved = new HashSet<Integer>(mWindowTitlesMap.keySet());
        List<AccessibilityWindowInfo> windows = mService.getWindows();
        for (AccessibilityWindowInfo window : windows) {
            windowIdsToBeRemoved.remove(window.getId());
        }
        for (Integer windowId : windowIdsToBeRemoved) {
            mWindowTitlesMap.remove(windowId);
            mSystemWindowIdsSet.remove(windowId);
            mWindowToClassName.remove(windowId);
            mWindowToPackageName.remove(windowId);
        }
    }
        break;
    }
}
From source file:importer.handler.post.stages.Splitter.java
/**
 * Percolate the versions accumulated in root to suitable sub-elements
 * @param elem the start node with its versions to percolate
 */
private void percolateDown(Element elem) {
    Node parent = elem.getParentNode();
    if (parent != null && parent.getNodeType() == Node.ELEMENT_NODE) {
        System.out.println(elem.getNodeName());
        String vers = ((Element) parent).getAttribute(VERSIONS);
        if (vers != null && vers.length() > 0) {
            if (!discriminator.isSibling(elem)) {
                Discriminator.addVersion(elem, vers);
                addDoneTag(elem);
            } else if (elem.hasAttribute(FINAL)) {
                String fVers = elem.getAttribute(FINAL);
                if (fVers != null && fVers.length() > 0) {
                    // find inverse versions
                    HashSet<String> invVers = new HashSet<String>();
                    String[] parts = vers.split(" ");
                    String[] iparts = fVers.split(" ");
                    for (int i = 0; i < parts.length; i++)
                        if (/*!parts[i].startsWith(DEL) &&*/ !parts[i].equals(BASE))
                            invVers.add(parts[i]);
                    for (int i = 0; i < iparts.length; i++)
                        if (invVers.contains(iparts[i]))
                            invVers.remove(iparts[i]);
                    String newVers = hashsetToString(invVers);
                    Discriminator.addVersion(elem, newVers);
                    addDoneTag(elem);
                    Element lastOChild = discriminator.lastOpenChild(elem);
                    while (lastOChild != null) {
                        Discriminator.addVersion(lastOChild, newVers);
                        lastOChild = discriminator.lastOpenChild(lastOChild);
                    }
                }
            }
            // else ignore it
        }
    }
    // now examine the children of elem
    Element child = Discriminator.firstChild(elem);
    while (child != null && !isDone(child)) {
        percolateDown(child);
        child = Discriminator.firstChild(child);
    }
    // finally the siblings of elem
    Element brother = Discriminator.nextSibling(elem, true);
    while (brother != null) {
        if (!isDone(brother))
            percolateDown(brother);
        brother = Discriminator.nextSibling(brother, true);
    }
}
From source file:com.ikanow.aleph2.data_import.services.TestHarvestContext.java
@Test
public void test_produceConsume() throws JsonProcessingException, IOException, InterruptedException {
    _logger.info("running test_produceConsume");

    final DataBucketBean bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::full_name, "/TEST/HARVEST/CONTEXT")
            .with(DataBucketBean::master_enrichment_type, MasterEnrichmentType.streaming).done().get();

    final ObjectMapper mapper = BeanTemplateUtils.configureMapper(Optional.empty());

    final HarvestContext test_context = _app_injector.getInstance(HarvestContext.class);
    assertEquals(Optional.empty(), test_context.getBucket());

    String message1 = "{\"key\":\"val\"}";
    String message2 = "{\"key\":\"val2\"}";
    String message3 = "{\"key\":\"val3\"}";
    String message4 = "{\"key\":\"val4\"}";

    Map<String, Object> msg3 = ImmutableMap.<String, Object>builder().put("key", "val3").build();
    Map<String, Object> msg4 = ImmutableMap.<String, Object>builder().put("key", "val4").build();

    // currently mock cds produce does nothing
    try {
        test_context.sendObjectToStreamingPipeline(Optional.empty(), Either.left(mapper.readTree(message1)));
        fail("Should fail, bucket not set and not specified");
    } catch (Exception e) {
    }

    test_context.setBucket(bucket);
    assertEquals(bucket, test_context.getBucket().get());

    Iterator<String> iter = test_context._distributed_services.consumeAs(
            BucketUtils.getUniqueSignature("/TEST/HARVEST/CONTEXT", Optional.empty()), Optional.empty(),
            Optional.empty());

    test_context.sendObjectToStreamingPipeline(Optional.empty(), Either.left(mapper.readTree(message1)));
    test_context.sendObjectToStreamingPipeline(Optional.of(bucket), Either.left(mapper.readTree(message2)));
    test_context.sendObjectToStreamingPipeline(Optional.empty(), Either.right(msg3));
    test_context.sendObjectToStreamingPipeline(Optional.of(bucket), Either.right(msg4));

    Thread.sleep(5000); // wait a few seconds for producers to dump batch

    final HashSet<String> mutable_set = new HashSet<>(Arrays.asList(message1, message2, message3, message4));

    // nothing will be in consume
    long count = 0;
    while (iter.hasNext()) {
        String msg = iter.next();
        assertTrue("Sent this message: " + msg, mutable_set.remove(msg));
        count++;
    }
    assertEquals(4, count);
}