List of usage examples for java.util.Set.clear()
void clear();
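Before the examples, a minimal sketch of the method's contract, using only JDK types (class and variable names are illustrative): clear() removes every element but leaves the set itself usable, and unmodifiable views reject it.

import java.util.HashSet;
import java.util.Set;

public class SetClearDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        names.add("alice");
        names.add("bob");

        // clear() removes every element; the set instance stays usable.
        names.clear();
        System.out.println(names.isEmpty()); // true

        // Note: calling clear() on an unmodifiable view
        // (e.g. Collections.unmodifiableSet) throws UnsupportedOperationException.
    }
}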
From source file:com.cloudera.impala.service.ZooKeeperSession.java
/**
 * Updates the worker or planner membership, including calling into the BE.
 */
private void updateMembership(TMembershipUpdateType type, String member, boolean planner)
        throws InternalException {
    PathChildrenCache membership = planner ? plannerMembership_ : workerMembership_;
    Set<String> members = planner ? planners_ : workers_;
    synchronized (members) {
        TMembershipUpdate update = new TMembershipUpdate(planner, type, new ArrayList<String>());
        switch (type) {
        case ADD:
            Preconditions.checkNotNull(member);
            members.add(member);
            update.membership.add(member);
            break;
        case REMOVE:
            Preconditions.checkNotNull(member);
            members.remove(member);
            update.membership.add(member);
            break;
        case FULL_LIST:
            Preconditions.checkState(member == null);
            members.clear();
            for (ChildData d : membership.getCurrentData()) {
                members.add(d.getPath());
            }
            update.membership.addAll(members);
            break;
        default:
            Preconditions.checkState(false);
        }
        FeSupport.UpdateMembership(update);
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.java
public Set<String> getAccessibleNodeLabels(String queue) {
    String accessibleLabelStr = get(getQueuePrefix(queue) + ACCESSIBLE_NODE_LABELS);

    // When accessible-label is null,
    if (accessibleLabelStr == null) {
        // only return null when queue is not ROOT
        if (!queue.equals(ROOT)) {
            return null;
        }
    } else {
        // print a warning when accessibleNodeLabel is specified in config and
        // queue is ROOT
        if (queue.equals(ROOT)) {
            LOG.warn("Accessible node labels for root queue will be ignored,"
                    + " it will be automatically set to \"*\".");
        }
    }

    // always return ANY for queue root
    if (queue.equals(ROOT)) {
        return ImmutableSet.of(RMNodeLabelsManager.ANY);
    }

    // In other cases, split the accessibleLabelStr by ","
    Set<String> set = new HashSet<String>();
    for (String str : accessibleLabelStr.split(",")) {
        if (!str.trim().isEmpty()) {
            set.add(str.trim());
        }
    }

    // if the labels contain "*", keep only ANY
    if (set.contains(RMNodeLabelsManager.ANY)) {
        set.clear();
        set.add(RMNodeLabelsManager.ANY);
    }
    return Collections.unmodifiableSet(set);
}
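A side note on the example above: clear() is applied to the local working set before it is wrapped, because the Collections.unmodifiableSet view returned to callers rejects all mutators. A minimal sketch of both behaviors (class and variable names are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class UnmodifiableClearDemo {
    public static void main(String[] args) {
        Set<String> labels = new HashSet<>();
        labels.add("gpu");
        labels.add("*");

        // Normalize: if the wildcard is present, keep only the wildcard.
        if (labels.contains("*")) {
            labels.clear();
            labels.add("*");
        }

        Set<String> view = Collections.unmodifiableSet(labels);
        try {
            view.clear(); // mutators on the unmodifiable view always throw
        } catch (UnsupportedOperationException expected) {
            System.out.println("clear() rejected: " + expected);
        }
    }
}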
From source file:com.alibaba.wasp.master.EntityGroupStates.java
/**
 * A server is offline, all entityGroups on it are dead.
 */
public synchronized List<EntityGroupState> serverOffline(final ServerName sn) {
    // Clean up this server from the map of servers to entityGroups, and remove
    // all entityGroups of this server from the online map of entityGroups.
    List<EntityGroupState> rits = new ArrayList<EntityGroupState>();
    Set<EntityGroupInfo> assignedEntityGroups = serverHoldings.get(sn);
    if (assignedEntityGroups == null || assignedEntityGroups.isEmpty()) {
        // No entityGroups on this server, we are done, return empty list of RITs
        return rits;
    }
    for (EntityGroupInfo entityGroup : assignedEntityGroups) {
        entityGroupAssignments.remove(entityGroup);
    }
    // See if any of the entityGroups that were online on this server were in RIT.
    // If they are, normal timeouts will deal with them appropriately so
    // let's skip a manual re-assignment.
    for (EntityGroupState state : entityGroupsInTransition.values()) {
        if (assignedEntityGroups.contains(state.getEntityGroup())) {
            rits.add(state);
        }
    }
    assignedEntityGroups.clear();
    this.notifyAll();
    return rits;
}
From source file:org.apache.geode.internal.cache.IncrementalBackupDUnitTest.java
/**
 * Successful if a member performs a full backup when its backup data is not present in the
 * baseline (for whatever reason). This also tests what happens when a member is offline during
 * the baseline backup.
 *
 * The test is regarded as successful when all of the missing member's oplog files are backed up
 * during an incremental backup. This means that the member performed a full backup because its
 * oplogs were missing in the baseline.
 */
@Test
public void testMissingMemberInBaseline() throws Exception {
    // Simulate the missing member by forcing a persistent member to go offline.
    final PersistentID missingMember = disconnect(Host.getHost(0).getVM(0), Host.getHost(0).getVM(1));

    /*
     * Perform a baseline and make sure that the list of offline disk stores contains our missing
     * member.
     */
    BackupStatus baselineStatus = performBaseline();
    assertBackupStatus(baselineStatus);
    assertNotNull(baselineStatus.getOfflineDiskStores());
    assertEquals(2, baselineStatus.getOfflineDiskStores().size());

    // Find all of the member's oplogs in the missing member's diskstore directory structure
    // (*.crf, *.krf, *.drf)
    Collection<File> missingMemberOplogFiles = FileUtils.listFiles(new File(missingMember.getDirectory()),
            new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(missingMemberOplogFiles.isEmpty());

    /*
     * Restart our missing member and make sure it is back online and part of the distributed
     * system.
     */
    openCache(Host.getHost(0).getVM(0));

    /*
     * After reconnecting, make sure the other members agree that the missing member is back
     * online.
     */
    final Set<PersistentID> missingMembers = new HashSet<>();
    Wait.waitForCriterion(new WaitCriterion() {
        @Override
        public boolean done() {
            missingMembers.clear();
            missingMembers.addAll(getMissingMembers(Host.getHost(0).getVM(1)));
            return !missingMembers.contains(missingMember);
        }

        @Override
        public String description() {
            return "[testMissingMemberInBaseline] Wait for missing member.";
        }
    }, 10000, 500, false);
    assertEquals(0, missingMembers.size());

    /*
     * Perform an incremental backup and make sure we have no offline disk stores.
     */
    BackupStatus incrementalStatus = performIncremental();
    assertBackupStatus(incrementalStatus);
    assertNotNull(incrementalStatus.getOfflineDiskStores());
    assertEquals(0, incrementalStatus.getOfflineDiskStores().size());

    // Get the missing member's member id, which is different from the PersistentID
    String memberId = getMemberId(Host.getHost(0).getVM(0));
    assertNotNull(memberId);

    // Get the list of backed up oplog files in the incremental backup for the missing member
    File incrementalMemberDir = getBackupDirForMember(getIncrementalDir(), memberId);
    Collection<File> backupOplogFiles = FileUtils.listFiles(incrementalMemberDir,
            new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(backupOplogFiles.isEmpty());

    // Transform the missing member's oplogs to just their file names.
    List<String> missingMemberOplogNames = new LinkedList<>();
    TransformUtils.transform(missingMemberOplogFiles, missingMemberOplogNames,
            TransformUtils.fileNameTransformer);

    // Transform the missing member's incremental backup oplogs to just their file names.
    List<String> backupOplogNames = new LinkedList<>();
    TransformUtils.transform(backupOplogFiles, backupOplogNames, TransformUtils.fileNameTransformer);

    /*
     * Make sure that the incremental backup for the missing member contains all of the operation
     * logs for that member. This proves that a full backup was performed for that member.
     */
    assertTrue(backupOplogNames.containsAll(missingMemberOplogNames));
}
From source file:org.apache.hadoop.hdfs.server.namenode.StandbySafeMode.java
private void removeOutstandingDatanodesInternal(Set<DatanodeID> nodes, boolean logOutStandingOnly)
        throws IOException {
    synchronized (nodes) {
        for (DatanodeID node : nodes) {
            if (logOutStandingOnly) {
                LOG.info("Failover - outstanding node: " + node + " - node is not removed (fast failover)");
            } else {
                try {
                    LOG.info("Failover - removing outstanding node: " + node);
                    namesystem.removeDatanode(node);
                    setDatanodeDead(node);
                } catch (Exception e) {
                    LOG.warn("Failover - caught exception when removing outstanding datanode " + node, e);
                }
            }
        }
        if (!logOutStandingOnly) {
            nodes.clear();
        }
    }
}
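The example above clears the shared set inside the same synchronized block that iterates it, so the drain is atomic with respect to concurrent writers. A minimal sketch of that guarded-drain pattern, with hypothetical names:

import java.util.HashSet;
import java.util.Set;

public class GuardedDrainDemo {
    private final Set<String> pending = new HashSet<>();

    /** Processes and atomically drains the pending set (illustrative). */
    public void drain(boolean dryRun) {
        synchronized (pending) {
            for (String item : pending) {
                System.out.println((dryRun ? "would process: " : "processing: ") + item);
            }
            if (!dryRun) {
                pending.clear(); // no writer can slip in between the loop and the clear
            }
        }
    }
}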
From source file:org.apache.asterix.optimizer.rules.subplan.InlineAllNtsInSubplanVisitor.java
private ILogicalOperator visitMultiInputOperator(ILogicalOperator op) throws AlgebricksException {
    orderingExprs.clear();
    Set<LogicalVariable> keyVarsForCurrentBranch = new HashSet<LogicalVariable>();
    for (int i = op.getInputs().size() - 1; i >= 0; --i) {
        // Stores key variables for the previous branch.
        keyVarsForCurrentBranch.addAll(correlatedKeyVars);
        correlatedKeyVars.clear();

        // Deals with single input operators.
        ILogicalOperator newChild = op.getInputs().get(i).getValue().accept(this, null);
        op.getInputs().get(i).setValue(newChild);

        if (correlatedKeyVars.isEmpty()) {
            correlatedKeyVars.addAll(keyVarsForCurrentBranch);
        }
        keyVarsForCurrentBranch.clear();
    }
    subtituteVariables(op);
    return op;
}
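The visitor above shuttles contents between two sets with addAll() followed by clear(), reusing the same instances across loop iterations instead of allocating new ones. A minimal sketch of the shuttle, with illustrative names:

import java.util.HashSet;
import java.util.Set;

public class SetShuttleDemo {
    public static void main(String[] args) {
        Set<Integer> current = new HashSet<>();
        Set<Integer> scratch = new HashSet<>();
        current.add(1);
        current.add(2);

        // Move the contents of 'current' into 'scratch', then reset 'current'
        // for the next iteration -- the same clear()/addAll() shuttle used above.
        scratch.addAll(current);
        current.clear();

        System.out.println("scratch=" + scratch + " current=" + current);
    }
}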
From source file:eu.stratosphere.pact.runtime.task.DataSinkTaskTest.java
@Test
@SuppressWarnings("unchecked")
public void testSortingDataSinkTask() {
    int keyCnt = 100;
    int valCnt = 20;

    super.initEnvironment(MEMORY_MANAGER_SIZE * 4, NETWORK_BUFFER_SIZE);
    super.addInput(new UniformRecordGenerator(keyCnt, valCnt, true), 0);

    DataSinkTask<Record> testTask = new DataSinkTask<Record>();

    // set sorting
    super.getTaskConfig().setInputLocalStrategy(0, LocalStrategy.SORT);
    super.getTaskConfig().setInputComparator(new RecordComparatorFactory(new int[] { 1 },
            ((Class<? extends Key<?>>[]) new Class[] { IntValue.class })), 0);
    super.getTaskConfig().setMemoryInput(0, 4 * 1024 * 1024);
    super.getTaskConfig().setFilehandlesInput(0, 8);
    super.getTaskConfig().setSpillingThresholdInput(0, 0.8f);

    super.registerFileOutputTask(testTask, MockOutputFormat.class, new File(tempTestPath).toURI().toString());

    try {
        testTask.invoke();
    } catch (Exception e) {
        LOG.debug(e);
        Assert.fail("Invoke method caused exception.");
    }

    File tempTestFile = new File(this.tempTestPath);
    Assert.assertTrue("Temp output file does not exist", tempTestFile.exists());

    FileReader fr = null;
    BufferedReader br = null;
    try {
        fr = new FileReader(tempTestFile);
        br = new BufferedReader(fr);

        Set<Integer> keys = new HashSet<Integer>();
        int curVal = -1;
        while (br.ready()) {
            String line = br.readLine();
            Integer key = Integer.parseInt(line.substring(0, line.indexOf("_")));
            Integer val = Integer.parseInt(line.substring(line.indexOf("_") + 1, line.length()));

            // check that values are in correct order
            Assert.assertTrue("Values not in ascending order", val >= curVal);

            // next value hit
            if (val > curVal) {
                if (curVal != -1) {
                    // check that we saw 100 distinct keys for this value
                    Assert.assertTrue("Keys missing for value", keys.size() == 100);
                }
                // empty keys set
                keys.clear();
                // update current value
                curVal = val;
            }
            Assert.assertTrue("Duplicate key for value", keys.add(key));
        }
    } catch (FileNotFoundException e) {
        Assert.fail("Out file got lost...");
    } catch (IOException ioe) {
        Assert.fail("Caught IOE while reading out file");
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (Throwable t) {
            }
        }
        if (fr != null) {
            try {
                fr.close();
            } catch (Throwable t) {
            }
        }
    }
}
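The verification loop above reuses one scratch set across value groups, calling clear() at each group boundary rather than allocating a new HashSet per group. A minimal standalone sketch of that per-group duplicate check (data and names are illustrative):

import java.util.HashSet;
import java.util.Set;

public class GroupedDuplicateCheck {
    public static void main(String[] args) {
        int[][] records = { { 1, 0 }, { 2, 0 }, { 1, 1 }, { 3, 1 } }; // {key, value}, sorted by value
        Set<Integer> keys = new HashSet<>();
        int curVal = -1;
        for (int[] rec : records) {
            if (rec[1] > curVal) {
                keys.clear(); // reuse the same set for the next value group
                curVal = rec[1];
            }
            if (!keys.add(rec[0])) {
                throw new IllegalStateException("duplicate key " + rec[0] + " for value " + curVal);
            }
        }
        System.out.println("no duplicates per value group");
    }
}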
From source file:org.broad.igv.cbio.GeneNetwork.java
public ScoreData collectScoreData(String name, List<Track> tracks, Iterable<String> attributes) {
    int zoom = 0;

    List<NamedFeature> features = FeatureDB.getFeaturesList(name, Integer.MAX_VALUE);

    // If we are viewing a gene list, use the frame
    List<ReferenceFrame> frames = Globals.isHeadless() ? null : FrameManager.getFrames();
    ReferenceFrame frame = Globals.isHeadless() ? null : FrameManager.getDefaultFrame();
    if (frames != null) {
        for (ReferenceFrame frm : frames) {
            if (frm.getName().equalsIgnoreCase(name)) {
                frame = frm;
            }
        }
    }
    String frameName = frame != null ? frame.getName() : null;

    ScoreData<String, Float> results = new ScoreData(RegionScoreType.values().length);

    int initCapacity = tracks.size() / 10;

    // The names of all samples these tracks cover
    Set<String> allSamples = new HashSet<String>(initCapacity);

    // Each track/feature pair represents a region of a sample.
    // We store whether that sample has been altered in ANY way.
    Set<String> anyAlteration = new HashSet<String>(initCapacity);

    // Set of samples which have data for this type
    Set<String> samplesForType = new HashSet<String>(initCapacity);

    // Set of samples which have been altered, using this type.
    Set<String> alteredSamplesForType = new HashSet<String>(initCapacity);

    for (String attr : attributes) {
        if (!bounds.containsKey(attr)) {
            throw new IllegalArgumentException("Have no bounds for " + attr);
        }

        RegionScoreType type = attributeMap.get(attr);
        float[] curBounds = bounds.get(attr);

        samplesForType.clear();
        alteredSamplesForType.clear();

        for (NamedFeature feat : features) {
            if (!name.equalsIgnoreCase(feat.getName())) {
                continue;
            }

            int featStart = feat.getStart();
            int featEnd = feat.getEnd();
            for (Track track : tracks) {
                if (!track.isVisible()) {
                    continue;
                }
                String sample = track.getSample();

                // If track is wrong type, or if sample has already been marked altered,
                // no further information can be gained
                if (!track.isRegionScoreType(type) || alteredSamplesForType.contains(sample)) {
                    //if(alteredSamplesForType.contains(sample)) assert samplesForType.contains(sample);
                    continue;
                }
                samplesForType.add(sample);

                float score = track.getRegionScore(feat.getChr(), featStart, featEnd, zoom, type, frameName,
                        tracks);
                if (score >= curBounds[0] && score <= curBounds[1] && !Float.isNaN(score)) {
                    alteredSamplesForType.add(sample);
                }
            }
        }

        allSamples.addAll(samplesForType);
        anyAlteration.addAll(alteredSamplesForType);

        float fractionAltered = ((float) alteredSamplesForType.size()) / samplesForType.size();
        results.put(attr, fractionAltered);
    }

    results.setPercentAltered(((float) anyAlteration.size()) / allSamples.size());
    return results;
}
From source file:org.apache.flink.runtime.operators.DataSinkTaskTest.java
@Test
@SuppressWarnings("unchecked")
public void testSortingDataSinkTask() {
    int keyCnt = 100;
    int valCnt = 20;
    double memoryFraction = 1.0;

    super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
    super.addInput(new UniformRecordGenerator(keyCnt, valCnt, true), 0);

    DataSinkTask<Record> testTask = new DataSinkTask<Record>();

    // set sorting
    super.getTaskConfig().setInputLocalStrategy(0, LocalStrategy.SORT);
    super.getTaskConfig().setInputComparator(new RecordComparatorFactory(new int[] { 1 },
            ((Class<? extends Key<?>>[]) new Class[] { IntValue.class })), 0);
    super.getTaskConfig().setRelativeMemoryInput(0, memoryFraction);
    super.getTaskConfig().setFilehandlesInput(0, 8);
    super.getTaskConfig().setSpillingThresholdInput(0, 0.8f);

    super.registerFileOutputTask(testTask, MockOutputFormat.class, new File(tempTestPath).toURI().toString());

    try {
        testTask.invoke();
    } catch (Exception e) {
        LOG.debug(e);
        Assert.fail("Invoke method caused exception.");
    }

    File tempTestFile = new File(this.tempTestPath);
    Assert.assertTrue("Temp output file does not exist", tempTestFile.exists());

    FileReader fr = null;
    BufferedReader br = null;
    try {
        fr = new FileReader(tempTestFile);
        br = new BufferedReader(fr);

        Set<Integer> keys = new HashSet<Integer>();
        int curVal = -1;
        while (br.ready()) {
            String line = br.readLine();
            Integer key = Integer.parseInt(line.substring(0, line.indexOf("_")));
            Integer val = Integer.parseInt(line.substring(line.indexOf("_") + 1, line.length()));

            // check that values are in correct order
            Assert.assertTrue("Values not in ascending order", val >= curVal);

            // next value hit
            if (val > curVal) {
                if (curVal != -1) {
                    // check that we saw 100 distinct keys for this value
                    Assert.assertTrue("Keys missing for value", keys.size() == 100);
                }
                // empty keys set
                keys.clear();
                // update current value
                curVal = val;
            }
            Assert.assertTrue("Duplicate key for value", keys.add(key));
        }
    } catch (FileNotFoundException e) {
        Assert.fail("Out file got lost...");
    } catch (IOException ioe) {
        Assert.fail("Caught IOE while reading out file");
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (Throwable t) {
            }
        }
        if (fr != null) {
            try {
                fr.close();
            } catch (Throwable t) {
            }
        }
    }
}
From source file:org.asqatasun.service.command.AuditCommandImpl.java
/**
 * @param prList
 * @param testList
 */
private void consolidate(Collection<ProcessResult> prList, Collection<Test> testList) {
    Set<ProcessResult> processResultSet = new HashSet<>();
    if (LOGGER.isDebugEnabled()) {
        if (testList.size() == 1) {
            LOGGER.debug(new StringBuilder("Consolidate ").append(prList.size()).append(" elements for test ")
                    .append(testList.iterator().next().getCode()).toString());
        } else {
            LOGGER.debug(new StringBuilder("Consolidate ").append(prList.size()).append(" elements for ")
                    .append(testList.size()).append(" tests ").toString());
        }
    }
    processResultSet.addAll(consolidatorService.consolidate(prList, testList));
    if (!processResultSet.isEmpty()) {
        audit.setStatus(AuditStatus.ANALYSIS);
    } else {
        LOGGER.warn("Audit has no net result");
        audit.setStatus(AuditStatus.ERROR);
    }
    Iterator<ProcessResult> iter = processResultSet.iterator();
    Set<ProcessResult> processResultSubset = new HashSet<>();
    int i = 0;
    while (iter.hasNext()) {
        ProcessResult pr = iter.next();
        pr.setNetResultAudit(audit);
        processResultSubset.add(pr);
        i++;
        if (i % consolidationTreatmentWindow == 0) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(new StringBuilder("Persisting Consolidation from ").append(i).append(TO_LOGGER_STR)
                        .append(i + consolidationTreatmentWindow).toString());
            }
            processResultDataService.saveOrUpdate(processResultSubset);
            processResultSubset.clear();
        }
    }
    processResultDataService.saveOrUpdate(processResultSubset);
    processResultSubset.clear();
    System.gc();
}
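The method above bounds memory by persisting results in windows: fill a subset, save it, clear() it for the next window, then flush the final partial batch. A minimal sketch of the idea; persist() and WINDOW are illustrative stand-ins, not the real DAO API:

import java.util.HashSet;
import java.util.Set;

public class BatchFlushDemo {
    private static final int WINDOW = 3; // illustrative batch size

    public static void main(String[] args) {
        Set<String> batch = new HashSet<>();
        String[] results = { "a", "b", "c", "d", "e" };
        int i = 0;
        for (String r : results) {
            batch.add(r);
            if (++i % WINDOW == 0) {
                persist(batch);
                batch.clear(); // reuse the same set for the next window
            }
        }
        persist(batch); // flush the final partial window
    }

    private static void persist(Set<String> chunk) {
        System.out.println("persisting " + chunk);
    }
}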