List of usage examples for java.util.Set#clear()
void clear();
From source file:com.npower.dm.hibernate.management.ModelManagementBeanImpl.java
/**
 * Replaces the model's DM properties with the supplied ones. Every
 * previously stored property entity is deleted from the Hibernate session,
 * the in-memory collection is emptied, and one new ModelDMProperty is
 * saved per entry in {@code props}.
 *
 * @param model model whose DM properties are rewritten
 * @param props new properties; each key/value becomes one ModelDMProperty
 * @throws DMException declared by the management interface
 */
public void setDMProperties(Model model, Properties props) throws DMException {
    Set<ModelDMProperty> current = ((ModelEntity) model).getModelDMProps();
    Session hbSession = this.getHibernateSession();
    // Remove the persistent rows backing the current collection first.
    if (!current.isEmpty()) {
        for (ModelDMProperty stale : current) {
            hbSession.delete(stale);
        }
    }
    current.clear();
    // Persist one entity per supplied property and re-populate the collection.
    for (Enumeration<?> keys = props.propertyNames(); keys.hasMoreElements();) {
        String key = (String) keys.nextElement();
        ModelDMPropertyID pk = new ModelDMPropertyID();
        pk.setModelId(model.getID());
        pk.setPropName(key);
        ModelDMProperty fresh = new ModelDMProperty(pk, model, props.getProperty(key));
        hbSession.save(fresh);
        current.add(fresh);
    }
}
From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java
/**
 * In this testcase, client is dataNodes[0], but dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on the same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
    Set<Node> excludedNodes;
    DatanodeStorageInfo[] targets;
    List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();

    // 0 replicas requested: nothing may be chosen.
    excludedNodes = new HashSet<Node>();
    excludedNodes.add(dataNodes[1]);
    targets = chooseTarget(0, chosenNodes, excludedNodes);
    assertEquals(targets.length, 0);

    // 1 replica: the writer's own node (storages[0]) is preferred.
    excludedNodes.clear();
    chosenNodes.clear();
    excludedNodes.add(dataNodes[1]);
    targets = chooseTarget(1, chosenNodes, excludedNodes);
    assertEquals(targets.length, 1);
    assertEquals(storages[0], targets[0]);

    // 2 replicas: 2nd must land on a different rack than the 1st.
    excludedNodes.clear();
    chosenNodes.clear();
    excludedNodes.add(dataNodes[1]);
    targets = chooseTarget(2, chosenNodes, excludedNodes);
    assertEquals(targets.length, 2);
    assertEquals(storages[0], targets[0]);
    assertFalse(isOnSameRack(targets[0], targets[1]));

    // 3 replicas: 3rd shares a rack with the 2nd, not with the 1st.
    excludedNodes.clear();
    chosenNodes.clear();
    excludedNodes.add(dataNodes[1]);
    targets = chooseTarget(3, chosenNodes, excludedNodes);
    assertEquals(targets.length, 3);
    assertEquals(storages[0], targets[0]);
    assertFalse(isOnSameRack(targets[0], targets[1]));
    assertTrue(isOnSameRack(targets[1], targets[2]));

    // 4 replicas: none on the writer's rack; replicas 2-4 split across
    // at most two other racks.
    excludedNodes.clear();
    chosenNodes.clear();
    excludedNodes.add(dataNodes[1]);
    targets = chooseTarget(4, chosenNodes, excludedNodes);
    assertEquals(targets.length, 4);
    assertEquals(storages[0], targets[0]);
    for (int i = 1; i < 4; i++) {
        assertFalse(isOnSameRack(targets[0], targets[i]));
    }
    assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
    assertFalse(isOnSameRack(targets[1], targets[3]));

    // Finally: with storages[2] already chosen, ask for one more target and
    // verify the pre-chosen node is included in the returned array.
    excludedNodes.clear();
    chosenNodes.clear();
    excludedNodes.add(dataNodes[1]);
    chosenNodes.add(storages[2]);
    targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true, excludedNodes, BLOCK_SIZE,
            TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
    System.out.println("targets=" + Arrays.asList(targets));
    assertEquals(2, targets.length);
    // make sure that the chosen node is in the target.
    int i = 0;
    for (; i < targets.length && !storages[2].equals(targets[i]); i++)
        ;
    assertTrue(i < targets.length);
}
From source file:com.npower.dm.hibernate.management.ModelManagementBeanImpl.java
/** * Store the properties into ModelEntity's DMBootstrap Properties. Old * properties will be replaced by the props. * /*from ww w . j av a 2 s. c om*/ * @param props * Properties * @throws DMException */ public void setDMBootstrapProperties(Model model, Properties props) throws DMException { Properties newProps = props; Set<DMBootstrapProperty> set = ((ModelEntity) model).getModelDMBootProps(); Session session = this.getHibernateSession(); if (!set.isEmpty()) { for (Iterator<DMBootstrapProperty> i = set.iterator(); i.hasNext();) { session.delete(i.next()); } } set.clear(); Enumeration<?> names = newProps.propertyNames(); while (names.hasMoreElements()) { String name = (String) names.nextElement(); String value = newProps.getProperty(name); DMBootstrapPropertyID id = new DMBootstrapPropertyID(); id.setModelId(model.getID()); id.setPropName(name); DMBootstrapProperty prop = new DMBootstrapProperty(id, model, value); session.save(prop); set.add(prop); } }
From source file:org.bedework.timezones.common.leveldb.LdbCachedData.java
/** Call the primary server and get a list of data that's changed since we last
 * looked. Then fetch each changed timezone and update the db.
 *
 * <p>We try not to keep the db locked for long periods</p>
 *
 * @return true if we successfully contacted the server
 * @throws TzException
 */
private synchronized boolean updateFromPrimary() throws TzException {
    if (debug) {
        trace("Updating from primary");
    }
    try {
        if (cfg.getPrimaryServer()) {
            // We are a primary. No update needed
            if (debug) {
                trace("We are a primary: exit");
            }
            return true; // good enough
        }
        if (cfg.getPrimaryUrl() == null) {
            warn("No primary URL: exit");
            return true; // good enough
        }
        /* Get the list of changed tzs from the primary */
        final Timezones tzs = new TimezonesImpl();
        tzs.init(cfg.getPrimaryUrl());
        final String changedSince = cfg.getDtstamp();
        final long startTime = System.currentTimeMillis();
        long fetchTime = 0;
        final TimezoneListType tzl;
        try {
            tzl = tzs.getList(changedSince);
        } catch (final TzUnknownHostException tuhe) {
            error("Unknown host exception contacting " + cfg.getPrimaryUrl());
            return false;
        } catch (final Throwable t) {
            error("Exception contacting " + cfg.getPrimaryUrl());
            error(t);
            return false;
        }
        // Persist the server's dtstamp so the next poll only asks for newer data.
        final String svrCs = tzl.getDtstamp();
        if ((changedSince == null) || !svrCs.equals(changedSince)) {
            cfg.setDtstamp(svrCs);
            TzServerUtil.saveConfig();
        }
        primaryFetches++;
        lastFetchCt = tzl.getTimezones().size();
        String isAre = "are";
        String theS = "s";
        if (lastFetchCt == 1) {
            isAre = "is";
            theS = "";
        }
        info("There " + isAre + " " + lastFetchCt + " timezone" + theS + " to fetch");
        final List<TzEntry> tzEntries = new ArrayList<>();
        /* First go through the returned list and get our own spec.
         * Need the db for that. Keep the open/close window short. */
        try {
            open();
            for (final TimezoneType sum : tzl.getTimezones()) {
                final TzEntry entry = new TzEntry();
                entry.id = sum.getTzid();
                entry.sum = sum;
                if (debug) {
                    trace("Get db spec for timezone " + entry.id);
                }
                entry.dbspec = getSpec(entry.id);
                tzEntries.add(entry);
            }
        } finally {
            close();
        }
        /* Now fetch the timezones from the primary - no db needed */
        for (final TzEntry entry : tzEntries) {
            if (debug) {
                trace("Fetching timezone " + entry.id);
            }
            // Conditional fetch: send the stored etag so unchanged zones cost nothing.
            String etag = null;
            if (entry.dbspec != null) {
                etag = entry.dbspec.getEtag();
            }
            final long startFetch = System.currentTimeMillis();
            final TaggedTimeZone ttz = tzs.getTimeZone(entry.id, etag);
            fetchTime += System.currentTimeMillis() - startFetch;
            if ((ttz != null) && (ttz.vtz == null)) {
                // No change
                continue;
            }
            if (ttz == null) {
                warn("Received timezone id " + entry.id + " but not available.");
                continue;
            }
            entry.ttz = ttz;
        }
        /* Go through the entries and try to update.
         * If ttz is null no update needed.
         * If dbspec is null it's an add.
         */
        final AliasMaps amaps = buildAliasMaps();
        try {
            open();
            for (final TzEntry entry : tzEntries) {
                if (debug) {
                    trace("Processing timezone " + entry.id);
                }
                if (entry.ttz == null) {
                    if (debug) {
                        trace("No change.");
                    }
                    continue;
                }
                final boolean add = entry.dbspec == null;
                if (add) {
                    // Create a new one
                    entry.dbspec = new TzDbSpec();
                }
                entry.dbspec.setName(entry.id);
                entry.dbspec.setEtag(entry.ttz.etag);
                entry.dbspec.setDtstamp(DateTimeUtil.rfcDateTimeUTC(entry.sum.getLastModified()));
                entry.dbspec.setSource(cfg.getPrimaryUrl());
                entry.dbspec.setActive(true);
                entry.dbspec.setVtimezone(entry.ttz.vtz);
                if (!Util.isEmpty(entry.sum.getLocalNames())) {
                    final Set<LocalizedString> dns;
                    if (add) {
                        dns = new TreeSet<>();
                        entry.dbspec.setDisplayNames(dns);
                    } else {
                        dns = entry.dbspec.getDisplayNames();
                        dns.clear(); // XXX not good - forces delete and recreate
                    }
                    for (final LocalNameType ln : entry.sum.getLocalNames()) {
                        final LocalizedString ls = new LocalizedString(ln.getLang(), ln.getValue());
                        dns.add(ls);
                    }
                }
                putTzSpec(entry.dbspec);
                /* Get all aliases for this id */
                final SortedSet<String> aliases = amaps.byTzid.get(entry.id);
                if (!Util.isEmpty(entry.sum.getAliases())) {
                    for (final String a : entry.sum.getAliases()) {
                        TzAlias tza = amaps.byAlias.get(a);
                        if (tza == null) {
                            tza = new TzAlias(a);
                        }
                        tza.addTargetId(entry.id);
                        putTzAlias(tza);
                        /* We've seen this alias. Remove from the list */
                        if (aliases != null) {
                            aliases.remove(a);
                        }
                    }
                }
                if (aliases != null) {
                    /* remaining aliases should be deleted */
                    for (final String alias : aliases) {
                        final TzAlias tza = getTzAlias(alias);
                        removeTzAlias(tza);
                    }
                }
            }
        } finally {
            close();
        }
        info("Total time: " + TzServerUtil.printableTime(System.currentTimeMillis() - startTime));
        info("Fetch time: " + TzServerUtil.printableTime(fetchTime));
        lastFetchStatus = "Success";
    } catch (final TzException tze) {
        lastFetchStatus = "Failed";
        throw tze;
    } catch (final Throwable t) {
        lastFetchStatus = "Failed";
        throw new TzException(t);
    }
    return true;
}
From source file:com.vaadin.addon.jpacontainer.demo.TestDataGenerator.java
/**
 * Generates 2500 invoices, each attached to a distinct randomly chosen
 * order. Invoice/due/paid dates are derived from the order date, the
 * order and its customer are stamped with the invoice date, and every
 * order item is copied onto the invoice before it is persisted.
 */
private void createInvoiceTestData() {
    if (logger.isDebugEnabled()) {
        logger.debug("Generating invoices");
    }
    Set<Order> invoicedOrders = new HashSet<Order>();
    Random random = new Random();
    for (int n = 0; n < 2500; n++) {
        Invoice invoice = new Invoice();
        // Keep drawing random orders until one without an invoice turns up.
        Order order = entityManager.find(Order.class, orderIds.get(random.nextInt(orderIds.size())));
        while (invoicedOrders.contains(order)) {
            order = entityManager.find(Order.class, orderIds.get(random.nextInt(orderIds.size())));
        }
        invoicedOrders.add(order);
        invoice.setInvoiceNo(n + 1);
        invoice.setOrder(order);
        invoice.setInvoiceDate(addDaysToDate(order.getOrderDate(), random.nextInt(8)));
        invoice.setDueDate(addDaysToDate(invoice.getInvoiceDate(), 14));
        order.getCustomer().setLastInvoiceDate(invoice.getInvoiceDate());
        order.setBilledDate(invoice.getInvoiceDate());
        // Roughly half the invoices are marked as already paid.
        if (random.nextInt(2) == 1) {
            invoice.setPaidDate(addDaysToDate(invoice.getInvoiceDate(), random.nextInt(14)));
        }
        // Mirror each order line onto the invoice.
        for (OrderItem sourceItem : order.getItems()) {
            InvoiceItem copy = new InvoiceItem();
            copy.setDescription(sourceItem.getDescription());
            copy.setQuantity(sourceItem.getQuantity());
            copy.setPrice(sourceItem.getPrice());
            invoice.addItem(copy);
        }
        entityManager.persist(invoice);
    }
    // The tracking set is no longer needed.
    invoicedOrders.clear();
    invoicedOrders = null;
}
From source file:com.ibm.jaggr.core.impl.modulebuilder.javascript.RequireExpansionCompilerPassTest.java
/**
 * Verifies has!-plugin resolution in require() expansion: as features are
 * toggled true/false/undefined, the pass must record the referenced features
 * in {@code dependentFeatures} and expand only the dependencies whose
 * feature conditions can be resolved, falling back to conditional
 * ("has!feature?dep") module ids for undefined features — unless
 * DISABLE_HASPLUGINBRANCHING suppresses branching entirely.
 */
@Test
public void testHasPluginResolution() throws Exception {
    Features features = new Features();
    Set<String> dependentFeatures = new TreeSet<String>();
    features.put("feature1", true);
    features.put("feature2", true);
    List<ModuleDeps> expanded = new ArrayList<ModuleDeps>();
    RequireExpansionCompilerPass pass = new RequireExpansionCompilerPass(mockAggregator, features,
            dependentFeatures, expanded, new MutableBoolean(false), true, null, false, null);
    String code, output;

    // Both features true: both deps expanded.
    code = "require([\"has!feature1?has1\",\"has!feature2?has2\"]);";
    output = runPass(pass, code);
    System.out.println(output);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\",\"" + placeHolder0 + "\"]);",
            output);
    Assert.assertEquals(new LinkedHashSet<String>(Arrays.asList(new String[] { "dep1", "dep2" })),
            expanded.get(0).getModuleIds());

    // feature2 false: only dep1 expanded.
    features.put("feature2", false);
    dependentFeatures.clear();
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\",\"" + placeHolder0 + "\"]);",
            output);
    Assert.assertEquals(new LinkedHashSet<String>(Arrays.asList(new String[] { "dep1" })),
            expanded.get(0).getModuleIds());

    // Both false: nothing expanded and no placeholder emitted.
    features.put("feature1", false);
    dependentFeatures.clear();
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\"]);", output);
    Assert.assertEquals(0, expanded.get(0).getModuleIds().size());

    // feature2 undefined: its dep stays conditional ("has!feature2?dep2").
    features.remove("feature2");
    dependentFeatures.clear();
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\",\"" + placeHolder0 + "\"]);",
            output);
    Assert.assertEquals(new LinkedHashSet<String>(Arrays.asList(new String[] { "has!feature2?dep2" })),
            expanded.get(0).getModuleIds());

    // With branching disabled, undefined features expand to nothing.
    mockAggregator.getOptions().setOption(IOptions.DISABLE_HASPLUGINBRANCHING, true);
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\"]);", output);
    Assert.assertEquals(0, expanded.get(0).getModuleIds().size());
    mockAggregator.getOptions().setOption(IOptions.DISABLE_HASPLUGINBRANCHING, false);

    // feature1 true, feature2 undefined: dep1 plus the conditional dep2.
    features.put("feature1", true);
    dependentFeatures.clear();
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\",\"" + placeHolder0 + "\"]);",
            output);
    Assert.assertEquals(new LinkedHashSet<String>(Arrays.asList(new String[] { "dep1", "has!feature2?dep2" })),
            expanded.get(0).getModuleIds());

    // Both undefined: both deps stay conditional.
    features.remove("feature1");
    dependentFeatures.clear();
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\",\"" + placeHolder0 + "\"]);",
            output);
    Assert.assertEquals(
            new LinkedHashSet<String>(Arrays.asList(new String[] { "has!feature1?dep1", "has!feature2?dep2" })),
            expanded.get(0).getModuleIds());

    // Branching disabled again: conditional deps suppressed entirely.
    mockAggregator.getOptions().setOption(IOptions.DISABLE_HASPLUGINBRANCHING, true);
    output = runPass(pass, code);
    Assert.assertEquals("[feature1, feature2]", dependentFeatures.toString());
    Assert.assertEquals("require([\"has!feature1?has1\",\"has!feature2?has2\"]);", output);
    Assert.assertEquals(0, expanded.get(0).getModuleIds().size());
}
From source file:be.fedict.trust.service.bean.HarvesterMDB.java
private void processHarvestMessage(HarvestMessage harvestMessage) { if (null == harvestMessage) { return;//from w w w.ja v a 2s .c om } String caName = harvestMessage.getCaName(); boolean update = harvestMessage.isUpdate(); String crlFilePath = harvestMessage.getCrlFile(); File crlFile = new File(crlFilePath); LOG.debug("processHarvestMessage - Don't have CA's Serial Number??"); LOG.debug("issuer: " + caName); CertificateAuthorityEntity certificateAuthority = this.certificateAuthorityDAO .findCertificateAuthority(caName); if (null == certificateAuthority) { LOG.error("unknown certificate authority: " + caName); deleteCrlFile(crlFile); return; } if (!update && Status.PROCESSING != certificateAuthority.getStatus()) { /* * Possible that another harvester instance already activated or is * processing the CA cache in the meanwhile. */ LOG.debug("CA status not marked for processing"); deleteCrlFile(crlFile); return; } Date validationDate = new Date(); X509Certificate issuerCertificate = certificateAuthority.getCertificate(); Date notAfter = issuerCertificate.getNotAfter(); if (validationDate.after(notAfter)) { LOG.info("will not update CRL cache for expired CA: " + issuerCertificate.getSubjectX500Principal()); deleteCrlFile(crlFile); return; } FileInputStream crlInputStream; try { crlInputStream = new FileInputStream(crlFile); } catch (FileNotFoundException e) { LOG.error("CRL file does not exist: " + crlFilePath); return; } X509CRL crl; try { CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509", "BC"); crl = (X509CRL) certificateFactory.generateCRL(crlInputStream); } catch (Exception e) { LOG.error("BC error: " + e.getMessage(), e); deleteCrlFile(crlFile); return; } LOG.debug("checking integrity CRL..."); boolean crlValid = CrlTrustLinker.checkCrlIntegrity(crl, issuerCertificate, validationDate); if (!crlValid) { this.auditDAO.logAudit("Invalid CRL for CA=" + caName); deleteCrlFile(crlFile); return; } BigInteger crlNumber = getCrlNumber(crl); 
LOG.debug("CRL number: " + crlNumber); BigInteger currentCrlNumber = this.certificateAuthorityDAO.findCrlNumber(caName); if (null != currentCrlNumber) { LOG.debug("CRL number in database: " + currentCrlNumber); } if (null != currentCrlNumber && currentCrlNumber.compareTo(crlNumber) >= 0 && certificateAuthority.getStatus() == Status.ACTIVE) { // current CRL cache is higher or equal, no update needed LOG.debug("current CA cache is new enough."); deleteCrlFile(crlFile); return; } List<RevokedCertificateEntity> revokedCertificateEntities = this.certificateAuthorityDAO .getRevokedCertificates(caName); LOG.debug("number of revoked certificates in database: " + revokedCertificateEntities.size()); Map<String, RevokedCertificateEntity> revokedCertificatesMap = new HashMap<String, RevokedCertificateEntity>(); for (RevokedCertificateEntity revokedCertificateEntity : revokedCertificateEntities) { String serialNumber = revokedCertificateEntity.getPk().getSerialNumber(); revokedCertificatesMap.put(serialNumber, revokedCertificateEntity); } LOG.debug("processing CRL... " + caName); boolean isIndirect; Enumeration revokedCertificatesEnum; try { isIndirect = isIndirectCRL(crl); revokedCertificatesEnum = getRevokedCertificatesEnum(crl); } catch (Exception e) { this.auditDAO.logAudit("Failed to parse CRL for CA=" + caName); this.failures++; throw new RuntimeException(e); } int entries = 0; if (revokedCertificatesEnum.hasMoreElements()) { /* * Split up persisting the crl entries to avoid memory issues. 
*/ Set<X509CRLEntry> revokedCertsBatch = new HashSet<X509CRLEntry>(); X500Principal previousCertificateIssuer = crl.getIssuerX500Principal(); int added = 0; while (revokedCertificatesEnum.hasMoreElements()) { TBSCertList.CRLEntry entry = (TBSCertList.CRLEntry) revokedCertificatesEnum.nextElement(); X500Name x500name = new X500Name(previousCertificateIssuer.getName(X500Principal.RFC1779)); X509CRLEntryObject revokedCertificate = new X509CRLEntryObject(entry, isIndirect, x500name); previousCertificateIssuer = revokedCertificate.getCertificateIssuer(); revokedCertsBatch.add(revokedCertificate); added++; if (added == BATCH_SIZE) { /* * Persist batch */ this.certificateAuthorityDAO.updateRevokedCertificates(revokedCertsBatch, crlNumber, crl.getIssuerX500Principal(), revokedCertificatesMap); entries += revokedCertsBatch.size(); revokedCertsBatch.clear(); added = 0; } } /* * Persist final batch */ this.certificateAuthorityDAO.updateRevokedCertificates(revokedCertsBatch, crlNumber, crl.getIssuerX500Principal(), revokedCertificatesMap); entries += revokedCertsBatch.size(); /* * Cleanup redundant CRL entries */ if (null != crlNumber) { this.certificateAuthorityDAO.removeOldRevokedCertificates(crlNumber, crl.getIssuerX500Principal().toString()); } } deleteCrlFile(crlFile); LOG.debug("CRL this update: " + crl.getThisUpdate()); LOG.debug("CRL next update: " + crl.getNextUpdate()); certificateAuthority.setStatus(Status.ACTIVE); certificateAuthority.setThisUpdate(crl.getThisUpdate()); certificateAuthority.setNextUpdate(crl.getNextUpdate()); LOG.debug("cache activated for CA: " + crl.getIssuerX500Principal() + " (entries=" + entries + ")"); }
From source file:edu.brown.hstore.BatchPlanner.java
/**
 * Builds the dependency graph for a batch plan: one PlanVertex per
 * (statement, round) fragment, wired by edges from each vertex's input
 * dependency id to the vertices that produce it. Also refreshes the
 * sorted-vertex cache arrays (fragmentIds / input_ids / output_ids)
 * used for single-partition execution.
 *
 * NOTE(review): mutates instance state (sorted_vertices,
 * output_dependency_xref, output_dependency_xref_clear) — presumably
 * single-threaded per planner instance; confirm before sharing.
 *
 * @param plan the batch plan whose fragments are linked
 * @return the constructed PlanGraph
 */
protected PlanGraph buildPlanGraph(BatchPlanner.BatchPlan plan) {
    if (this.enable_profiling)
        ProfileMeasurement.swap(this.time_plan, this.time_planGraph);
    PlanGraph graph = new PlanGraph(); // CatalogUtil.getDatabase(this.catalog_proc));
    graph.num_rounds = 0;
    this.sorted_vertices.clear();
    this.output_dependency_xref_clear.clear();
    int last_id = FIRST_DEPENDENCY_ID;
    for (int stmt_index = 0; stmt_index < this.batchSize; stmt_index++) {
        Map<PlanFragment, Set<Integer>> frag_partitions = plan.frag_partitions[stmt_index];
        assert (frag_partitions != null) : "No Fragment->PartitionIds map for Statement #" + stmt_index;
        List<PlanFragment> fragments = plan.frag_list[stmt_index];
        assert (fragments != null);
        int num_fragments = fragments.size();
        graph.num_rounds = Math.max(num_fragments, graph.num_rounds);
        // Generate the synthetic DependencyIds for the query
        int last_output_id = HStoreConstants.NULL_DEPENDENCY_ID;
        for (int round = 0, cnt = num_fragments; round < cnt; round++) {
            PlanFragment catalog_frag = fragments.get(round);
            Set<Integer> f_partitions = frag_partitions.get(catalog_frag);
            assert (f_partitions != null) : String.format("No PartitionIds for [%02d] %s in Statement #%d", round,
                    catalog_frag.fullName(), stmt_index);
            // Fragment is "local" when it touches only the base partition.
            boolean f_local = (f_partitions.size() == 1 && f_partitions.contains(plan.base_partition));
            Integer output_id = new Integer(
                    this.enable_unique_ids ? BatchPlanner.NEXT_DEPENDENCY_ID.getAndIncrement() : last_id++);
            PlanVertex v = new PlanVertex(catalog_frag, stmt_index, round, last_output_id, output_id.intValue(),
                    f_local);
            // Reuse the cached xref set for this output id; the "clear" set
            // tracks which cached entries have been reset this invocation.
            Set<PlanVertex> dependencies = output_dependency_xref.get(output_id);
            if (dependencies == null) {
                dependencies = new HashSet<PlanVertex>();
                this.output_dependency_xref.put(output_id, dependencies);
            } else if (this.output_dependency_xref_clear.contains(output_id) == false) {
                dependencies.clear();
                this.output_dependency_xref_clear.add(output_id);
            }
            dependencies.add(v);
            graph.addVertex(v);
            this.sorted_vertices.add(v);
            last_output_id = output_id;
        } // FOR
    } // FOR
    // Setup Edges
    for (PlanVertex v0 : graph.getVertices()) {
        if (v0.input_dependency_id == HStoreConstants.NULL_DEPENDENCY_ID)
            continue;
        for (PlanVertex v1 : output_dependency_xref.get(v0.input_dependency_id)) {
            assert (!v0.equals(v1)) : v0;
            if (!graph.findEdgeSet(v0, v1).isEmpty())
                continue;
            PlanEdge e = new PlanEdge(v0.input_dependency_id);
            graph.addEdge(e, v0, v1);
        } // FOR
    } // FOR
    // Single-Partition Cache
    Collections.sort(this.sorted_vertices, PLANVERTEX_COMPARATOR);
    final int num_vertices = this.sorted_vertices.size();
    graph.fragmentIds = new long[num_vertices];
    graph.input_ids = new int[num_vertices];
    graph.output_ids = new int[num_vertices];
    int i = 0;
    for (PlanVertex v : this.sorted_vertices) {
        graph.fragmentIds[i] = v.frag_id;
        graph.output_ids[i] = v.output_dependency_id;
        graph.input_ids[i] = v.input_dependency_id;
        i += 1;
    } // FOR
    if (this.enable_profiling)
        ProfileMeasurement.swap(this.time_planGraph, this.time_plan);
    return (graph);
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils.java
/**
 * Validates SchedulerUtils.normalizeAndvalidateRequest against many
 * combinations of queue-accessible node labels, cluster node labels, and
 * request label expressions: subset labels succeed, labels missing from
 * the cluster or the queue fail, multi-label expressions fail, ANY-queue
 * accepts any cluster label, non-ANY resource names cannot carry labels,
 * and disabling node labels normalizes the expression away.
 */
@Test(timeout = 30000)
public void testValidateResourceRequestWithErrorLabelsPermission() throws IOException {
    // mock queue and scheduler
    YarnScheduler scheduler = mock(YarnScheduler.class);
    Set<String> queueAccessibleNodeLabels = Sets.newHashSet();
    QueueInfo queueInfo = mock(QueueInfo.class);
    when(queueInfo.getQueueName()).thenReturn("queue");
    when(queueInfo.getAccessibleNodeLabels()).thenReturn(queueAccessibleNodeLabels);
    when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean())).thenReturn(queueInfo);
    Resource maxResource = Resources.createResource(YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
    // queue has labels, success cases
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        rmContext.getNodeLabelManager()
                .addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression("y");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression("");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression(" ");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
    } catch (InvalidResourceRequestException e) {
        e.printStackTrace();
        fail("Should be valid when request labels is a subset of queue labels");
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y"));
    }
    // same as above, but cluster node labels don't contain the label being
    // requested. should fail
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: label "x" not in cluster
    }
    // queue has labels, failed cases (when ask a label not included by queue)
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        rmContext.getNodeLabelManager()
                .addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("z");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: label "z" not accessible by queue
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y"));
    }
    // we don't allow specifying more than one node label in a single
    // expression
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        rmContext.getNodeLabelManager()
                .addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x && y");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: compound expressions are rejected
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y"));
    }
    // queue doesn't have label, succeed (when request no label)
    queueAccessibleNodeLabels.clear();
    try {
        // set queue accessible node labels to empty
        queueAccessibleNodeLabels.clear();
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression("");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression(" ");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
    } catch (InvalidResourceRequestException e) {
        e.printStackTrace();
        fail("Should be valid when request labels is empty");
    }
    boolean invalidlabelexception = false;
    // queue doesn't have label, failed (when request any label)
    try {
        // set queue accessible node labels to empty
        queueAccessibleNodeLabels.clear();
        rmContext.getNodeLabelManager().addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidLabelResourceRequestException e) {
        invalidlabelexception = true;
    } catch (InvalidResourceRequestException e) {
        // wrong exception type; the assert below catches this case
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x"));
    }
    Assert.assertTrue("InvalidLabelResourceRequestException expected", invalidlabelexception);
    // queue is "*", always succeeded
    try {
        // queue accessible labels = ANY
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
        rmContext.getNodeLabelManager().addToCluserNodeLabels(
                ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y"), NodeLabel.newInstance("z")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression("y");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        resReq.setNodeLabelExpression("z");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
    } catch (InvalidResourceRequestException e) {
        e.printStackTrace();
        fail("Should be valid when queue can access any labels");
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y", "z"));
    }
    // same as above, but cluster node labels don't contain the label, should
    // fail
    try {
        // queue accessible labels = ANY, but cluster has no labels
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: label "x" not in cluster
    }
    // we don't allow resource name other than ANY and specify label
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        rmContext.getNodeLabelManager()
                .addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), "rack", resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: labels only allowed on the ANY resource name
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y"));
    }
    // we don't allow resource name other than ANY and specify label even if
    // queue has accessible label = *
    try {
        // set queue accessible node labels to *
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList(CommonNodeLabelsManager.ANY));
        rmContext.getNodeLabelManager().addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), "rack", resource, 1);
        resReq.setNodeLabelExpression("x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        // expected: labels only allowed on the ANY resource name
    } finally {
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x"));
    }
    // label missing from an empty cluster: verify the exact error message.
    try {
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq1 = BuilderUtils.newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue", scheduler, rmContext);
        fail("Should fail");
    } catch (InvalidResourceRequestException e) {
        assertEquals("Invalid label resource request, cluster do not contain , " + "label= x", e.getMessage());
    }
    // node labels disabled: the expression is normalized to NO_LABEL.
    try {
        rmContext.getYarnConfiguration().set(YarnConfiguration.NODE_LABELS_ENABLED, "false");
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq1 = BuilderUtils.newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
        SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue", scheduler, rmContext);
        Assert.assertEquals(RMNodeLabelsManager.NO_LABEL, resReq1.getNodeLabelExpression());
    } catch (InvalidResourceRequestException e) {
        assertEquals(
                "Invalid resource request, node label not enabled but " + "request contains label expression",
                e.getMessage());
    }
}