List of usage examples for java.util.Set.removeAll
boolean removeAll(Collection<?> c);
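Before the project examples, a minimal standalone sketch (not taken from any of the sources below) of the basic contract: removeAll deletes every element that is also contained in the argument collection, silently ignores elements the set never held, and returns true only if the set was actually modified.

import java.util.HashSet;
import java.util.Set;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Set<String> fruits = new HashSet<String>();
        fruits.add("apple");
        fruits.add("banana");
        fruits.add("cherry");

        // Compute a set difference in place: drop everything that is also in 'stale'
        Set<String> stale = new HashSet<String>();
        stale.add("banana");
        stale.add("durian"); // absent elements are simply ignored

        boolean changed = fruits.removeAll(stale);
        System.out.println(changed); // true - "banana" was removed
        System.out.println(fruits);  // [apple, cherry] (HashSet iteration order is not guaranteed)
    }
}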
From source file:com.vmware.vhadoop.vhm.hadoop.HadoopAdaptor.java
@Override
/* Returns the set of active dnsNames based on the input Set */
public Set<String> checkTargetTTsSuccess(String opType, Set<String> ttDnsNames, int totalTargetEnabled,
        HadoopClusterInfo cluster) {
    String scriptRemoteFilePath = JOB_TRACKER_DEFAULT_SCRIPT_DEST_PATH + JOB_TRACKER_CHECK_SCRIPT_FILE_NAME;
    String listRemoteFilePath = null;
    String opDesc = "checkTargetTTsSuccess";

    if (ttDnsNames == null) {
        _log.warning("No valid TT names provided");
        return null;
    }
    /* We don't expect null or empty values, but weed them out anyway */
    ttDnsNames.remove(null);
    ttDnsNames.remove("");
    if (ttDnsNames.size() == 0) {
        _log.warning("No valid TT names provided");
        return null;
    }

    _log.log(Level.INFO, "Affected TTs: " + ttDnsNames);
    setErrorParamsForCommand(cluster, opDesc, scriptRemoteFilePath, listRemoteFilePath);

    int iterations = 0;
    CompoundStatus getActiveStatus = null;
    int rc = UNKNOWN_ERROR;
    Set<String> allActiveTTs = null;
    long lastCheckAttemptTime = Long.MAX_VALUE;
    do {
        if (iterations > 0) {
            /* 1141429: Ensure that if the script fails, there is a minimum wait before the next retry attempt */
            long millisSinceLastCheck = (System.currentTimeMillis() - lastCheckAttemptTime);
            long underWaitMillis = JOB_TRACKER_CHECK_SCRIPT_MIN_RETRY_MILLIS - millisSinceLastCheck;
            if (underWaitMillis > 0) {
                try {
                    _log.fine("Sleeping for underWaitMillis = " + underWaitMillis);
                    Thread.sleep(underWaitMillis);
                } catch (InterruptedException e) {
                }
            }
            _log.log(Level.INFO, "Target TTs not yet achieved...checking again - " + iterations);
            _log.log(Level.INFO, "Affected TTs: " + ttDnsNames);
        }

        getActiveStatus = new CompoundStatus(ACTIVE_TTS_STATUS_KEY);
        lastCheckAttemptTime = System.currentTimeMillis();
        allActiveTTs = getActiveTTs(cluster, totalTargetEnabled, getActiveStatus);

        /* Declare success as long as we manage to de/recommission only the TTs we set out to handle
         * (rather than checking correctness for all TTs) */
        if ((allActiveTTs != null)
                && ((opType.equals("Recommission") && allActiveTTs.containsAll(ttDnsNames))
                        || (opType.equals("Decommission") && ttDnsNames.retainAll(allActiveTTs)
                                && ttDnsNames.isEmpty()))) {
            _log.log(Level.INFO, "All selected TTs correctly {0}ed", opType.toLowerCase());
            rc = SUCCESS;
            break;
        }

        /* If there was an error reported by getActiveTTs... */
        TaskStatus taskStatus = getActiveStatus.getFirstFailure(STATUS_INTERPRET_ERROR_CODE);
        if (taskStatus != null) {
            rc = taskStatus.getErrorCode();
        } else {
            /*
             * JG: Sometimes we don't know the hostnames (e.g., localhost); in these cases, as long as the
             * check script returns success based on the target #TTs, we are good.
             * TODO: Change the check script to return success if #newly added + #current_enabled is met,
             * rather than the target #TTs. This is to address scenarios where there is a mismatch
             * (#Active TTs != #poweredOn VMs) to begin with...
             * CHANGED: We have changed the time at which this function is invoked -- it gets invoked only
             * when dns/hostnames are available. So we no longer have the issue of not knowing hostnames and
             * still meeting the target #TTs. Our only successful exit is when the TTs that were explicitly
             * asked to be checked have been correctly de/recommissioned.
             *
             * rc = SUCCESS; // Note: removing this
             *
             * We also notice that in this case, where #Active TTs matches the target but not all of the
             * requested TTs have been de/recommissioned yet, the check script returns immediately (because
             * it only looks for a match of these values, which is true here). So we recompute the target
             * TTs based on the latest information to essentially put back the delay...
             */
            Set<String> deltaTTs = new HashSet<String>(ttDnsNames);
            if (opType.equals("Recommission")) {
                deltaTTs.removeAll(allActiveTTs); // get TTs that haven't been recommissioned yet...
                totalTargetEnabled = allActiveTTs.size() + deltaTTs.size();
            } else { // opType = Decommission
                deltaTTs.retainAll(allActiveTTs); // get TTs that haven't been decommissioned yet...
                totalTargetEnabled = allActiveTTs.size() - deltaTTs.size();
            }
            _log.log(Level.INFO, "Even though #ActiveTTs = #TargetTTs, not all requested TTs have been "
                    + opType.toLowerCase() + "ed yet - Trying again with updated target: " + totalTargetEnabled);
        }

        /* Break out if there is an error other than the ones we expect to be resolved in a subsequent
         * invocation of the check script */
        if (rc != ERROR_FEWER_TTS && rc != ERROR_EXCESS_TTS && rc != UNKNOWN_ERROR) {
            break;
        }
    } while (iterations++ < ACTIVE_TASK_TRACKERS_CHECK_RETRY_ITERATIONS);

    getCompoundStatus().addStatus(_errorCodes.interpretErrorCode(_log, rc, getErrorParamValues(cluster)));
    if (rc != SUCCESS) {
        getActiveStatus.registerTaskFailed(false, "Check Test Failed");
        getCompoundStatus().addStatus(getActiveStatus);
    }
    return allActiveTTs;
}
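The example above leans on removeAll and retainAll as complementary operations applied to a copy of the requested set: removeAll yields the difference (requested TTs not yet active), retainAll the intersection (requested TTs still active). A stripped-down sketch of that idiom follows; the class and method names are hypothetical, not from the VMware source.

import java.util.HashSet;
import java.util.Set;

class DeltaSketch {
    // Hypothetical helper mirroring the recommission branch: which requested items are not yet active?
    static Set<String> notYetActive(Set<String> requested, Set<String> active) {
        Set<String> delta = new HashSet<String>(requested); // copy first - removeAll mutates its receiver
        delta.removeAll(active);                            // set difference: requested minus active
        return delta;
    }

    // Hypothetical helper mirroring the decommission branch: which requested items are still active?
    static Set<String> stillActive(Set<String> requested, Set<String> active) {
        Set<String> delta = new HashSet<String>(requested);
        delta.retainAll(active);                            // set intersection: requested and active
        return delta;
    }
}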
From source file:com.alvexcore.repo.AlvexVersionableAspect.java
private int findChangedProps(Map<QName, Serializable> before, Map<QName, Serializable> after) {
    int diffSize = 0;
    Set<QName> propNames = new HashSet<QName>(after.size() * 2);
    propNames.addAll(after.keySet());
    propNames.addAll(before.keySet());
    propNames.removeAll(excludedOnUpdatePropQNames);
    for (QName prop : propNames) {
        Serializable beforeValue = before.get(prop);
        Serializable afterValue = after.get(prop);
        if (!EqualsHelper.nullSafeEquals(beforeValue, afterValue)) {
            diffSize++;
        }
    }
    return diffSize;
}
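findChangedProps unions the key sets of both maps and then strips an exclusion list via removeAll before comparing values. A compact, self-contained sketch of that map-diff idiom, with assumed String keys in place of Alfresco's QName type:

import java.io.Serializable;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

class MapDiffSketch {
    // Counts keys whose values differ between the two maps, ignoring 'excluded' keys.
    static int countChanged(Map<String, Serializable> before, Map<String, Serializable> after,
            Set<String> excluded) {
        Set<String> keys = new HashSet<String>(after.keySet());
        keys.addAll(before.keySet());   // union of both key sets
        keys.removeAll(excluded);       // drop keys we never want to compare
        int diff = 0;
        for (String key : keys) {
            if (!Objects.equals(before.get(key), after.get(key))) { // null-safe comparison
                diff++;
            }
        }
        return diff;
    }
}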
From source file:com.alibaba.jstorm.daemon.worker.WorkerData.java
public void updateWorkerToResource(Set<ResourceWorkerSlot> workers) {
    synchronized (workerToResource) {
        // Take a defensive copy of the current assignment
        // (the original source called clone() on the concrete set type)
        Set<ResourceWorkerSlot> oldWorkers = new HashSet<ResourceWorkerSlot>(workerToResource);
        oldWorkers.removeAll(workers);
        if (oldWorkers.size() > 0) {
            workerToResource.removeAll(workers);
        }
        workerToResource.addAll(workers);
    }
}
From source file:com.ag.repo.transfer.RepoPrimaryManifestProcessorImpl.java
/**
 * @param node
 * @param resolvedNodes
 * @param primaryParentAssoc
 */
private void create(TransferManifestNormalNode node, ResolvedParentChildPair resolvedNodes,
        ChildAssociationRef primaryParentAssoc) {
    log.info("Creating new node with noderef " + node.getNodeRef());

    QName parentAssocType = primaryParentAssoc.getTypeQName();
    QName parentAssocName = primaryParentAssoc.getQName();
    NodeRef parentNodeRef = resolvedNodes.resolvedParent;

    if (parentNodeRef == null) {
        if (log.isDebugEnabled()) {
            log.debug("Unable to resolve parent for inbound noderef " + node.getNodeRef()
                    + ".\n  Supplied parent noderef is " + primaryParentAssoc.getParentRef()
                    + ".\n  Supplied parent path is " + node.getParentPath().toString());
        }
        // We can't find the node's parent.
        // We'll store the node in a temporary location and record it for later processing
        ChildAssociationRef tempLocation = getTemporaryLocation(node.getNodeRef());
        parentNodeRef = tempLocation.getParentRef();
        parentAssocType = tempLocation.getTypeQName();
        parentAssocName = tempLocation.getQName();
        log.info("Recording orphaned transfer node: " + node.getNodeRef());
        storeOrphanNode(primaryParentAssoc);
    }
    // We now know that this is a new node, and we have found the appropriate parent node
    // in the local repository.
    log.info("Resolved parent node to " + parentNodeRef);

    // We need to process content properties separately.
    // First, create a shallow copy of the supplied property map...
    Map<QName, Serializable> props = new HashMap<QName, Serializable>(node.getProperties());

    // Split out the content properties and sanitise the others
    Map<QName, Serializable> contentProps = processProperties(null, props, true);

    // Create the corresponding node...
    ChildAssociationRef newNode = nodeService.createNode(parentNodeRef, parentAssocType, parentAssocName,
            node.getType(), props);

    if (log.isDebugEnabled()) {
        log.debug("Created new node (" + newNode.getChildRef() + ") parented by node "
                + newNode.getParentRef());
    }

    // Deal with the content properties
    writeContent(newNode.getChildRef(), contentProps);

    // Apply any aspects that are needed but haven't automatically been applied
    Set<QName> aspects = new HashSet<QName>(node.getAspects());
    aspects.removeAll(nodeService.getAspects(newNode.getChildRef()));
    for (QName aspect : aspects) {
        nodeService.addAspect(newNode.getChildRef(), aspect, null);
    }

    // Is the node that we've just added the parent of any orphans that we've found earlier?
    checkOrphans(newNode.getChildRef());
}
From source file:org.syncope.core.workflow.ActivitiUserWorkflowAdapter.java
private Set<String> doExecuteTask(final SyncopeUser user, final String task,
        final Map<String, Object> moreVariables) throws WorkflowException {

    Set<String> preTasks = getPerformedTasks(user);

    final Map<String, Object> variables = new HashMap<String, Object>();
    variables.put(SYNCOPE_USER, user);
    variables.put(TASK, task);
    if (moreVariables != null && !moreVariables.isEmpty()) {
        variables.putAll(moreVariables);
    }

    if (StringUtils.isBlank(user.getWorkflowId())) {
        throw new WorkflowException(new NotFoundException("Empty workflow id"));
    }

    List<Task> tasks = taskService.createTaskQuery().processInstanceId(user.getWorkflowId()).list();
    if (tasks.size() != 1) {
        LOG.warn("Expected a single task, found {}", tasks.size());
    } else {
        try {
            taskService.complete(tasks.get(0).getId(), variables);
        } catch (ActivitiException e) {
            throw new WorkflowException(e);
        }
    }

    Set<String> postTasks = getPerformedTasks(user);
    postTasks.removeAll(preTasks);
    postTasks.add(task);
    return postTasks;
}
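Both Syncope examples in this listing rely on the same snapshot idiom: capture the set before an operation, re-read it afterwards, and removeAll the "before" snapshot to isolate what the operation added. A minimal sketch of that pattern; the names are illustrative, not from Syncope:

import java.util.HashSet;
import java.util.Set;

class SnapshotDiffSketch {
    // Returns the elements present in 'after' but not in 'before' - exactly what the
    // workflow examples compute to find newly performed tasks.
    static Set<String> newlyAdded(Set<String> before, Set<String> after) {
        Set<String> added = new HashSet<String>(after); // copy so the caller's set is untouched
        added.removeAll(before);
        return added;
    }
}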
From source file:com.ag.repo.transfer.RepoPrimaryManifestProcessorImpl.java
/**
 * @param node
 * @param resolvedNodes
 * @param primaryParentAssoc
 */
private void update(TransferManifestNormalNode node, ResolvedParentChildPair resolvedNodes,
        ChildAssociationRef primaryParentAssoc) {
    NodeRef nodeToUpdate = resolvedNodes.resolvedChild;

    QName parentAssocType = primaryParentAssoc.getTypeQName();
    QName parentAssocName = primaryParentAssoc.getQName();
    NodeRef parentNodeRef = resolvedNodes.resolvedParent;
    if (parentNodeRef == null) {
        // We can't find the node's parent.
        // We'll store the node in a temporary location and record it for later processing
        ChildAssociationRef tempLocation = getTemporaryLocation(node.getNodeRef());
        parentNodeRef = tempLocation.getParentRef();
        parentAssocType = tempLocation.getTypeQName();
        parentAssocName = tempLocation.getQName();
        storeOrphanNode(primaryParentAssoc);
    }

    // First of all, do we need to move the node? If any aspect of the primary parent
    // association has changed, then the answer is "yes".
    ChildAssociationRef currentParent = nodeService.getPrimaryParent(nodeToUpdate);
    if (!currentParent.getParentRef().equals(parentNodeRef)
            || !currentParent.getTypeQName().equals(parentAssocType)
            || !currentParent.getQName().equals(parentAssocName)) {
        // Yes, we need to move the node
        nodeService.moveNode(nodeToUpdate, parentNodeRef, parentAssocType, parentAssocName);
    }

    log.info("Resolved parent node to " + parentNodeRef);

    if (updateNeeded(node, nodeToUpdate)) {
        // We need to process content properties separately.
        // First, create a shallow copy of the supplied property map...
        Map<QName, Serializable> props = new HashMap<QName, Serializable>(node.getProperties());

        // Split out the content properties and sanitise the others
        Map<QName, Serializable> contentProps = processProperties(nodeToUpdate, props, false);

        // Update the non-content properties
        nodeService.setProperties(nodeToUpdate, props);

        // Deal with the content properties
        writeContent(nodeToUpdate, contentProps);

        // Blend the aspects together
        Set<QName> suppliedAspects = new HashSet<QName>(node.getAspects());
        Set<QName> existingAspects = nodeService.getAspects(nodeToUpdate);
        Set<QName> aspectsToRemove = new HashSet<QName>(existingAspects);

        aspectsToRemove.removeAll(suppliedAspects);
        suppliedAspects.removeAll(existingAspects);

        // Now aspectsToRemove contains the set of aspects to remove,
        // and suppliedAspects contains the set of aspects to add
        for (QName aspect : suppliedAspects) {
            nodeService.addAspect(nodeToUpdate, aspect, null);
        }
        for (QName aspect : aspectsToRemove) {
            nodeService.removeAspect(nodeToUpdate, aspect);
        }
    }
}
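The aspect "blend" above is a two-way diff: copy each set, removeAll the other, and you get the additions and the removals in one pass. A hedged, self-contained sketch of the same idiom with plain strings:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class TwoWayDiffSketch {
    public static void main(String[] args) {
        Set<String> supplied = new HashSet<String>(Arrays.asList("a", "b", "c"));
        Set<String> existing = new HashSet<String>(Arrays.asList("b", "c", "d"));

        // Copy each set before calling removeAll, since removeAll mutates its receiver
        Set<String> toRemove = new HashSet<String>(existing);
        toRemove.removeAll(supplied); // present only in 'existing'
        Set<String> toAdd = new HashSet<String>(supplied);
        toAdd.removeAll(existing);    // present only in 'supplied'

        System.out.println(toAdd);    // [a]
        System.out.println(toRemove); // [d]
    }
}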
From source file:org.paxml.launch.LaunchModelBuilder.java
private void buildResources(OMElement root, boolean detach) {
    for (OMElement ele : AxiomUtils.getElements(root, "resource")) {
        ResourceMatcher matcher = parseIncludeAndExclude(ele);

        Set<PaxmlResource> include = new LinkedHashSet<PaxmlResource>(0);
        Set<PaxmlResource> exclude = new LinkedHashSet<PaxmlResource>(0);

        for (String pattern : matcher.include) {
            include.addAll(ResourceLocator.findResources(pattern, planFile));
        }
        for (String pattern : matcher.exclude) {
            exclude.addAll(ResourceLocator.findResources(pattern, planFile));
        }
        include.removeAll(exclude);

        final PaxmlResource planFileResource;
        try {
            planFileResource = PaxmlResource.createFromPath(planFile.getURI().toString());
        } catch (IOException e) {
            throw new PaxmlRuntimeException(e);
        }
        include.remove(planFileResource);

        model.getConfig().getResources().addAll(include);

        if (detach) {
            ele.detach();
        }
    }
}
From source file:org.syncope.core.workflow.ActivitiUserWorkflowAdapter.java
@Override
public WorkflowResult<Map.Entry<Long, String>> submitForm(final WorkflowFormTO form, final String username)
        throws NotFoundException, WorkflowException {

    Map.Entry<Task, TaskFormData> checked = checkTask(form.getTaskId(), username);

    if (!checked.getKey().getOwner().equals(username)) {
        throw new WorkflowException(new RuntimeException("Task " + form.getTaskId() + " assigned to "
                + checked.getKey().getOwner() + " but submitted by " + username));
    }

    SyncopeUser user = userDAO.findByWorkflowId(checked.getKey().getProcessInstanceId());
    if (user == null) {
        throw new NotFoundException("User with workflow id " + checked.getKey().getProcessInstanceId());
    }

    Set<String> preTasks = getPerformedTasks(user);
    try {
        formService.submitTaskFormData(form.getTaskId(), form.getPropertiesForSubmit());
    } catch (ActivitiException e) {
        throw new WorkflowException(e);
    }

    Set<String> postTasks = getPerformedTasks(user);
    postTasks.removeAll(preTasks);
    postTasks.add(form.getTaskId());

    updateStatus(user);
    SyncopeUser updated = userDAO.save(user);

    // See if there is any propagation to be done
    PropagationByResource propByRes = (PropagationByResource) runtimeService.getVariable(user.getWorkflowId(),
            PROP_BY_RESOURCE);

    // Fetch - if available - the encrypted password
    String clearPassword = null;
    String encryptedPwd = (String) runtimeService.getVariable(user.getWorkflowId(), ENCRYPTED_PWD);
    if (StringUtils.isNotBlank(encryptedPwd)) {
        clearPassword = decrypt(encryptedPwd);
    }

    return new WorkflowResult<Map.Entry<Long, String>>(new DefaultMapEntry(updated.getId(), clearPassword),
            propByRes, postTasks);
}
From source file:com.architexa.diagrams.relo.graph.GraphLayoutManager.java
private void adjustNodeCoords(Graph graph, Map<AbstractGraphicalEditPart, Object> partsToCellMap,
        Point defaultPos) {
    Set<AbstractGraphicalEditPart> newParts = new HashSet<AbstractGraphicalEditPart>();
    newParts.addAll(partsToCellMap.keySet());
    newParts.removeAll(ageMgr.getOldParts());

    Set<AbstractGraphicalEditPart> newEdges = new HashSet<AbstractGraphicalEditPart>(newParts);
    // Keep only the non-connection parts (nodes) in newParts...
    CollectionUtils.filter(newParts,
            PredicateUtils.notPredicate(PredicateUtils.instanceofPredicate(AbstractConnectionEditPart.class)));
    // ...so whatever is left after removing the nodes must be the new edges
    newEdges.removeAll(newParts);

    // Iterate over a copy, since we remove from newEdges inside the loop
    for (AbstractGraphicalEditPart edge : new HashSet<AbstractGraphicalEditPart>(newEdges)) {
        if (nodeConstrainedConn((AbstractConnectionEditPart) edge)) {
            newEdges.remove(edge);
        }
    }

    GraphLayoutRules.assertRulesForNewParts(graph, newParts, newEdges, partsToCellMap, ageMgr, defaultPos,
            this, rootEditPart);
}
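The layout code above partitions the new parts by filtering one set in place and then calling removeAll to leave only the complement in a second set. A generic sketch of that partition idiom, using plain stand-in objects instead of GEF edit parts:

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

public class PartitionSketch {
    public static void main(String[] args) {
        Set<Object> parts = new HashSet<Object>();
        parts.add("node-1");           // stand-in for a node edit part
        parts.add(Integer.valueOf(7)); // stand-in for a connection edit part

        Set<Object> edges = new HashSet<Object>(parts); // start with everything
        // Keep only the Strings ("nodes") in 'parts'...
        for (Iterator<Object> it = parts.iterator(); it.hasNext();) {
            if (!(it.next() instanceof String)) {
                it.remove();
            }
        }
        // ...then whatever is not a node must be an edge
        edges.removeAll(parts);

        System.out.println(parts); // [node-1]
        System.out.println(edges); // [7]
    }
}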
From source file:Main.java
static <E> Set<E> ungrowableSet(final Set<E> s) {
    // A view that delegates reads and shrinking operations to the backing set,
    // but refuses anything that would grow it.
    return new Set<E>() {
        @Override public int size()                 { return s.size(); }
        @Override public boolean isEmpty()          { return s.isEmpty(); }
        @Override public boolean contains(Object o) { return s.contains(o); }
        @Override public Object[] toArray()         { return s.toArray(); }
        @Override public <T> T[] toArray(T[] a)     { return s.toArray(a); }
        @Override public String toString()          { return s.toString(); }
        @Override public Iterator<E> iterator()     { return s.iterator(); }
        @Override public boolean equals(Object o)   { return s.equals(o); }
        @Override public int hashCode()             { return s.hashCode(); }
        @Override public void clear()               { s.clear(); }
        @Override public boolean remove(Object o)   { return s.remove(o); }
        @Override public boolean containsAll(Collection<?> coll) { return s.containsAll(coll); }
        @Override public boolean removeAll(Collection<?> coll)   { return s.removeAll(coll); }
        @Override public boolean retainAll(Collection<?> coll)   { return s.retainAll(coll); }

        @Override public boolean add(E o) {
            throw new UnsupportedOperationException();
        }
        @Override public boolean addAll(Collection<? extends E> coll) {
            throw new UnsupportedOperationException();
        }
    };
}
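A usage sketch for the wrapper above: shrinking operations, including removeAll, pass through to the backing set, while growing operations fail fast. It assumes ungrowableSet(...) from the preceding example is in scope.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class UngrowableSetDemo {
    public static void main(String[] args) {
        Set<String> backing = new HashSet<String>(Arrays.asList("a", "b", "c"));
        Set<String> view = ungrowableSet(backing);

        view.removeAll(Arrays.asList("a", "b")); // allowed: delegates to the backing set
        System.out.println(backing);             // [c] - the backing set shrank too

        try {
            view.add("d");                       // growing the view is rejected
        } catch (UnsupportedOperationException expected) {
            System.out.println("add() refused, as designed");
        }
    }
}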