List of usage examples for java.util.Collection.removeAll
boolean removeAll(Collection<?> c);
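Collection.removeAll removes from the receiving collection every element that is also contained in the argument collection, and returns true if the receiver changed as a result of the call. A minimal, self-contained sketch of that contract (class and variable names are purely illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RemoveAllBasics {
    public static void main(String[] args) {
        List<String> colors = new ArrayList<>(Arrays.asList("red", "green", "blue", "green"));
        List<String> unwanted = Arrays.asList("green", "purple");

        // Removes every occurrence of "green"; "purple" is simply not present, so it is ignored.
        boolean changed = colors.removeAll(unwanted);

        System.out.println(changed); // true
        System.out.println(colors);  // [red, blue]
    }
}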
From source file:com.pinterest.teletraan.worker.ClusterReplacer.java
/**
 * Step 2. LAUNCHING state should make sure all the hosts are in RUNNING state and serving builds.
 * If some hosts are terminated or the deploy failed, go back to INIT state to relaunch.
 */
private void processLaunchingState(ClusterUpgradeEventBean eventBean) throws Exception {
    String clusterName = eventBean.getCluster_name();
    Collection<String> hostIds = Arrays.asList(eventBean.getHost_ids().split(","));

    // 1. make sure every host is running
    Set<String> failedIds = hostInfoDAO.getTerminatedHosts(new HashSet<>(hostIds));
    List<String> runningIds = hostInfoDAO.getRunningInstances(new ArrayList<>(hostIds));

    // 2. make sure hosts are serving builds
    boolean succeeded = true;
    for (String hostId : runningIds) {
        List<AgentBean> agents = agentDAO.getByHostId(hostId);
        if (agents.isEmpty()) {
            LOG.info(String.format("Host %s has not pinged the server yet", hostId));
            succeeded = false;
            continue;
        }

        // Make sure every env on the host is serving a build
        for (AgentBean agent : agents) {
            if (agent.getDeploy_stage() != DeployStage.SERVING_BUILD) {
                succeeded = false;
                if (agent.getStatus() != AgentStatus.SUCCEEDED && agent.getStatus() != AgentStatus.UNKNOWN
                        && agent.getStatus() != AgentStatus.SCRIPT_FAILED) {
                    LOG.info(String.format("Deploy failed on host %s", hostId));
                    failedIds.add(hostId);
                }
            }
        }
    }

    // 3. if failed hosts were found, terminate them and go back to INIT state to relaunch hosts
    if (!failedIds.isEmpty()) {
        // Copy into a modifiable list: Arrays.asList returns a fixed-size list whose
        // removeAll throws UnsupportedOperationException when elements must be removed.
        Collection<String> updateHostIds = new ArrayList<>(Arrays.asList(eventBean.getHost_ids().split(",")));
        updateHostIds.removeAll(failedIds);
        clusterManager.terminateHosts(clusterName, failedIds, true);
        LOG.info(String.format("Successfully terminated failed hosts %s, going back to INIT state",
                failedIds.toString()));

        ClusterUpgradeEventBean updateBean = new ClusterUpgradeEventBean();
        updateBean.setHost_ids(Joiner.on(",").join(updateHostIds));
        updateBean.setState(ClusterUpgradeEventState.INIT);
        updateBean.setStatus(ClusterUpgradeEventStatus.SUCCEEDED);
        transitionState(eventBean.getId(), updateBean);
        return;
    }

    if (succeeded) {
        LOG.info("Successfully completed LAUNCHING state, move to REPLACING state");
        ClusterUpgradeEventBean updateBean = new ClusterUpgradeEventBean();
        updateBean.setState(ClusterUpgradeEventState.REPLACING);
        updateBean.setStatus(ClusterUpgradeEventStatus.SUCCEEDED);
        transitionState(eventBean.getId(), updateBean);
    }
}
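One caveat that removeAll callers run into: the method mutates the receiver, so the receiving collection must support element removal. The fixed-size list returned by Arrays.asList does not, and calling removeAll on it throws UnsupportedOperationException as soon as an element actually has to be removed; copying into an ArrayList first avoids this. A small sketch of the difference (the host ids are made-up values for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RemoveAllFixedSizeListDemo {
    public static void main(String[] args) {
        List<String> failedIds = Arrays.asList("i-2");

        // Arrays.asList returns a fixed-size view backed by the array:
        // removeAll fails once it actually tries to remove "i-2".
        List<String> fixedSize = Arrays.asList("i-1", "i-2", "i-3");
        try {
            fixedSize.removeAll(failedIds);
        } catch (UnsupportedOperationException e) {
            System.out.println("fixed-size list cannot be modified");
        }

        // Copying into a modifiable ArrayList first makes removeAll safe.
        List<String> modifiable = new ArrayList<>(Arrays.asList("i-1", "i-2", "i-3"));
        modifiable.removeAll(failedIds);
        System.out.println(modifiable); // [i-1, i-3]
    }
}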
From source file:org.apache.ambari.server.topology.LogicalRequest.java
public Collection<HostRequest> getCompletedHostRequests() {
    Collection<HostRequest> completedHostRequests = new ArrayList<HostRequest>(allHostRequests);
    completedHostRequests.removeAll(outstandingHostRequests);
    completedHostRequests.removeAll(requestsWithReservedHosts.values());

    return completedHostRequests;
}
From source file:dk.nsi.haiba.lprimporter.importer.ImportExecutor.java
private void addSygehusInitials(Collection<Codes> newSygehusKoder) {
    Collection<Codes> temp = new ArrayList<Codes>();
    for (Codes codes : newSygehusKoder) {
        // these are new, so find out where they are used (find the date they are used to determine exact initials)
        String code = codes.getCode();
        if (code.startsWith("3800")) {
            temp.add(codes);
        }
    }
    newSygehusKoder.removeAll(temp);

    for (Codes codes : temp) {
        String code = codes.getCode();
        String secondaryCode = codes.getSecondaryCode();
        Collection<Date> inDates = lprdao.getInDatesForSygehusKoder(code, secondaryCode);
        // put the code back, now with initials (multiple initials are possible)
        for (Date in : inDates) {
            String sygehusInitials = haibaDao.getSygehusInitials(code, secondaryCode, in);
            log.debug("addSygehusInitials: added '" + sygehusInitials + "' to " + code + "/" + secondaryCode);
            newSygehusKoder.add(new CodesImpl(code + sygehusInitials, secondaryCode));
        }
    }
}
From source file:org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.EnforceVariablesVisitor.java
/**
 * Wraps an AggregateOperator or RunningAggregateOperator with a group-by operator where
 * the group-by keys are variables in varsToRecover.
 * Note that this prevents the visitor from being used to rewrite arbitrary query plans;
 * it can only be used to rewrite a nested plan within a subplan operator.
 *
 * @param op
 *            the logical operator for aggregate or running aggregate.
 * @param varsToRecover
 *            the set of variables that need to be preserved.
 * @return the wrapped group-by operator if {@code varsToRecover} is not empty, and {@code op} otherwise.
 * @throws AlgebricksException
 */
private ILogicalOperator rewriteAggregateOperator(ILogicalOperator op, Collection<LogicalVariable> varsToRecover)
        throws AlgebricksException {
    Set<LogicalVariable> liveVars = new HashSet<>();
    VariableUtilities.getLiveVariables(op, liveVars);
    varsToRecover.removeAll(liveVars);

    GroupByOperator gbyOp = new GroupByOperator();
    for (LogicalVariable varToRecover : varsToRecover) {
        // This limits the visitor to nested logical plans inside a Subplan operator, where
        // varsToRecover forms a candidate key which can uniquely identify a tuple out of the nested-tuple-source.
        LogicalVariable newVar = context.newVar();
        gbyOp.getGroupByList().add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(newVar,
                new MutableObject<ILogicalExpression>(new VariableReferenceExpression(varToRecover))));
        updateVarMapping(varToRecover, newVar);
    }

    // Capture the original input of op before rewiring op to read from the nested tuple source.
    ILogicalOperator inputOp = op.getInputs().get(0).getValue();
    NestedTupleSourceOperator nts = new NestedTupleSourceOperator(new MutableObject<ILogicalOperator>(gbyOp));
    op.getInputs().clear();
    op.getInputs().add(new MutableObject<ILogicalOperator>(nts));

    ILogicalPlan nestedPlan = new ALogicalPlanImpl();
    nestedPlan.getRoots().add(new MutableObject<ILogicalOperator>(op));
    gbyOp.getNestedPlans().add(nestedPlan);
    gbyOp.getInputs().add(new MutableObject<ILogicalOperator>(inputOp));
    OperatorManipulationUtil.computeTypeEnvironmentBottomUp(op, context);

    return visitsInputs(gbyOp, varsToRecover);
}
From source file:ubic.gemma.web.taglib.common.auditAndSecurity.WhatsNewBoxTag.java
@Override
public int doStartTag() throws JspException {
    Collection<ExpressionExperiment> newExpressionExperiments = whatsNew.getNewExpressionExperiments();
    Collection<ArrayDesign> newArrayDesigns = whatsNew.getNewArrayDesigns();
    Collection<ExpressionExperiment> updatedExpressionExperiments = whatsNew.getUpdatedExpressionExperiments();
    Collection<ArrayDesign> updatedArrayDesigns = whatsNew.getUpdatedArrayDesigns();

    // don't show things that are "new" as "updated" too (if they were updated after being loaded)
    updatedExpressionExperiments.removeAll(newExpressionExperiments);
    updatedArrayDesigns.removeAll(newArrayDesigns);

    StringBuilder buf = new StringBuilder();

    if (newArrayDesigns.size() == 0 && newExpressionExperiments.size() == 0
            && updatedExpressionExperiments.size() == 0 && updatedArrayDesigns.size() == 0) {
        buf.append("<input type='hidden' name='nothing new' />");
    } else {
        buf.append(" <strong>Changes in the");

        Date date = whatsNew.getDate();
        Date now = Calendar.getInstance().getTime();
        long millis = now.getTime() - date.getTime();
        double days = millis / (double) DateUtils.MILLIS_PER_DAY;
        if (days > 0.9 && days < 2.0) {
            buf.append(" last day");
        } else if (days < 8) {
            buf.append(" last week");
        } else {
            NumberFormat nf = NumberFormat.getIntegerInstance();
            buf.append(" last " + nf.format(days) + " days");
        }
        buf.append("</strong> ");
        buf.append("<p>");

        int numEEs = newExpressionExperiments.size();
        int numADs = newArrayDesigns.size();
        int updatedAds = updatedArrayDesigns.size();
        int updatedEEs = updatedExpressionExperiments.size();

        if (numEEs > 0) {
            int count = 0;
            boolean tooMany = false;
            List<Long> ids = new ArrayList<Long>();
            for (ExpressionExperiment ee : newExpressionExperiments) {
                ids.add(ee.getId());
                if (++count > MAX_NEW_ITEMS) {
                    tooMany = true;
                    break;
                }
            }
            buf.append("<a " + (tooMany ? "title='View the first " + MAX_NEW_ITEMS + "'" : "")
                    + " href=\"/Gemma/expressionExperiment/showAllExpressionExperiments.html?id=");
            buf.append(StringUtils.join(ids, ","));
            buf.append("\">" + numEEs + " new data set" + (numEEs > 1 ? "s" : "") + "</a>.<br />");
        }

        if (numADs > 0) {
            int count = 0;
            boolean tooMany = false;
            List<Long> ids = new ArrayList<Long>();
            for (ArrayDesign ad : newArrayDesigns) {
                // buf.append( ad.getId() + "," );
                ids.add(ad.getId());
                if (++count > MAX_NEW_ITEMS) {
                    tooMany = true;
                    break;
                }
            }
            buf.append("<a " + (tooMany ? "title='View the first " + MAX_NEW_ITEMS + "'" : "")
                    + " href=\"/Gemma/arrays/showAllArrayDesigns.html?id=");
            buf.append(StringUtils.join(ids, ","));
            buf.append("\">" + numADs + " new platform" + (numADs > 1 ? "s" : "") + "</a>.<br />");
        }

        if (updatedEEs > 0) {
            boolean tooMany = false;
            List<Long> ids = new ArrayList<Long>();
            int count = 0;
            for (ExpressionExperiment ee : updatedExpressionExperiments) {
                ids.add(ee.getId());
                if (++count > MAX_NEW_ITEMS) {
                    tooMany = true;
                    break;
                }
            }
            buf.append("<a " + (tooMany ? "title='View the first " + MAX_NEW_ITEMS + "'" : "")
                    + " href=\"/Gemma/expressionExperiment/showAllExpressionExperiments.html?id=");
            buf.append(StringUtils.join(ids, ","));
            buf.append("\">" + updatedEEs + " updated data set" + (updatedEEs > 1 ? "s" : "") + "</a>.<br />");
        }

        if (updatedAds > 0) {
            int count = 0;
            boolean tooMany = false;
            List<Long> ids = new ArrayList<Long>();
            for (ArrayDesign ad : updatedArrayDesigns) {
                ids.add(ad.getId());
                if (++count > MAX_NEW_ITEMS) {
                    tooMany = true;
                    break;
                }
            }
            buf.append("<a " + (tooMany ? "title='View the first " + MAX_NEW_ITEMS + "'" : "")
                    + " href=\"/Gemma/arrays/showAllArrayDesigns.html?id=");
            buf.append(StringUtils.join(ids, ","));
            buf.append("\">" + updatedAds + " updated platform" + (updatedAds > 1 ? "s" : "") + "</a>.<br />");
        }

        buf.append("</p>");
    }

    try {
        pageContext.getOut().print(buf.toString());
    } catch (Exception ex) {
        throw new JspException("ContactTag: " + ex.getMessage());
    }
    return SKIP_BODY;
}
From source file:org.apache.cassandra.dht.RangeStreamer.java
public StreamResultFuture fetchAsync() {
    for (Map.Entry<String, Map.Entry<InetAddress, Collection<Range<Token>>>> entry : toFetch.entries()) {
        String keyspace = entry.getKey();
        InetAddress source = entry.getValue().getKey();
        InetAddress preferred = SystemKeyspace.getPreferredIP(source);
        Collection<Range<Token>> ranges = entry.getValue().getValue();

        // filter out already streamed ranges
        Set<Range<Token>> availableRanges = stateStore.getAvailableRanges(keyspace,
                StorageService.getPartitioner());
        if (ranges.removeAll(availableRanges)) {
            logger.info("Some ranges of {} are already available. Skipping streaming those ranges.",
                    availableRanges);
        }

        if (logger.isTraceEnabled())
            logger.trace("{}ing from {} ranges {}", description, source, StringUtils.join(ranges, ", "));

        /* Send messages to respective folks to stream data over to me */
        streamPlan.requestRanges(source, preferred, keyspace, ranges);
    }

    return streamPlan.execute();
}
From source file:org.apache.hyracks.algebricks.core.algebra.operators.logical.visitors.EnforceVariablesVisitor.java
@Override
public ILogicalOperator visitGroupByOperator(GroupByOperator op, Collection<LogicalVariable> varsToRecover)
        throws AlgebricksException {
    Set<LogicalVariable> liveVars = new HashSet<>();
    VariableUtilities.getLiveVariables(op, liveVars);
    varsToRecover.removeAll(liveVars);

    // Maps group-by key variables if the corresponding expressions are VariableReferenceExpressions.
    for (Pair<LogicalVariable, Mutable<ILogicalExpression>> keyVarExprRef : op.getGroupByList()) {
        ILogicalExpression expr = keyVarExprRef.second.getValue();
        if (expr.getExpressionTag() == LogicalExpressionTag.VARIABLE) {
            VariableReferenceExpression varExpr = (VariableReferenceExpression) expr;
            LogicalVariable sourceVar = varExpr.getVariableReference();
            updateVarMapping(sourceVar, keyVarExprRef.first);
            varsToRecover.remove(sourceVar);
        }
    }

    for (LogicalVariable varToRecover : varsToRecover) {
        // This limits the visitor to nested logical plans inside a Subplan operator, where
        // varsToRecover forms a candidate key which can uniquely identify a tuple out of the nested-tuple-source.
        op.getDecorList().add(new Pair<LogicalVariable, Mutable<ILogicalExpression>>(null,
                new MutableObject<ILogicalExpression>(new VariableReferenceExpression(varToRecover))));
    }
    return visitsInputs(op, varsToRecover);
}
From source file:graph.inference.module.DisjointWithWorker.java
private void siblingDisjointViaModule(DAGNode atomic, QueryObject queryObj) {
    if (queryObj.isProof()) {
        DAGNode otherNode = (DAGNode) queryObj.getNode(2);

        // Find the unique parents for each set
        Collection<Node> atomicParents = CommonQuery.ALLGENLS.runQuery(dag_, atomic);
        Collection<Node> otherParents = CommonQuery.ALLGENLS.runQuery(dag_, otherNode);
        Collection<Node> commonParents = CollectionUtils.retainAll(atomicParents, otherParents);
        atomicParents.removeAll(commonParents);
        otherParents.removeAll(commonParents);

        // Find the sibling disjoint collections for atomics
        Collection<DAGNode> atomicSibCols = new HashSet<>();
        for (Node atomicParent : atomicParents)
            if (atomicParent instanceof DAGNode)
                atomicSibCols.addAll(sibModule_.getSiblingDisjointParents((DAGNode) atomicParent));

        // Search for match in others
        for (Node otherParent : otherParents) {
            if (otherParent instanceof DAGNode)
                if (CollectionUtils.containsAny(atomicSibCols,
                        sibModule_.getSiblingDisjointParents((DAGNode) otherParent))) {
                    // A match!
                    if (!isException(atomic, otherNode)) {
                        queryObj.addResult(new Substitution());
                        // processSiblingJustification(, sub.getSubstitution(transTwo),
                        // sub.getSubstitution(queryVar), queryObj);
                    }
                    return;
                }
        }
    } else {
    }
}
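The example above uses Apache Commons CollectionUtils to split two parent collections into a common part and the parts unique to each side. The same partition can be written with only java.util by copying each set before calling retainAll or removeAll, since both methods mutate the receiver; the class and element names below are purely illustrative:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class PartitionWithRemoveAll {
    public static void main(String[] args) {
        Set<String> atomicParents = new HashSet<>(Arrays.asList("Animal", "Mammal", "Pet"));
        Set<String> otherParents = new HashSet<>(Arrays.asList("Animal", "Reptile", "Pet"));

        // Intersection: elements present in both sets.
        Set<String> commonParents = new HashSet<>(atomicParents);
        commonParents.retainAll(otherParents);

        // Unique to each side: remove the shared elements from copies of the originals.
        Set<String> atomicOnly = new HashSet<>(atomicParents);
        atomicOnly.removeAll(commonParents);
        Set<String> otherOnly = new HashSet<>(otherParents);
        otherOnly.removeAll(commonParents);

        System.out.println(commonParents); // [Animal, Pet] (iteration order may vary)
        System.out.println(atomicOnly);    // [Mammal]
        System.out.println(otherOnly);     // [Reptile]
    }
}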
From source file:com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState.java
@Override
public String backup(long batchId) {
    try {
        String hdfsCpDir = getRemoteCheckpointPath(batchId);
        String batchCpPath = getLocalCheckpointPath(batchId);
        long startTime = System.currentTimeMillis();

        // upload sst data files to hdfs
        Collection<File> sstFiles = FileUtils.listFiles(new File(batchCpPath),
                new String[] { ROCKSDB_DATA_FILE_EXT }, false);
        for (File sstFile : sstFiles) {
            if (!lastCheckpointFiles.contains(sstFile.getName())) {
                hdfsCache.copyToDfs(batchCpPath + "/" + sstFile.getName(), hdfsDbDir, true);
            }
        }

        // upload sstFile.list, CURRENT, MANIFEST to hdfs
        Collection<String> sstFileList = getFileList(sstFiles);
        File cpFileList = new File(batchCpPath + "/" + SST_FILE_LIST);
        FileUtils.writeLines(cpFileList, sstFileList);
        if (hdfsCache.exist(hdfsCpDir))
            hdfsCache.remove(hdfsCpDir, true);
        hdfsCache.mkdir(hdfsCpDir);

        Collection<File> allFiles = FileUtils.listFiles(new File(batchCpPath), null, false);
        allFiles.removeAll(sstFiles);
        Collection<File> nonSstFiles = allFiles;
        for (File nonSstFile : nonSstFiles) {
            hdfsCache.copyToDfs(batchCpPath + "/" + nonSstFile.getName(), hdfsCpDir, true);
        }

        if (JStormMetrics.enabled)
            hdfsWriteLatency.update(System.currentTimeMillis() - startTime);
        lastCheckpointFiles = sstFileList;
        return hdfsCpDir;
    } catch (IOException e) {
        LOG.error("Failed to upload checkpoint", e);
        throw new RuntimeException(e.getMessage());
    }
}
From source file:lu.lippmann.cdb.graph.GraphUtil.java
/**
 * FIXME : 0,0 for counters and no variables
 *
 * @param g1
 * @param g2
 * @return
 */
public static List<GraphOperation> diff(final CUser user, final Graph<CNode, CEdge> g1,
        final Graph<CNode, CEdge> g2) {
    final List<GraphOperation> res = new ArrayList<GraphOperation>();

    final Collection<CNode> nodes1 = g1.getVertices();
    final Collection<CNode> toBeRemoved = new ArrayList<CNode>(nodes1);
    final Collection<CNode> nodes2 = g2.getVertices();
    final Collection<CNode> toBeAdded = new ArrayList<CNode>(nodes2);

    final Collection<CEdge> edges1 = g1.getEdges();
    final Collection<CEdge> edgesToBeRemoved = new ArrayList<CEdge>(g1.getEdges());
    final Collection<CEdge> edges2 = g2.getEdges();
    final Collection<CEdge> edgesToBeAdded = new ArrayList<CEdge>(g2.getEdges());

    toBeRemoved.removeAll(nodes2);
    toBeAdded.removeAll(nodes1);

    /** removing useless nodes **/
    for (CNode node : toBeRemoved) {
        res.add(new GraphOperation(0, 0, user, Operation.NODE_REMOVED,
                new ArrayList<Object>(Arrays.asList(node))));
    }

    /** adding new nodes **/
    for (CNode node : toBeAdded) {
        res.add(new GraphOperation(0, 0, user, Operation.NODE_ADDED,
                new ArrayList<Object>(Arrays.asList(node))));
    }

    /** removing edges in G1 that are in G2 (with the same id/name/condition) **/
    for (CEdge edge : edges2) {
        final CEdge foundEdge = g1.findEdge(g2.getSource(edge), g2.getDest(edge));
        if (foundEdge != null) {
            edgesToBeRemoved.remove(foundEdge);
            if (!edge.containsSameFieldsThat(foundEdge)) {
                res.add(new GraphOperation(0, 0, user, Operation.EDGE_DATA_UPDATED,
                        new ArrayList<Object>(Arrays.asList(foundEdge, foundEdge.getName(), edge.getName(),
                                foundEdge.getExpression(), edge.getExpression(), foundEdge.getTags(),
                                edge.getTags()))));
            }
        }
    }

    /** removing edges in G2 that are in G1 (with the same id/name/condition) **/
    for (CEdge edge : edges1) {
        final CEdge foundEdge = g2.findEdge(g1.getSource(edge), g1.getDest(edge));
        if (foundEdge != null) {
            edgesToBeAdded.remove(foundEdge);
        }
    }

    /** removing useless edges **/
    for (CEdge edge : edgesToBeRemoved) {
        res.add(new GraphOperation(0, 0, user, Operation.EDGE_REMOVED,
                new ArrayList<Object>(Arrays.asList(edge, g2.getSource(edge), g2.getDest(edge)))));
    }

    /** adding useful edges **/
    for (CEdge edge : edgesToBeAdded) {
        res.add(new GraphOperation(0, 0, user, Operation.EDGE_ADDED,
                new ArrayList<Object>(Arrays.asList(edge, g2.getSource(edge), g2.getDest(edge)))));
    }

    return res;
}