List of usage examples for java.util.Set.remove
boolean remove(Object o);
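Before the real-world examples, a minimal standalone sketch of the remove(Object) contract (the class and element names here are illustrative, not taken from any of the source files below): the call returns true only when the set actually contained the element, and either way the set ends up without it.

import java.util.HashSet;
import java.util.Set;

public class SetRemoveDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>();
        colors.add("red");
        colors.add("green");

        System.out.println(colors.remove("red"));  // true: "red" was present and is now removed
        System.out.println(colors.remove("blue")); // false: "blue" was never in the set
        System.out.println(colors);                // [green]
    }
}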
From source file:csns.web.controller.RubricSubmissionController.java
@RequestMapping("/rubric/submission/{role}/list")
public String list(@PathVariable String role, @RequestParam Long assignmentId, ModelMap models) {
    RubricAssignment assignment = rubricAssignmentDao.getRubricAssignment(assignmentId);

    Set<User> students = new HashSet<User>();
    for (Enrollment enrollment : assignment.getSection().getEnrollments())
        students.add(enrollment.getStudent());

    // we need to remove the submissions for the students who already
    // dropped the class
    Iterator<RubricSubmission> i = assignment.getSubmissions().iterator();
    while (i.hasNext()) {
        RubricSubmission submission = i.next();
        if (!students.contains(submission.getStudent()))
            i.remove();
        else
            students.remove(submission.getStudent());
    }

    // we then add a submission for each student who is in the class but
    // didn't have a submission for this assignment.
    for (User student : students) {
        RubricSubmission submission = new RubricSubmission(student, assignment);
        assignment.getSubmissions().add(submission);
    }

    assignment = rubricAssignmentDao.saveRubricAssignment(assignment);

    models.put("user", SecurityUtils.getUser());
    models.put("assignment", assignment);

    // Instructor, evaluator, and student will see different views.
    return "rubric/submission/list/" + role;
}
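Note how the example above calls i.remove() on the collection it is iterating and plain Set.remove() only on the other set (students). A condensed sketch of why that distinction matters (names are illustrative): deleting through the iterator is safe during traversal, while mutating the set directly mid-iteration typically fails fast.

import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;

public class RemoveDuringIteration {
    public static void main(String[] args) {
        Set<Integer> numbers = new TreeSet<>(Arrays.asList(1, 2, 3, 4));

        // Safe: delete through the iterator while traversing.
        Iterator<Integer> it = numbers.iterator();
        while (it.hasNext()) {
            if (it.next() % 2 == 0) {
                it.remove();
            }
        }
        System.out.println(numbers); // [1, 3]

        // Unsafe alternative: calling numbers.remove(n) inside a for-each
        // loop over numbers typically throws ConcurrentModificationException.
    }
}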
From source file:com.pinterest.rocksplicator.controller.tasks.AddHostTask.java
@Override
public void process(Context ctx) throws Exception {
    final String clusterName = ctx.getCluster();
    final String hdfsDir = getParameter().getHdfsDir();
    final HostBean hostToAdd = getParameter().getHostToAdd();
    final int rateLimitMbs = getParameter().getRateLimitMbs();
    final Admin.Client client = clientFactory.getClient(hostToAdd);

    // 1) ping the host to add to make sure it's up and running.
    try {
        client.ping();
        // continue if #ping() succeeds.
    } catch (TException tex) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Host to add is not alive!");
        return;
    }

    ClusterBean clusterBean = ZKUtil.getClusterConfig(zkClient, clusterName);
    if (clusterBean == null) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to read cluster config from zookeeper.");
        return;
    }

    for (SegmentBean segment : clusterBean.getSegments()) {
        // 2) find shards to serve for new host
        Set<Integer> shardToServe = IntStream.range(0, segment.getNumShards()).boxed()
                .collect(Collectors.toSet());
        for (HostBean host : segment.getHosts()) {
            // ignore hosts in a different AZ than the new host
            if (host.getAvailabilityZone().equals(hostToAdd.getAvailabilityZone())) {
                host.getShards().forEach(shard -> shardToServe.remove(shard.getId()));
            }
        }

        // 3) upload shard data to the new host
        try {
            for (int shardId : shardToServe) {
                HostBean upstream = findMasterShard(shardId, segment.getHosts());
                if (upstream == null) {
                    // TODO: should we fail the task in this case?
                    LOG.error("Failed to find master shard for segment={}, shardId={}",
                            segment.getName(), shardId);
                    continue;
                }
                Admin.Client upstreamClient = clientFactory.getClient(upstream);
                String dbName = ShardUtil.getDBNameFromSegmentAndShardId(segment.getName(), shardId);
                String hdfsPath = ShardUtil.getHdfsPath(hdfsDir, clusterName, segment.getName(),
                        shardId, upstream.getIp(), getCurrentDateTime());

                upstreamClient.backupDB(new BackupDBRequest(dbName, hdfsPath).setLimit_mbs(rateLimitMbs));
                LOG.info("Backed up {} from {} to {}.", dbName, upstream.getIp(), hdfsPath);

                client.restoreDB(new RestoreDBRequest(dbName, hdfsPath, upstream.getIp(),
                        (short) upstream.getPort()).setLimit_mbs(rateLimitMbs));
                LOG.info("Restored {} from {} to {}.", dbName, hdfsPath, hostToAdd.getIp());
            }
        } catch (TException ex) {
            String errMsg = String.format("Failed to upload shard data to host %s.", hostToAdd.getIp());
            LOG.error(errMsg, ex);
            ctx.getTaskQueue().failTask(ctx.getId(), errMsg);
            return;
        }

        // add shard config to new host
        hostToAdd.setShards(shardToServe.stream()
                .map(id -> new ShardBean().setId(id).setRole(Role.SLAVE))
                .collect(Collectors.toList()));
        List<HostBean> newHostList = segment.getHosts();
        newHostList.add(hostToAdd);
        segment.setHosts(newHostList);
    }

    // 4) update cluster config in zookeeper
    ZKUtil.updateClusterConfig(zkClient, clusterBean);
    LOG.info("Updated config to {}", ConfigParser.serializeClusterConfig(clusterBean));
    ctx.getTaskQueue().finishTask(ctx.getId(),
            "Successfully added host " + hostToAdd.getIp() + ":" + hostToAdd.getPort());
}
From source file:com.ignorelist.kassandra.steam.scraper.Tagger.java
private void addTags(SharedConfig sharedConfig, Long gameId, Options taggerOptions, Set<String> externalTags) {
    Set<String> existingTags = sharedConfig.getTags(gameId);
    if (null != taggerOptions.getWhiteList() && !taggerOptions.getWhiteList().isEmpty()) {
        externalTags.retainAll(taggerOptions.getWhiteList());
    }
    existingTags.addAll(externalTags);
    if (null != taggerOptions.getReplacementMap()) {
        for (Map.Entry<String, String> e : taggerOptions.getReplacementMap().entrySet()) {
            if (existingTags.remove(e.getKey())) {
                existingTags.add(e.getValue());
            }
        }
    }
    if (null != taggerOptions.getRemoveTags()) {
        existingTags.removeAll(taggerOptions.getRemoveTags());
    }
    if (null != taggerOptions.getWhiteList() && !taggerOptions.getWhiteList().isEmpty()
            && taggerOptions.isRemoveNotWhiteListed()) {
        existingTags.retainAll(taggerOptions.getWhiteList());
    }
    sharedConfig.setTags(gameId, existingTags);
}
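The replacement-map loop above leans on the boolean returned by remove to rename a tag only when it is actually present. The same idiom in isolation (the class and tag names are made up for illustration):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ConditionalRename {
    public static void main(String[] args) {
        Set<String> tags = new HashSet<>(Arrays.asList("rpg", "indie"));

        // remove(old) returns true only if the set contained "rpg", so the
        // replacement is added only when there was something to replace.
        if (tags.remove("rpg")) {
            tags.add("role-playing");
        }
        System.out.println(tags); // contains "indie" and "role-playing"
    }
}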
From source file:net.hillsdon.reviki.search.impl.LuceneSearcher.java
public Set<String> outgoingLinks(final String page) throws IOException, PageStoreException {
    if (_dir == null) {
        return Collections.emptySet();
    }
    try {
        return doReadOperation(new ReadOperation<Set<String>>() {
            public Set<String> execute(final IndexReader reader, final Searcher searcher,
                    final Analyzer analyzer) throws IOException, ParseException {
                Hits hits = searcher.search(new TermQuery(new Term(FIELD_PATH, page)));
                Iterator<?> iterator = hits.iterator();
                if (iterator.hasNext()) {
                    Hit hit = (Hit) iterator.next();
                    String outgoingLinks = hit.getDocument().get(FIELD_OUTGOING_LINKS);
                    Set<String> results = Sets.newHashSet(outgoingLinks.split("\\s"));
                    // A page should not list itself among its own outgoing links.
                    results.remove(page);
                    return ImmutableSet.copyOf(results);
                }
                return Collections.emptySet();
            }
        }, false);
    } catch (QuerySyntaxException ex) {
        throw new NoQueryPerformedException(ex);
    }
}
From source file:mitm.djigzo.web.pages.admin.mta.MTAConfig.java
private Object removeNetwork() throws WebServiceCheckedException {
    if (selectedNetwork != null) {
        Set<String> networks = new LinkedHashSet<String>(getMainConfig().getMyNetworks());
        networks.remove(selectedNetwork);
        getMainConfig().setMyNetworks(networks);
    }
    return request.isXHR() ? myNetworksBlock : getKeepConfigReloadLink();
}
From source file:mitm.djigzo.web.pages.admin.mta.MTAConfig.java
private Object removeRelayDomain() throws WebServiceCheckedException {
    if (selectedRelayDomain != null) {
        Set<String> domains = new LinkedHashSet<String>(getMainConfig().getRelayDomains());
        domains.remove(selectedRelayDomain);
        getMainConfig().setRelayDomains(domains);
    }
    return request.isXHR() ? relayDomainsBlock : getKeepConfigReloadLink();
}
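Both MTAConfig handlers follow the same copy-mutate-store pattern: copy the current value into a new LinkedHashSet (preserving insertion order), remove the element from the copy, and write the copy back, so the config object never exposes a half-mutated set. A stripped-down sketch of the idiom (the class, field, and method names here are hypothetical):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class CopyMutateStore {
    // Hypothetical stand-in for the config bean's ordered set of values.
    private Set<String> names = new LinkedHashSet<>(Arrays.asList("a", "b", "c"));

    public void removeName(String name) {
        // Mutate a copy so readers never observe a partially updated set.
        Set<String> copy = new LinkedHashSet<>(names);
        copy.remove(name);
        names = copy; // a single reference write swaps in the new set
    }
}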
From source file:com.aliyun.fs.oss.blk.JetOssFileSystemStore.java
public Set<Path> listDeepSubPaths(Path path) throws IOException {
    try {
        String prefix = pathToKey(path);
        if (!prefix.endsWith(PATH_DELIMITER)) {
            prefix += PATH_DELIMITER;
        }
        // Pass a typed array to toArray(): a bare toArray() returns Object[],
        // which cannot be cast to OSSObject[].
        OSSObject[] objects = ossClient.listObjects(bucket, prefix).getObjectSummaries()
                .toArray(new OSSObject[0]);
        Set<Path> prefixes = new TreeSet<Path>();
        for (int i = 0; i < objects.length; i++) {
            prefixes.add(keyToPath(objects[i].getKey()));
        }
        // The result lists sub-paths only, so drop the starting path itself.
        prefixes.remove(path);
        return prefixes;
    } catch (ServiceException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) e.getCause();
        }
        throw new OssException(e);
    }
}
From source file:com.envision.envservice.service.OrgStructureService.java
/**
 * Queries the manager (higher-level) user IDs for the given user.
 */
public Set<String> queryHigherLevel(String userId) throws Exception {
    Set<String> managers = new LinkedHashSet<String>();
    List<SAPEmpJob> lstSAPEmpJob = queryByUserId(userId);
    for (SAPEmpJob sapEmpJob : lstSAPEmpJob) {
        managers.add(sapEmpJob.getManagerId());
    }
    // Drop the placeholder value recorded for employees with no manager.
    managers.remove(MARK_NO_MANAGER);
    return managers;
}
From source file:com.mirth.connect.plugins.dashboardstatus.DashboardConnectorStatusMonitor.java
private synchronized void removeConnectionInSocketSet(Socket socket, String connectorId) {
    if (socket != null) {
        Set<Socket> socketSet = socketSetMap.get(connectorId);
        if (socketSet != null) {
            socketSet.remove(socket);
        }
    }
}
From source file:dk.netarkivet.harvester.harvesting.monitor.HarvestMonitor.java
private void cleanOnStartup() {
    Set<Long> idsToRemove = new TreeSet<Long>();
    RunningJobsInfoDAO dao = RunningJobsInfoDAO.getInstance();
    idsToRemove.addAll(dao.getHistoryRecordIds());
    Iterator<Long> startedJobIds = JobDAO.getInstance().getAllJobIds(JobStatus.STARTED);
    while (startedJobIds.hasNext()) {
        // don't remove records for jobs still in status STARTED
        idsToRemove.remove(startedJobIds.next());
    }
    int delCount = 0;
    for (long jobId : idsToRemove) {
        delCount += dao.removeInfoForJob(jobId);
        delCount += dao.deleteFrontierReports(jobId);
    }
    if (LOG.isInfoEnabled() && delCount > 0) {
        LOG.info("Cleaned up " + delCount + " obsolete history records.");
    }
}
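The cleanup above computes a set difference by hand: it seeds idsToRemove with every known history record, then removes each id that is still STARTED. When the ids to keep are already available as a collection, removeAll expresses the same operation in one call (the names below are illustrative):

import java.util.Set;
import java.util.TreeSet;

public class SetDifferenceDemo {
    public static void main(String[] args) {
        Set<Long> idsToRemove = new TreeSet<>(Set.of(1L, 2L, 3L, 4L));
        Set<Long> stillRunning = Set.of(2L, 4L);

        // Equivalent to iterating stillRunning and calling remove() per element.
        idsToRemove.removeAll(stillRunning);
        System.out.println(idsToRemove); // [1, 3]
    }
}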