List of usage examples for java.util Set removeAll
boolean removeAll(Collection<?> c);
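Before the per-project examples, a minimal standalone sketch of the contract: removeAll removes from this set every element that is also contained in the argument collection, and its boolean result reports whether the set was modified. A null argument throws NullPointerException. (Nothing beyond the JDK is assumed; Set.of requires Java 9+.)

import java.util.HashSet;
import java.util.Set;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Set<String> set = new HashSet<>(Set.of("a", "b", "c"));

        // Removes every element that also appears in the argument collection
        boolean changed = set.removeAll(Set.of("b", "c", "d"));

        System.out.println(changed); // true -- "b" and "c" were removed
        System.out.println(set);     // [a]

        // Repeating the call removes nothing, so it returns false
        System.out.println(set.removeAll(Set.of("b", "c", "d"))); // false
    }
}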
From source file:com.redhat.rhn.frontend.xmlrpc.kickstart.profile.ProfileHandler.java
/**
 * Returns a list for each kickstart profile of package names that are present
 * in that profile but not the other.
 *
 * @param loggedInUser The current user; cannot be <code>null</code>
 * @param kickstartLabel1 identifies a profile to be compared; cannot be <code>null</code>
 * @param kickstartLabel2 identifies a profile to be compared; cannot be <code>null</code>
 * @return map of kickstart label to a list of package names in that profile but not in
 *         the other; if no keys match the criteria the list will be empty
 *
 * @xmlrpc.doc Returns a list for each kickstart profile; each list will contain
 * package names not present on the other profile.
 * @xmlrpc.param #param("string", "sessionKey")
 * @xmlrpc.param #param("string", "kickstartLabel1")
 * @xmlrpc.param #param("string", "kickstartLabel2")
 * @xmlrpc.returntype
 *     #struct("Comparison Info")
 *         #prop_desc("array", "kickstartLabel1", "Actual label of the first kickstart
 *             profile is the key into the struct")
 *             #array_single("string", "package name")
 *         #prop_desc("array", "kickstartLabel2", "Actual label of the second kickstart
 *             profile is the key into the struct")
 *             #array_single("string", "package name")
 *     #struct_end()
 */
public Map<String, Set<String>> comparePackages(User loggedInUser, String kickstartLabel1,
        String kickstartLabel2) {
    // Validate parameters
    if (kickstartLabel1 == null) {
        throw new IllegalArgumentException("kickstartLabel1 cannot be null");
    }
    if (kickstartLabel2 == null) {
        throw new IllegalArgumentException("kickstartLabel2 cannot be null");
    }

    // Load the profiles and their package lists
    KickstartData profile1 = KickstartFactory.lookupKickstartDataByLabelAndOrgId(kickstartLabel1,
            loggedInUser.getOrg().getId());
    KickstartData profile2 = KickstartFactory.lookupKickstartDataByLabelAndOrgId(kickstartLabel2,
            loggedInUser.getOrg().getId());

    // Set operations to determine deltas
    Set<String> onlyInProfile1 = getPackageNamesForKS(profile1);
    onlyInProfile1.removeAll(getPackageNamesForKS(profile2));

    Set<String> onlyInProfile2 = getPackageNamesForKS(profile2);
    onlyInProfile2.removeAll(getPackageNamesForKS(profile1));

    // Package for return
    Map<String, Set<String>> results = new HashMap<String, Set<String>>(2);
    results.put(kickstartLabel1, onlyInProfile1);
    results.put(kickstartLabel2, onlyInProfile2);

    return results;
}
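The two removeAll calls above compute both directions of a set difference. A generic sketch of the idiom (the helper name is hypothetical; the defensive copy matters because removeAll mutates its receiver):

import java.util.HashSet;
import java.util.Set;

class SetDiff {
    // Keep the elements of first that second lacks, without touching the inputs
    static <T> Set<T> onlyInFirst(Set<T> first, Set<T> second) {
        Set<T> result = new HashSet<>(first);
        result.removeAll(second);
        return result;
    }
}

onlyInFirst(a, b) and onlyInFirst(b, a) together form the symmetric difference. Note that comparePackages mutates the set returned by getPackageNamesForKS directly, which is only safe if that helper returns a fresh set on every call.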
From source file:io.fabric8.elasticsearch.plugin.kibana.KibanaSeed.java
public void setDashboards(final OpenshiftRequestContext context, Client client, String kibanaVersion,
        final String projectPrefix) {
    LOGGER.debug("Begin setDashboards: projectPrefix '{}' for user '{}' projects '{}' kibanaIndex '{}'",
            projectPrefix, context.getUser(), context.getProjects(), context.getKibanaIndex());

    // We want to seed the Kibana user index initially since the logic from
    // Kibana has changed to create before this plugin starts...
    boolean changed = initialSeedKibanaIndex(context, client);

    // GET .../.kibana/index-pattern/_search?pretty=true&fields=
    // compare results to projects; handle any deltas (create, delete?)
    Set<String> indexPatterns = getProjectNamesFromIndexes(context, client, projectPrefix);
    LOGGER.debug("Found '{}' index patterns for user", indexPatterns.size());

    Set<String> projects = new HashSet<>(context.getProjects());
    List<String> filteredProjects = new ArrayList<String>(filterProjectsWithIndices(projectPrefix, projects));
    LOGGER.debug("projects for '{}' that have existing indexes: '{}'", context.getUser(), filteredProjects);

    addAliasToAllProjects(context, filteredProjects);
    Collections.sort(filteredProjects);

    // If none have been set yet
    if (indexPatterns.isEmpty()) {
        create(context.getKibanaIndex(), filteredProjects, true, client, kibanaVersion, projectPrefix,
                indexPatterns);
        changed = true;
    } else {
        List<String> common = new ArrayList<String>(indexPatterns);
        common.retainAll(filteredProjects);

        filteredProjects.removeAll(common);
        indexPatterns.removeAll(common);

        // if we aren't a cluster-admin, make sure we're deleting the ADMIN_ALIAS_NAME
        if (!context.isOperationsUser()) {
            LOGGER.debug("user is not a cluster admin, ensure they don't keep/have the admin alias pattern");
            indexPatterns.add(ADMIN_ALIAS_NAME);
        }

        // check if we're going to be adding or removing any projects
        if (!filteredProjects.isEmpty() || !indexPatterns.isEmpty()) {
            changed = true;
        }

        // for any to create (remaining in projects) call createIndices,
        // createSearchmapping?, create dashboard
        create(context.getKibanaIndex(), filteredProjects, false, client, kibanaVersion, projectPrefix,
                indexPatterns);

        // cull any that are in ES but not in OS (remaining in indexPatterns)
        remove(context.getKibanaIndex(), indexPatterns, client, projectPrefix);

        common.addAll(filteredProjects);
        Collections.sort(common);

        // Set default index to first index in common if we removed the default
        String defaultIndex = getDefaultIndex(context, client, kibanaVersion, projectPrefix);
        LOGGER.debug("Checking if index patterns '{}' contain default index '{}'", indexPatterns, defaultIndex);
        if (indexPatterns.contains(defaultIndex) || StringUtils.isEmpty(defaultIndex)) {
            LOGGER.debug("'{}' does contain '{}' and common size is {}", indexPatterns, defaultIndex,
                    common.size());
            if (!common.isEmpty()) {
                setDefaultIndex(context.getKibanaIndex(), common.get(0), client, kibanaVersion, projectPrefix);
            }
        }
    }

    if (changed) {
        refreshKibanaUser(context.getKibanaIndex(), client);
    }
}
From source file:biomine.bmvis2.pipeline.TextFilterHider.java
/**
 * TODO: how should this really work? Should the nodes be made POIs instead of showing them and
 * their neighbors?
 *
 * @param graph Processable graph
 * @throws GraphOperationException
 */
public void doOperation(VisualGraph graph) throws GraphOperationException {
    Set<VisualNode> hiddenNodes = new HashSet<VisualNode>();
    Set<VisualNode> shownNodes = new HashSet<VisualNode>();

    for (VisualNode node : graph.getNodes()) {
        if (node instanceof VisualGroupNode)
            continue;
        if (this.filter.equals("")) {
            shownNodes.add(node);
            continue;
        }
        // Match the filter case-insensitively against every attribute value
        for (String key : node.getBMNode().getAttributes().keySet()) {
            if (node.getBMNode().getAttributes().get(key).toLowerCase()
                    .contains(this.filter.toLowerCase()))
                shownNodes.add(node);
            else
                hiddenNodes.add(node);
        }
        // Rescue a hidden node if its name, type or id matches the filter
        if (hiddenNodes.contains(node)) {
            if (node.getName().toLowerCase().contains(this.filter.toLowerCase())
                    || node.getType().toLowerCase().contains(this.filter.toLowerCase())
                    || node.getId().toLowerCase().contains(this.filter.toLowerCase())) {
                hiddenNodes.remove(node);
                shownNodes.add(node);
            }
        }
    }

    // Neighbors of shown nodes are shown as well
    Set<VisualNode> addToShown = new HashSet<VisualNode>();
    for (VisualNode node : shownNodes) {
        for (VisualNode neighbor : node.getNeighbors())
            addToShown.add(neighbor);
    }
    shownNodes.addAll(addToShown);

    // Anything that ended up both shown and hidden stays shown
    hiddenNodes.removeAll(shownNodes);
    graph.setHiddenNodes(hiddenNodes);
}
From source file:com.blazebit.security.impl.interceptor.ChangeInterceptor.java
@Override
public void onCollectionUpdate(Object collection, Serializable key) throws CallbackException {
    if (!EntityFeatures.isInterceptorActive()) {
        super.onCollectionUpdate(collection, key);
        return;
    }
    if (collection instanceof PersistentCollection) {
        PersistentCollection newValuesCollection = (PersistentCollection) collection;
        Object entity = newValuesCollection.getOwner();
        if (AnnotationUtils.findAnnotation(entity.getClass(), EntityResourceType.class) == null) {
            super.onCollectionUpdate(collection, key);
            return;
        }
        // copy new values and old values
        @SuppressWarnings({ "unchecked", "rawtypes" })
        Collection<?> newValues = new HashSet((Collection<?>) newValuesCollection.getValue());
        @SuppressWarnings({ "unchecked", "rawtypes" })
        Set<?> oldValues = new HashSet(((Map<?, ?>) newValuesCollection.getStoredSnapshot()).keySet());

        String fieldName = StringUtils.replace(newValuesCollection.getRole(),
                entity.getClass().getName() + ".", "");

        UserContext userContext = BeanProvider.getContextualReference(UserContext.class);
        ActionFactory actionFactory = BeanProvider.getContextualReference(ActionFactory.class);
        EntityResourceFactory resourceFactory = BeanProvider
                .getContextualReference(EntityResourceFactory.class);
        PermissionService permissionService = BeanProvider.getContextualReference(PermissionService.class);

        // find all objects that were added
        boolean isGrantedToAdd = true;
        boolean isGrantedToRemove = true;

        @SuppressWarnings({ "unchecked", "rawtypes" })
        Set<?> retained = new HashSet(oldValues);
        retained.retainAll(newValues);

        oldValues.removeAll(retained);
        // if there is a difference between oldValues and newValues
        if (!oldValues.isEmpty()) {
            // if something remained
            isGrantedToRemove = permissionService.isGranted(actionFactory.createAction(Action.REMOVE),
                    resourceFactory.createResource(entity, fieldName));
        }

        newValues.removeAll(retained);
        if (!newValues.isEmpty()) {
            isGrantedToAdd = permissionService.isGranted(actionFactory.createAction(Action.ADD),
                    resourceFactory.createResource(entity, fieldName));
        }

        if (!isGrantedToAdd) {
            throw new PermissionActionException("Element cannot be added to entity " + entity
                    + "'s collection " + fieldName + " by " + userContext.getUser());
        } else {
            if (!isGrantedToRemove) {
                throw new PermissionActionException("Element cannot be removed from entity " + entity
                        + "'s collection " + fieldName + " by " + userContext.getUser());
            } else {
                super.onCollectionUpdate(collection, key);
                return;
            }
        }
    } else {
        // not a persistent collection?
    }
}
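The retainAll/removeAll pairing above is the usual way to split an old and a new snapshot of a collection into removals and additions. A stripped-down sketch of the same computation (the names here are illustrative, not part of the interceptor API):

import java.util.HashSet;
import java.util.Set;

class SnapshotDiff {
    // Given old and new snapshots, report which elements were removed and added
    static <T> void printDiff(Set<T> oldValues, Set<T> newValues) {
        Set<T> retained = new HashSet<>(oldValues);
        retained.retainAll(newValues);   // elements present in both snapshots

        Set<T> removed = new HashSet<>(oldValues);
        removed.removeAll(retained);     // only in the old snapshot => removals

        Set<T> added = new HashSet<>(newValues);
        added.removeAll(retained);       // only in the new snapshot => additions

        System.out.println("removed=" + removed + ", added=" + added);
    }
}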
From source file:com.mirth.connect.client.ui.editors.transformer.TransformerPane.java
/**
 * @return Returns true if the transformer has unsupported step types and an alert was
 *         generated, false otherwise.
 */
private boolean alertUnsupportedStepTypes(Transformer transformer) {
    if (LoadedExtensions.getInstance().getTransformerStepPlugins().values().size() == 0) {
        parent.alertError(this, "No transformer step plugins loaded.\r\nPlease install plugins and try again.");
        return true;
    }

    Set<String> stepTypes = new HashSet<String>();
    for (Step step : transformer.getSteps()) {
        stepTypes.add(step.getType());
    }
    stepTypes.removeAll(LoadedExtensions.getInstance().getTransformerStepPlugins().keySet());

    if (!stepTypes.isEmpty()) {
        if (stepTypes.size() == 1) {
            parent.alertError(this, "The \"" + stepTypes.toArray()[0]
                    + "\" step plugin is required by this transformer. Please install this plugin and try again.");
        } else {
            parent.alertError(this, "The following step type plugins are required by this transformer: "
                    + StringUtils.join(stepTypes, ", ") + ". Please install these plugins and try again.");
        }
        return true;
    }

    return false;
}
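The subtraction above is a standard missing-dependencies check: start from what is required, remove what is available, and whatever remains is unmet. A tiny self-contained sketch with made-up plugin names:

import java.util.HashSet;
import java.util.Set;

public class MissingPluginsDemo {
    public static void main(String[] args) {
        // Plugin names here are purely illustrative
        Set<String> required = new HashSet<>(Set.of("mapper", "javascript", "xslt"));
        Set<String> installed = Set.of("mapper", "javascript");

        // Subtract what is available; the leftovers are the unmet requirements
        required.removeAll(installed);
        if (!required.isEmpty()) {
            System.out.println("Missing plugins: " + String.join(", ", required)); // xslt
        }
    }
}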
From source file:com.alibaba.jstorm.schedule.default_assign.WorkerScheduler.java
private void getRightWorkers(DefaultTopologyAssignContext context, Set<Integer> needAssign,
        List<ResourceWorkerSlot> assignedWorkers, int workersNum, Collection<ResourceWorkerSlot> workers) {
    Set<Integer> assigned = new HashSet<Integer>();
    List<ResourceWorkerSlot> users = new ArrayList<ResourceWorkerSlot>();
    if (workers == null)
        return;

    for (ResourceWorkerSlot worker : workers) {
        boolean right = true;
        Set<Integer> tasks = worker.getTasks();
        if (tasks == null)
            continue;
        // A worker is only usable if every one of its tasks still needs assignment
        // and none of them has already been claimed by another worker
        for (Integer task : tasks) {
            if (!needAssign.contains(task) || assigned.contains(task)) {
                right = false;
                break;
            }
        }
        if (right) {
            assigned.addAll(tasks);
            users.add(worker);
        }
    }

    if (users.size() + assignedWorkers.size() > workersNum) {
        LOG.warn(
                "There are not enough workers for the user-defined scheduler / keeping old assignment, userdefineWorkers={}, assignedWorkers={}, workerNum={}",
                users, assignedWorkers, workersNum);
        return;
    }

    assignedWorkers.addAll(users);
    // Tasks claimed by the accepted workers no longer need assignment
    needAssign.removeAll(assigned);
}
From source file:com.ephesoft.dcma.da.dao.hibernate.BatchInstanceDaoImpl.java
/**
 * An API to fetch all the batch instances for the given list of statuses. Only batch instances
 * accessible to the given user roles are returned, based on the Ephesoft user type.
 *
 * @param statusList List<BatchInstanceStatus>
 * @param firstResult int
 * @param maxResults int
 * @param userRoles Set<{@link String}>
 * @param ephesoftUser EphesoftUser
 * @return List<BatchInstance>
 */
@Override
public List<BatchInstance> getBatchInstances(List<BatchInstanceStatus> statusList, final int firstResult,
        final int maxResults, final Set<String> userRoles, EphesoftUser ephesoftUser) {
    List<BatchInstance> batchInstances = new ArrayList<BatchInstance>();
    Set<String> batchClassIdentifiers = batchClassGroupsDao.getBatchClassIdentifierForUserRoles(userRoles);

    EphesoftCriteria criteria = criteria();
    criteria.add(Restrictions.in(STATUS, statusList));
    criteria.add(Restrictions.isNull(CURRENT_USER));
    criteria.add(Restrictions.eq(IS_REMOTE, false));

    switch (ephesoftUser) {
    case NORMAL_USER:
        Set<String> batchInstanceIdentifiers = batchInstanceGroupsDao
                .getBatchInstanceIdentifierForUserRoles(userRoles);
        Set<String> batchInstanceIdentifierSet = batchInstanceGroupsDao
                .getBatchInstanceIdentifiersExceptUserRoles(userRoles);
        // Drop the identifiers the user's roles can see from the "except" set
        batchInstanceIdentifierSet.removeAll(batchInstanceIdentifiers);
        if ((null != batchClassIdentifiers && batchClassIdentifiers.size() > 0)
                || (null != batchInstanceIdentifiers && batchInstanceIdentifiers.size() > 0)) {
            List<Order> orderList = new ArrayList<Order>();
            Order orderForHighestBatchPriority = new Order(BatchInstanceProperty.PRIORITY, true);
            Order orderForLastModified = new Order(BatchInstanceProperty.LASTMODIFIED, false);
            orderList.add(orderForHighestBatchPriority);
            orderList.add(orderForLastModified);
            batchInstances = find(criteria, 0, -1, orderList.toArray(new Order[orderList.size()]));
            // Fixed against JIRA-1018: user not able to navigate through batch list.
            batchInstances = updateBatchInstanceList(batchInstances, batchClassIdentifiers,
                    batchInstanceIdentifiers, batchInstanceIdentifierSet, firstResult, maxResults);
        }
        break;
    default:
        if (null != batchClassIdentifiers && batchClassIdentifiers.size() > 0) {
            criteria.createAlias(BATCH_CLASS, BATCH_CLASS);
            criteria.add(Restrictions.in(BATCH_CLASS_IDENTIFIER, batchClassIdentifiers));
            List<Order> orderList = new ArrayList<Order>();
            Order orderForHighestBatchPriority = new Order(BatchInstanceProperty.PRIORITY, true);
            Order orderForLastModified = new Order(BatchInstanceProperty.LASTMODIFIED, false);
            orderList.add(orderForHighestBatchPriority);
            orderList.add(orderForLastModified);
            batchInstances = find(criteria, firstResult, maxResults,
                    orderList.toArray(new Order[orderList.size()]));
        }
        break;
    }
    return batchInstances;
}
From source file:com.uber.hoodie.TestHoodieClient.java
@Test
public void testInsertAndCleanByCommits() throws Exception {
    int maxCommits = 3; // keep up to 3 commits from the past
    HoodieWriteConfig cfg = getConfigBuilder().withCompactionConfig(HoodieCompactionConfig.newBuilder()
            .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainCommits(maxCommits)
            .build()).build();
    HoodieWriteClient client = new HoodieWriteClient(jsc, cfg);
    HoodieIndex index = HoodieIndex.createIndex(cfg, jsc);
    FileSystem fs = FSUtils.getFs();

    /*
     * do a big insert (this is basically the same as the insert part of upsert, just adding it
     * here so we can catch breakages in insert(), if the implementation diverges.)
     */
    String newCommitTime = client.startCommit();
    List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 500);
    JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 5);

    List<WriteStatus> statuses = client.insert(writeRecords, newCommitTime).collect();
    // Verify there are no errors
    assertNoWriteErrors(statuses);

    // verify that there is a commit
    assertEquals("Expecting a single commit.", 1,
            new HoodieReadClient(jsc, basePath).listCommitsSince("000").size());

    // Should have 500 records in table (check using Index), all in locations marked at commit
    HoodieTableMetaClient metaClient = new HoodieTableMetaClient(fs, basePath);
    HoodieTable table = HoodieTable.getHoodieTable(metaClient, getConfig());

    assertFalse(table.getCompletedCommitTimeline().empty());
    String commitTime = table.getCompletedCommitTimeline().getInstants().findFirst().get().getTimestamp();
    assertFalse(table.getCompletedCleanTimeline().empty());
    assertEquals("The clean instant should be the same as the commit instant", commitTime,
            table.getCompletedCleanTimeline().getInstants().findFirst().get().getTimestamp());

    List<HoodieRecord> taggedRecords = index.tagLocation(jsc.parallelize(records, 1), table).collect();
    checkTaggedRecords(taggedRecords, newCommitTime);

    // Keep doing some writes and clean inline. Make sure we have the expected number of files remaining.
    for (int writeCnt = 2; writeCnt < 10; writeCnt++) {
        Thread.sleep(1100); // make sure commits are unique
        newCommitTime = client.startCommit();
        records = dataGen.generateUpdates(newCommitTime, 100);

        statuses = client.upsert(jsc.parallelize(records, 1), newCommitTime).collect();
        // Verify there are no errors
        assertNoWriteErrors(statuses);

        HoodieTableMetaClient metadata = new HoodieTableMetaClient(fs, basePath);
        HoodieTable table1 = HoodieTable.getHoodieTable(metadata, cfg);
        HoodieTimeline activeTimeline = table1.getCompletedCommitTimeline();
        Optional<HoodieInstant> earliestRetainedCommit = activeTimeline.nthFromLastInstant(maxCommits - 1);
        Set<HoodieInstant> acceptableCommits = activeTimeline.getInstants().collect(Collectors.toSet());
        if (earliestRetainedCommit.isPresent()) {
            // Subtract every instant older than the earliest retained commit,
            // then add that commit back in
            acceptableCommits.removeAll(
                    activeTimeline.findInstantsInRange("000", earliestRetainedCommit.get().getTimestamp())
                            .getInstants().collect(Collectors.toSet()));
            acceptableCommits.add(earliestRetainedCommit.get());
        }

        TableFileSystemView fsView = table1.getFileSystemView();
        // Need to ensure the following
        for (String partitionPath : dataGen.getPartitionPaths()) {
            List<HoodieFileGroup> fileGroups = fsView.getAllFileGroups(partitionPath)
                    .collect(Collectors.toList());
            for (HoodieFileGroup fileGroup : fileGroups) {
                Set<String> commitTimes = new HashSet<>();
                fileGroup.getAllDataFiles().forEach(value -> {
                    System.out.println("Data File - " + value);
                    commitTimes.add(value.getCommitTime());
                });
                assertEquals("Only acceptable versions of the file should be present",
                        acceptableCommits.stream().map(HoodieInstant::getTimestamp)
                                .collect(Collectors.toSet()),
                        commitTimes);
            }
        }
    }
}
From source file:com.ephesoft.dcma.da.dao.hibernate.BatchInstanceDaoImpl.java
/**
 * An API to fetch the count of batch instances for a given batch instance status and batch
 * priority. Only batch instances accessible to the given user roles are counted, based on the
 * Ephesoft user type.
 *
 * @param batchName {@link String}
 * @param batchInstanceStatus {@link BatchInstanceStatus}
 * @param userName {@link String}
 * @param batchPriority {@link BatchPriority}
 * @param userRoles Set<{@link String}>
 * @param ephesoftUser {@link EphesoftUser}
 * @return int, count of the batch instances present for the batch instance status.
 */
@Override
public int getCount(String batchName, BatchInstanceStatus batchInstanceStatus, String userName,
        BatchPriority batchPriority, Set<String> userRoles, EphesoftUser ephesoftUser) {
    int count = 0;
    EphesoftCriteria criteria = criteria();

    if (batchName != null && !batchName.isEmpty()) {
        String batchNameLocal = batchName.replaceAll("%", "\\\\%");
        // Criteria added to search by either batch name or batch identifier (5723)
        Criterion nameLikeCriteria = Restrictions.like(BATCH_NAME, "%" + batchNameLocal + "%");
        Criterion idLikeCriteria = Restrictions.like(BATCH_INSTANCE_IDENTIFIER, "%" + batchNameLocal + "%");
        LogicalExpression searchCriteria = Restrictions.or(nameLikeCriteria, idLikeCriteria);
        criteria.add(searchCriteria);
    } else if (null != batchPriority) {
        Disjunction disjunction = Restrictions.disjunction();
        Integer lowValue = batchPriority.getLowerLimit();
        Integer upperValue = batchPriority.getUpperLimit();
        disjunction.add(Restrictions.between(PRIORITY, lowValue, upperValue));
        criteria.add(disjunction);
    }

    Set<String> batchClassIdentifiers = batchClassGroupsDao.getBatchClassIdentifierForUserRoles(userRoles);
    switch (ephesoftUser) {
    case NORMAL_USER:
        Set<String> batchInstanceIdentifiers = batchInstanceGroupsDao
                .getBatchInstanceIdentifierForUserRoles(userRoles);
        Set<String> batchInstanceIdentifierSet = batchInstanceGroupsDao
                .getBatchInstanceIdentifiersExceptUserRoles(userRoles);
        batchInstanceIdentifierSet.removeAll(batchInstanceIdentifiers);
        if ((null != batchClassIdentifiers && batchClassIdentifiers.size() > 0)
                || (null != batchInstanceIdentifiers && batchInstanceIdentifiers.size() > 0)) {
            criteria.add(Restrictions.eq(STATUS, batchInstanceStatus));
            criteria.add(Restrictions.eq(IS_REMOTE, false));
            criteria.add(Restrictions.or(Restrictions.isNull(CURRENT_USER),
                    Restrictions.eq(CURRENT_USER, userName)));
            // Fixed against JIRA-1018: user not able to navigate through batch list.
            List<BatchInstance> batchInstanceList = find(criteria);
            batchInstanceList = updateBatchInstanceList(batchInstanceList, batchClassIdentifiers,
                    batchInstanceIdentifiers, batchInstanceIdentifierSet, 0, batchInstanceList.size());
            count = batchInstanceList.size();
        }
        break;
    default:
        if (null != batchClassIdentifiers && batchClassIdentifiers.size() > 0) {
            criteria.add(Restrictions.eq(STATUS, batchInstanceStatus));
            criteria.add(Restrictions.eq(IS_REMOTE, false));
            criteria.add(Restrictions.or(Restrictions.isNull(CURRENT_USER),
                    Restrictions.eq(CURRENT_USER, userName)));
            criteria.createAlias(BATCH_CLASS, BATCH_CLASS);
            criteria.add(Restrictions.in(BATCH_CLASS_IDENTIFIER, batchClassIdentifiers));
            count = count(criteria);
        }
        break;
    }
    return count;
}
From source file:ddf.test.itests.catalog.TestFederation.java
private void verifyEvents(Set<String> metacardIdsExpected, Set<String> metacardIdsNotExpected,
        Set<String> subscriptionIds) {
    long millis = 0;

    boolean isAllEventsReceived = false;
    boolean isUnexpectedEventReceived = false;

    while (!isAllEventsReceived && !isUnexpectedEventReceived && millis < TimeUnit.MINUTES.toMillis(2)) {

        Set<String> foundIds = null;
        try {
            Thread.sleep(EVENT_UPDATE_WAIT_INTERVAL);
            millis += EVENT_UPDATE_WAIT_INTERVAL;
        } catch (InterruptedException e) {
            LOGGER.info("Interrupted exception while trying to sleep for events", e);
        }

        if ((millis % 1000) == 0) {
            LOGGER.info("Waiting for events to be received...{}ms", millis);
        }

        for (String id : subscriptionIds) {
            foundIds = getEvents(id);
            isAllEventsReceived = foundIds.containsAll(metacardIdsExpected);

            // removeAll returns true iff any of the unexpected ids were present
            isUnexpectedEventReceived = foundIds.removeAll(metacardIdsNotExpected);
        }
    }

    assertTrue(isAllEventsReceived);
    assertFalse(isUnexpectedEventReceived);
}
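This test leans on a detail of the contract: removeAll returns true exactly when the receiver and the argument intersect, so it doubles as an "any overlap?" probe, at the cost of mutating the receiver. A minimal sketch:

import java.util.HashSet;
import java.util.Set;

public class OverlapCheckDemo {
    public static void main(String[] args) {
        Set<String> received = new HashSet<>(Set.of("id1", "id2"));
        Set<String> forbidden = Set.of("id2", "id9");

        // true iff at least one forbidden id was present;
        // "id2" is removed from received as a side effect
        boolean sawForbidden = received.removeAll(forbidden);

        System.out.println(sawForbidden); // true
        System.out.println(received);     // [id1]
    }
}

The mutation is harmless in verifyEvents because foundIds is refetched from getEvents on every loop iteration.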