List of usage examples for com.google.common.collect.Multimap#containsKey
boolean containsKey(@Nullable Object key);
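Before the real-world examples, here is a minimal self-contained sketch (not taken from any of the source files below) of what containsKey reports: it returns true only while the key maps to at least one value, since a Multimap never holds a key with an empty value collection.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapContainsKeyDemo {
    public static void main(String[] args) {
        Multimap<String, String> rolesToPrivileges = HashMultimap.create();
        rolesToPrivileges.put("analyst", "select");
        rolesToPrivileges.put("analyst", "insert");

        // true: "analyst" currently maps to at least one value
        System.out.println(rolesToPrivileges.containsKey("analyst"));

        // false: the key was never added
        System.out.println(rolesToPrivileges.containsKey("admin"));

        // Unlike Map.get(), Multimap.get() never returns null; it returns an
        // empty collection for absent keys, so containsKey() is the idiomatic
        // way to distinguish "no mappings" from "key present".
        rolesToPrivileges.removeAll("analyst");
        System.out.println(rolesToPrivileges.containsKey("analyst")); // false
    }
}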
From source file:org.apache.sentry.provider.file.SimplePolicyEngine.java
private ImmutableSetMultimap<String, String> parsePermissions(@Nullable String database,
        Ini.Section rolesSection, Ini.Section groupsSection) {
    ImmutableSetMultimap.Builder<String, String> resultBuilder = ImmutableSetMultimap.builder();
    Multimap<String, String> roleNameToPrivilegeMap = HashMultimap.create();
    List<? extends RoleValidator> validators = Lists.newArrayList(new ServersAllIsInvalid(),
            new DatabaseMustMatch(), new DatabaseRequiredInRole(), new ServerNameMustMatch(serverName));
    for (Map.Entry<String, String> entry : rolesSection.entrySet()) {
        String roleName = Strings.nullToEmpty(entry.getKey()).trim();
        String roleValue = Strings.nullToEmpty(entry.getValue()).trim();
        boolean invalidConfiguration = false;
        if (roleName.isEmpty()) {
            LOGGER.warn("Empty role name encountered in {}", resourcePath);
            invalidConfiguration = true;
        }
        if (roleValue.isEmpty()) {
            LOGGER.warn("Empty role value encountered in {}", resourcePath);
            invalidConfiguration = true;
        }
        if (roleNameToPrivilegeMap.containsKey(roleName)) {
            LOGGER.warn("Role {} defined twice in {}", roleName, resourcePath);
        }
        Set<String> roles = PermissionUtils.toPermissionStrings(roleValue);
        if (!invalidConfiguration && roles != null) {
            for (String role : roles) {
                for (RoleValidator validator : validators) {
                    validator.validate(database, role.trim());
                }
            }
            roleNameToPrivilegeMap.putAll(roleName, roles);
        }
    }
    Splitter roleSplitter = ROLE_SPLITTER.omitEmptyStrings().trimResults();
    for (Map.Entry<String, String> entry : groupsSection.entrySet()) {
        String groupName = Strings.nullToEmpty(entry.getKey()).trim();
        String groupPrivileges = Strings.nullToEmpty(entry.getValue()).trim();
        Collection<String> resolvedGroupPrivileges = Sets.newHashSet();
        for (String roleName : roleSplitter.split(groupPrivileges)) {
            if (roleNameToPrivilegeMap.containsKey(roleName)) {
                resolvedGroupPrivileges.addAll(roleNameToPrivilegeMap.get(roleName));
            } else {
                LOGGER.warn("Role {} for group {} does not exist in privileges section in {}",
                        new Object[] { roleName, groupName, resourcePath });
            }
        }
        resultBuilder.putAll(groupName, resolvedGroupPrivileges);
    }
    return resultBuilder.build();
}
From source file:io.prestosql.execution.scheduler.SourcePartitionedScheduler.java
private Set<RemoteTask> assignSplits(Multimap<Node, Split> splitAssignment,
        Multimap<Node, Lifespan> noMoreSplitsNotification) {
    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    ImmutableSet<Node> nodes = ImmutableSet.<Node>builder().addAll(splitAssignment.keySet())
            .addAll(noMoreSplitsNotification.keySet()).build();
    for (Node node : nodes) {
        // source partitioned tasks can only receive broadcast data; otherwise it would have a different distribution
        ImmutableMultimap<PlanNodeId, Split> splits = ImmutableMultimap.<PlanNodeId, Split>builder()
                .putAll(partitionedNode, splitAssignment.get(node)).build();
        ImmutableMultimap.Builder<PlanNodeId, Lifespan> noMoreSplits = ImmutableMultimap.builder();
        if (noMoreSplitsNotification.containsKey(node)) {
            noMoreSplits.putAll(partitionedNode, noMoreSplitsNotification.get(node));
        }
        newTasks.addAll(stage.scheduleSplits(node, splits, noMoreSplits.build()));
    }
    return newTasks.build();
}
From source file:edu.harvard.med.screensaver.ui.users.UserViewer.java
private void initScreensDataModels() {
    if (_screensDataModel == null && isScreeningRoomUserViewMode()) {
        List<ScreenAndRole> screensAndRoles = new ArrayList<ScreenAndRole>();
        for (Screen screen : getScreeningRoomUser().getAllAssociatedScreens()) {
            // note: if both Lead Screener and PI, show Lead Screener
            String role = getScreeningRoomUser().getScreensLed().contains(screen) ? "Lead Screener"
                    : (getLabHead() != null && getLabHead().getScreensHeaded().contains(screen)) ? "Lab Head (PI)"
                            : "Collaborator";
            if (!screen.isRestricted()) {
                screensAndRoles.add(new ScreenAndRole(screen, role));
            }
        }
        Multimap<ScreenType, ScreenAndRole> screenType2ScreenAndRole = HashMultimap.create();
        for (ScreenAndRole screenAndRole : screensAndRoles) {
            screenType2ScreenAndRole.put(screenAndRole.getScreen().getScreenType(), screenAndRole);
        }
        _screensDataModel = new HashMap<ScreenType, DataModel>();
        for (ScreenType screenType : ScreenType.values()) {
            if (screenType2ScreenAndRole.containsKey(screenType)) {
                ArrayList<ScreenAndRole> screensAndRolesOfType = new ArrayList<ScreenAndRole>(
                        screenType2ScreenAndRole.get(screenType));
                Collections.sort(screensAndRolesOfType);
                _screensDataModel.put(screenType, new ListDataModel(screensAndRolesOfType));
            }
        }
    }
}
From source file:grakn.core.graql.executor.ComputeExecutor.java
/**
 * Helper method to get the list of all shortest paths.
 *
 * @param resultGraph edge map
 * @param fromID starting vertex
 * @return all shortest paths starting at {@code fromID}
 */
private List<List<ConceptId>> getComputePathResultList(Multimap<ConceptId, ConceptId> resultGraph,
        ConceptId fromID) {
    List<List<ConceptId>> allPaths = new ArrayList<>();
    List<ConceptId> firstPath = new ArrayList<>();
    firstPath.add(fromID);
    Deque<List<ConceptId>> queue = new ArrayDeque<>();
    queue.addLast(firstPath);
    while (!queue.isEmpty()) {
        List<ConceptId> currentPath = queue.pollFirst();
        if (resultGraph.containsKey(currentPath.get(currentPath.size() - 1))) {
            Collection<ConceptId> successors = resultGraph.get(currentPath.get(currentPath.size() - 1));
            Iterator<ConceptId> iterator = successors.iterator();
            for (int i = 0; i < successors.size() - 1; i++) {
                List<ConceptId> extendedPath = new ArrayList<>(currentPath);
                extendedPath.add(iterator.next());
                queue.addLast(extendedPath);
            }
            currentPath.add(iterator.next());
            queue.addLast(currentPath);
        } else {
            allPaths.add(currentPath);
        }
    }
    return allPaths;
}
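The method above uses containsKey as the leaf test: a vertex with no outgoing edges is simply absent from the edge multimap, which terminates the path. A stripped-down sketch of the same idea on a toy graph (vertex names hypothetical, not from the grakn source):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class PathEnumerationDemo {
    public static void main(String[] args) {
        // Toy edge map: a -> b, a -> c, b -> d.
        Multimap<String, String> edges = HashMultimap.create();
        edges.put("a", "b");
        edges.put("a", "c");
        edges.put("b", "d");

        Deque<List<String>> queue = new ArrayDeque<>();
        queue.addLast(new ArrayList<>(List.of("a")));
        List<List<String>> paths = new ArrayList<>();
        while (!queue.isEmpty()) {
            List<String> path = queue.pollFirst();
            String last = path.get(path.size() - 1);
            // containsKey() distinguishes "has successors" from "dead end";
            // get() alone cannot, because it returns an empty collection either way.
            if (edges.containsKey(last)) {
                for (String next : edges.get(last)) {
                    List<String> extended = new ArrayList<>(path);
                    extended.add(next);
                    queue.addLast(extended);
                }
            } else {
                paths.add(path);
            }
        }
        // Prints both complete paths, e.g. [[a, c], [a, b, d]] (order may vary).
        System.out.println(paths);
    }
}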
From source file:me.lukasczyk.busybox_preprocessor.core.Algorithm.java
/**
 * Returns a multi map of needed files per file
 *
 * @return Multi map [file needs files]
 * @throws InterruptedException In case of interruption by shutdown notifier
 */
private Multimap<String, String> getNeededFilesPerFile() throws InterruptedException {
    final Map<String, String> exportedFunctions = getExportedFunctions();
    final Multimap<String, String> importedFunctions = getImportedFunctions();
    final Multimap<String, String> neededFilesPerFile = LinkedListMultimap.create();
    for (XMLInterfaceParser parser : interfaceParsers) {
        if (parser.getFileName().contains("lib")) {
            continue;
        }
        shutdownNotifier.shutdownIfNecessary();
        final Deque<String> waitList = new LinkedList<>();
        waitList.addAll(importedFunctions.get(parser.getFileName()));
        final List<String> visited = new LinkedList<>();
        final Set<String> fileNames = new LinkedHashSet<>();
        while (!waitList.isEmpty()) {
            final String function = waitList.poll();
            if (visited.contains(function)) {
                continue;
            }
            visited.add(function);
            if (!exportedFunctions.containsKey(function)) {
                continue;
            }
            final String fileName = exportedFunctions.get(function);
            if (!importedFunctions.containsKey(fileName)) {
                continue;
            }
            shutdownNotifier.shutdownIfNecessary();
            fileNames.add(fileName);
            waitList.addAll(importedFunctions.get(fileName));
        }
        if (fileNames.size() > 0) {
            neededFilesPerFile.putAll(parser.getFileName(), fileNames);
        } else {
            neededFilesPerFile.put(parser.getFileName(), "");
        }
    }
    return neededFilesPerFile;
}
From source file:com.zimbra.cs.store.file.BlobConsistencyChecker.java
/**
 * Reconciles blobs against the files in the given directory and adds any inconsistencies
 * to the current result set.
 */
private void check(short volumeId, String blobDirPath, Multimap<Integer, BlobInfo> blobsById)
        throws IOException {
    Multimap<Integer, BlobInfo> revisions = HashMultimap.create();
    File blobDir = new File(blobDirPath);
    File[] files = blobDir.listFiles();
    if (files == null) {
        files = new File[0];
    }
    log.info("Comparing %d items to %d files in %s.", blobsById.size(), files.length, blobDirPath);
    for (File file : files) {
        // Parse id and mod_content value from filename.
        Matcher matcher = PAT_BLOB_FILENAME.matcher(file.getName());
        int itemId = 0;
        int modContent = 0;
        if (matcher.matches()) {
            itemId = Integer.parseInt(matcher.group(1));
            modContent = Integer.parseInt(matcher.group(2));
        }
        BlobInfo blob = null;
        if (blobsById.containsKey(itemId)) {
            Iterator<BlobInfo> iterator = blobsById.get(itemId).iterator();
            while (iterator.hasNext()) {
                BlobInfo tempBlob = iterator.next();
                if (tempBlob.modContent == modContent) {
                    blob = tempBlob;
                    revisions.put(itemId, tempBlob);
                    iterator.remove();
                }
            }
        }
        if (blob == null) {
            BlobInfo unexpected = new BlobInfo();
            unexpected.volumeId = volumeId;
            unexpected.path = file.getAbsolutePath();
            unexpected.fileSize = file.length();
            results.unexpectedBlobs.put(itemId, unexpected);
        } else {
            blob.fileSize = file.length();
            blob.fileModContent = modContent;
            if (reportUsedBlobs) {
                results.usedBlobs.put(blob.itemId, blob);
            }
            if (checkSize) {
                blob.fileDataSize = getDataSize(file, blob.dbSize);
                if (blob.dbSize != blob.fileDataSize) {
                    results.incorrectSize.put(blob.itemId, blob);
                }
            }
        }
    }
    // Any remaining items have missing blobs.
    for (BlobInfo blob : blobsById.values()) {
        results.missingBlobs.put(blob.itemId, blob);
    }
    // Redefine incorrect revisions for all items that support a single revision:
    // if exactly one item with a given itemId appears in both missingBlobs and unexpectedBlobs,
    // and no item with that itemId appears in revisions, categorise it as an incorrect revision.
    Iterator<Integer> keyIterator = results.missingBlobs.keySet().iterator();
    while (keyIterator.hasNext()) {
        int itemId = keyIterator.next();
        List<BlobInfo> missingBlobs = new ArrayList<BlobInfo>(results.missingBlobs.get(itemId));
        List<BlobInfo> unexpectedBlobs = new ArrayList<BlobInfo>(results.unexpectedBlobs.get(itemId));
        if (missingBlobs.size() == 1 && unexpectedBlobs.size() == 1 && revisions.get(itemId).size() == 0) {
            BlobInfo incorrectRevision = new BlobInfo();
            BlobInfo missingBlob = missingBlobs.get(0);
            incorrectRevision.itemId = missingBlob.itemId;
            incorrectRevision.modContent = missingBlob.modContent;
            incorrectRevision.dbSize = missingBlob.dbSize;
            incorrectRevision.volumeId = missingBlob.volumeId;
            BlobInfo unexpectedBlob = unexpectedBlobs.get(0);
            incorrectRevision.path = unexpectedBlob.path;
            incorrectRevision.fileSize = unexpectedBlob.fileSize;
            incorrectRevision.fileModContent = unexpectedBlob.fileModContent;
            results.incorrectModContent.put(incorrectRevision.itemId, incorrectRevision);
            keyIterator.remove();
            results.unexpectedBlobs.removeAll(itemId);
        }
    }
}
From source file:com.bigdata.dastor.locator.AbstractReplicationStrategy.java
/**
 * returns multimap of {live destination: ultimate targets}, where if target is not the same
 * as the destination, it is a "hinted" write, and will need to be sent to
 * the ultimate target when it becomes alive again.
 */
public Multimap<InetAddress, InetAddress> getHintedEndpoints(String table, Collection<InetAddress> targets) {
    Multimap<InetAddress, InetAddress> map = HashMultimap.create(targets.size(), 1);
    IEndPointSnitch endPointSnitch = DatabaseDescriptor.getEndPointSnitch(table);

    // first, add the live endpoints
    for (InetAddress ep : targets) {
        if (FailureDetector.instance.isAlive(ep))
            map.put(ep, ep);
    }

    // if everything was alive or we're not doing HH on this keyspace, stop with just the live nodes
    if (map.size() == targets.size() || !StorageProxy.isHintedHandoffEnabled())
        return map;

    // assign dead endpoints to be hinted to the closest live one, or to the local node
    // (since it is trivially the closest) if none are alive. This way, the cost of doing
    // a hint is only adding the hint header, rather than doing a full extra write, if any
    // destination nodes are alive.
    //
    // we do a 2nd pass on targets instead of using temporary storage,
    // to optimize for the common case (everything was alive).
    InetAddress localAddress = FBUtilities.getLocalAddress();
    for (InetAddress ep : targets) {
        if (map.containsKey(ep))
            continue;
        InetAddress destination = map.isEmpty() ? localAddress
                : endPointSnitch.getSortedListByProximity(localAddress, map.keySet()).get(0);
        map.put(destination, ep);
    }
    return map;
}
From source file:org.apache.sentry.provider.file.SimpleFileProviderBackend.java
private void parsePrivileges(@Nullable String database, Ini.Section rolesSection, Ini.Section groupsSection,
        List<? extends PrivilegeValidator> validators, Path policyPath,
        Table<String, String, Set<String>> groupRolePrivilegeTable) {
    Multimap<String, String> roleNameToPrivilegeMap = HashMultimap.create();
    for (Map.Entry<String, String> entry : rolesSection.entrySet()) {
        String roleName = stringInterner.intern(Strings.nullToEmpty(entry.getKey()).trim());
        String roleValue = Strings.nullToEmpty(entry.getValue()).trim();
        boolean invalidConfiguration = false;
        if (roleName.isEmpty()) {
            String errMsg = String.format("Empty role name encountered in %s", policyPath);
            LOGGER.warn(errMsg);
            configErrors.add(errMsg);
            invalidConfiguration = true;
        }
        if (roleValue.isEmpty()) {
            String errMsg = String.format("Empty role value encountered in %s", policyPath);
            LOGGER.warn(errMsg);
            configErrors.add(errMsg);
            invalidConfiguration = true;
        }
        if (roleNameToPrivilegeMap.containsKey(roleName)) {
            String warnMsg = String.format("Role %s defined twice in %s", roleName, policyPath);
            LOGGER.warn(warnMsg);
            configWarnings.add(warnMsg);
        }
        Set<String> privileges = PrivilegeUtils.toPrivilegeStrings(roleValue);
        if (!invalidConfiguration && privileges != null) {
            Set<String> internedPrivileges = Sets.newHashSet();
            for (String privilege : privileges) {
                for (PrivilegeValidator validator : validators) {
                    validator.validate(new PrivilegeValidatorContext(database, privilege.trim()));
                }
                internedPrivileges.add(stringInterner.intern(privilege));
            }
            roleNameToPrivilegeMap.putAll(roleName, internedPrivileges);
        }
    }
    Splitter roleSplitter = ROLE_SPLITTER.omitEmptyStrings().trimResults();
    for (Map.Entry<String, String> entry : groupsSection.entrySet()) {
        String groupName = stringInterner.intern(Strings.nullToEmpty(entry.getKey()).trim());
        String groupPrivileges = Strings.nullToEmpty(entry.getValue()).trim();
        for (String roleName : roleSplitter.split(groupPrivileges)) {
            roleName = stringInterner.intern(roleName);
            if (roleNameToPrivilegeMap.containsKey(roleName)) {
                Set<String> privileges = groupRolePrivilegeTable.get(groupName, roleName);
                if (privileges == null) {
                    privileges = new HashSet<String>();
                    groupRolePrivilegeTable.put(groupName, roleName, privileges);
                }
                privileges.addAll(roleNameToPrivilegeMap.get(roleName));
            } else {
                String warnMsg = String.format(
                        "Role %s for group %s does not exist in privileges section in %s",
                        roleName, groupName, policyPath);
                LOGGER.warn(warnMsg);
                configWarnings.add(warnMsg);
            }
        }
    }
}
From source file:com.bouncestorage.swiftproxy.v1.ObjectResource.java
private Map<String, Object> blobGetStandardHeaders(Blob blob) {
    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
    Multimap<String, String> headers = blob.getAllHeaders();
    for (String h : STD_BLOB_HEADERS) {
        if (headers.containsKey(h)) {
            builder.put(h, headers.get(h).iterator().next());
        }
    }
    return builder.build();
}
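A caveat worth noting in this pattern: a Multimap key may hold several values, and headers.get(h).iterator().next() keeps only the first. A minimal sketch of that behavior (header names hypothetical, not from the swiftproxy source):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class FirstValueDemo {
    public static void main(String[] args) {
        Multimap<String, String> headers = ArrayListMultimap.create();
        headers.put("Content-Encoding", "gzip");
        headers.put("X-Trace", "a");
        headers.put("X-Trace", "b");

        // containsKey() guards the lookup; get() then returns all values for
        // the key, of which the pattern above takes only the first.
        if (headers.containsKey("X-Trace")) {
            System.out.println(headers.get("X-Trace").iterator().next()); // prints "a"
        }
    }
}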
From source file:org.apache.cassandra.locator.AbstractReplicationStrategy.java
/**
 * returns <tt>Multimap</tt> of {live destination: ultimate targets}, where if target is not the same
 * as the destination, it is a "hinted" write, and will need to be sent to
 * the ultimate target when it becomes alive again.
 */
public Multimap<InetAddress, InetAddress> getHintedEndpoints(Collection<InetAddress> targets) {
    Multimap<InetAddress, InetAddress> map = HashMultimap.create(targets.size(), 1);

    // first, add the live endpoints
    for (InetAddress ep : targets) {
        if (FailureDetector.instance.isAlive(ep))
            map.put(ep, ep);
    }

    // if everything was alive or we're not doing HH on this keyspace, stop with just the live nodes
    if (map.size() == targets.size() || !StorageProxy.isHintedHandoffEnabled())
        return map;

    // assign dead endpoints to be hinted to the closest live one, or to the local node
    // (since it is trivially the closest) if none are alive. This way, the cost of doing
    // a hint is only adding the hint header, rather than doing a full extra write, if any
    // destination nodes are alive.
    //
    // we do a 2nd pass on targets instead of using temporary storage,
    // to optimize for the common case (everything was alive).
    InetAddress localAddress = FBUtilities.getLocalAddress();
    for (InetAddress ep : targets) {
        if (map.containsKey(ep))
            continue;

        if (!StorageProxy.shouldHint(ep)) {
            if (logger.isDebugEnabled())
                logger.debug("not hinting " + ep + " which has been down "
                        + Gossiper.instance.getEndpointDowntime(ep) + "ms");
            continue;
        }

        // hint destination
        InetAddress destination = map.isEmpty() ? localAddress
                : snitch.getSortedListByProximity(localAddress, map.keySet()).get(0);
        map.put(destination, ep);
    }
    return map;
}