List of usage examples for com.google.common.collect.Multimap.entries()
Collection<Map.Entry<K, V>> entries();
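entries() returns a view collection of every key/value pair in the multimap, one Map.Entry per pair, so a key with several values contributes one entry per value; changes made through the view write back to the underlying multimap. The sketch below is a minimal, self-contained illustration of iterating that view (the class name, keys, and values are invented for the example and do not come from the projects listed below):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Map;

public class MultimapEntriesExample {
    public static void main(String[] args) {
        // A key may map to several values; entries() exposes one Map.Entry per pair.
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 90);
        scores.put("alice", 75);
        scores.put("bob", 60);

        for (Map.Entry<String, Integer> entry : scores.entries()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        // Prints three lines: "alice" appears twice, once per value (iteration order is unspecified for HashMultimap).
    }
}

The examples that follow show the same iteration pattern in real projects: filtering, sorting, or copying the entry view, or driving side effects such as batched RPCs and database mutations.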
From source file:ai.grakn.graql.internal.reasoner.atom.binary.Relation.java
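Builds a role-to-type multimap by streaming over the entries of a role-to-variable multimap and mapping each variable to its inferred type.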
private Multimap<RoleType, Type> getRoleTypeMap() {
    Multimap<RoleType, Type> roleTypeMap = ArrayListMultimap.create();
    Multimap<RoleType, Var> roleMap = getRoleVarMap();
    Map<Var, Type> varTypeMap = getParentQuery().getVarTypeMap();
    roleMap.entries().stream()
            .filter(e -> varTypeMap.containsKey(e.getValue()))
            .forEach(e -> roleTypeMap.put(e.getKey(), varTypeMap.get(e.getValue())));
    return roleTypeMap;
}
From source file:to.lean.tools.gmail.importer.gmail.Mailbox.java
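Iterates the entries of a LocalMessage-to-Message multimap to compute label changes for each message and queue them as batched Gmail API modify requests.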
void syncLocalLabelsToGmail(Multimap<LocalMessage, Message> map) {
    class Batches {
        BatchRequest thisBatch;
        BatchRequest nextBatch;
    }
    Gmail gmail = gmailService.getServiceWithRetries();
    Batches batches = new Batches();
    batches.thisBatch = gmail.batch();
    batches.nextBatch = gmail.batch();
    try {
        for (Map.Entry<LocalMessage, Message> entry : map.entries()) {
            LocalMessage localMessage = entry.getKey();
            Message message = entry.getValue();

            Set<String> labelNamesToAdd = localMessage.getFolders().stream()
                    .map(this::normalizeLabelName)
                    .collect(toSet());
            Set<String> labelNamesToRemove = Sets.newHashSet("SPAM", "TRASH");
            labelNamesToRemove.removeAll(labelNamesToAdd);

            if (localMessage.isStarred()) {
                labelNamesToAdd.add("STARRED");
                labelNamesToRemove.remove("STARRED");
            }
            if (localMessage.isUnread()) {
                labelNamesToAdd.add("UNREAD");
                labelNamesToRemove.remove("UNREAD");
            } else {
                labelNamesToRemove.add("UNREAD");
                labelNamesToAdd.remove("UNREAD");
            }
            if (!labelNamesToAdd.contains("INBOX")) {
                labelNamesToRemove.add("INBOX");
                labelNamesToAdd.remove("INBOX");
            }

            List<String> labelIdsToAdd = labelNamesToAdd.stream()
                    .map(labelName -> labelsByName.get(labelName).getId())
                    .collect(toList());
            List<String> labelIdsToRemove = labelNamesToRemove.stream()
                    .map(labelName -> labelsByName.get(labelName).getId())
                    .collect(toList());

            Gmail.Users.Messages.Modify request = gmail.users().messages().modify(
                    user.getEmailAddress(), message.getId(),
                    new ModifyMessageRequest()
                            .setAddLabelIds(labelIdsToAdd)
                            .setRemoveLabelIds(labelIdsToRemove));

            JsonBatchCallback<Message> callback = new JsonBatchCallback<Message>() {
                @Override
                public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException {
                    System.err.format("For message: %s, got error: %s\n", message.getId(), e.toPrettyString());
                    if (e.getCode() == TOO_MANY_CONCURRENT_REQUESTS_FOR_USER) {
                        request.queue(batches.nextBatch, this);
                    }
                }

                @Override
                public void onSuccess(Message message, HttpHeaders responseHeaders) throws IOException {
                    System.err.println(message.toPrettyString());
                }
            };

            request.queue(batches.thisBatch, callback);
        }

        while (batches.thisBatch.size() > 0) {
            batches.thisBatch.execute();
            batches.thisBatch = batches.nextBatch;
            batches.nextBatch = gmail.batch();
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
From source file:com.b2international.snowowl.datastore.server.oplock.impl.RemoteLockTargetListener.java
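Walks the lock-target/lock-context entries recorded for a disconnected session and releases each remaining lock.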
@Override
protected void onLogout(final IApplicationSessionManager manager, final RpcSession session) {
    if (null == session) {
        return;
    }
    if (!remotelyLockedContexts.containsKey(session)) {
        return;
    }
    final Multimap<IOperationLockTarget, DatastoreLockContext> targetsForSession = remotelyLockedContexts.remove(session);
    if (targetsForSession.isEmpty()) {
        return;
    }
    final String disconnectedUserId = (String) session.get(IApplicationSessionManager.KEY_USER_ID);
    if (null == disconnectedUserId) {
        return;
    }
    LOGGER.warn("Disconnected client had locks granted, unlocking.");
    final IDatastoreOperationLockManager lockManager = ApplicationContext.getInstance()
            .getServiceChecked(IDatastoreOperationLockManager.class);
    for (final Entry<IOperationLockTarget, DatastoreLockContext> targetContextPair : targetsForSession.entries()) {
        try {
            lockManager.unlock(targetContextPair.getValue(), targetContextPair.getKey());
        } catch (final OperationLockException e) {
            LOGGER.error("Failed to unlock targets left after closed session.", e);
        }
    }
}
From source file:org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper.java
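Gathers normalized field values and content tokens into a multimap, then iterates its entries to emit partitioned index, global index, reverse index, and metadata mutations for Accumulo.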
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    Article article = extractor.extract(new InputStreamReader(new ByteArrayInputStream(value.getBytes()), UTF8));
    String NULL_BYTE = "\u0000";
    String colfPrefix = language + NULL_BYTE;
    String indexPrefix = "fi" + NULL_BYTE;
    if (article != null) {
        int groupId = WikipediaMapper.getPartitionId(article, numGroups);
        if (groupId != myGroup)
            return;
        Text partitionId = new Text(Integer.toString(WikipediaMapper.getPartitionId(article, numPartitions)));

        // Create the mutations for the document.
        // Row is partition id, colf is language\0articleid, colq is fieldName\0fieldValue
        Mutation m = new Mutation(partitionId);
        for (Entry<String, Object> entry : article.getFieldValues().entrySet()) {
            m.put(colfPrefix + article.getId(), entry.getKey() + NULL_BYTE + entry.getValue().toString(), cv,
                    article.getTimestamp(), NULL_VALUE);
            // Create mutations for the metadata table.
            String metadataKey = entry.getKey() + METADATA_EVENT_COLUMN_FAMILY + language;
            if (!metadataSent.contains(metadataKey)) {
                Mutation mm = new Mutation(entry.getKey());
                mm.put(METADATA_EVENT_COLUMN_FAMILY, language, cv, article.getTimestamp(), NULL_VALUE);
                context.write(metadataTableName, mm);
                metadataSent.add(metadataKey);
            }
        }

        // Tokenize the content
        Set<String> tokens = getTokens(article);

        // We are going to put the fields to be indexed into a multimap. This allows us to iterate
        // over the entire set once.
        Multimap<String, String> indexFields = HashMultimap.create();
        // Add the normalized field values
        LcNoDiacriticsNormalizer normalizer = new LcNoDiacriticsNormalizer();
        for (Entry<String, String> index : article.getNormalizedFieldValues().entrySet())
            indexFields.put(index.getKey(), index.getValue());
        // Add the tokens
        for (String token : tokens)
            indexFields.put(TOKENS_FIELD_NAME, normalizer.normalizeFieldValue("", token));

        for (Entry<String, String> index : indexFields.entries()) {
            // Create mutations for the in partition index
            // Row is partition id, colf is 'fi'\0fieldName, colq is fieldValue\0language\0article id
            m.put(indexPrefix + index.getKey(), index.getValue() + NULL_BYTE + colfPrefix + article.getId(), cv,
                    article.getTimestamp(), NULL_VALUE);

            // Create mutations for the global index
            // Create a UID object for the Value
            Builder uidBuilder = Uid.List.newBuilder();
            uidBuilder.setIGNORE(false);
            uidBuilder.setCOUNT(1);
            uidBuilder.addUID(Integer.toString(article.getId()));
            Uid.List uidList = uidBuilder.build();
            Value val = new Value(uidList.toByteArray());

            // Create mutations for the global index
            // Row is field value, colf is field name, colq is partitionid\0language, value is Uid.List object
            Mutation gm = new Mutation(index.getValue());
            gm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
            context.write(indexTableName, gm);

            // Create mutations for the global reverse index
            Mutation grm = new Mutation(StringUtils.reverse(index.getValue()));
            grm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
            context.write(reverseIndexTableName, grm);

            // Create mutations for the metadata table.
            String metadataKey = index.getKey() + METADATA_INDEX_COLUMN_FAMILY + language;
            if (!metadataSent.contains(metadataKey)) {
                Mutation mm = new Mutation(index.getKey());
                mm.put(METADATA_INDEX_COLUMN_FAMILY, language + NULL_BYTE + LcNoDiacriticsNormalizer.class.getName(),
                        cv, article.getTimestamp(), NULL_VALUE);
                context.write(metadataTableName, mm);
                metadataSent.add(metadataKey);
            }
        }

        // Add the entire text to the document section of the table.
        // row is the partition, colf is 'd', colq is language\0articleid, value is Base64 encoded GZIP'd document
        m.put(DOCUMENT_COLUMN_FAMILY, colfPrefix + article.getId(), cv, article.getTimestamp(),
                new Value(Base64.encodeBase64(article.getText().getBytes())));
        context.write(tablename, m);
    } else {
        context.getCounter("wikipedia", "invalid articles").increment(1);
    }
    context.progress();
}
From source file:com.jcwhatever.nucleus.collections.timed.TimedMultimap.java
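Implements Multimap.putAll for a timed multimap by iterating the entries of the source multimap and inserting each pair individually.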
@Override
public boolean putAll(Multimap<? extends K, ? extends V> entries) {
    PreCon.notNull(entries);

    boolean isChanged = false;

    for (Map.Entry<? extends K, ? extends V> entry : entries.entries()) {
        isChanged = put(entry.getKey(), entry.getValue()) || isChanged;
    }

    return isChanged;
}
From source file:org.apache.crunch.impl.mr.plan.MSCRPlanner.java
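Uses the entries of vertex-to-job-prototype and target-to-job-prototype multimaps to wire dependency edges between MapReduce job prototypes while planning the pipeline.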
public MRExecutor plan(Class<?> jarClass, Configuration conf) throws IOException {

    DotfileUtil dotfileUtil = new DotfileUtil(jarClass, conf);
    // Generate the debug lineage dotfiles (if configuration is enabled)
    dotfileUtil.buildLineageDotfile(outputs);

    Map<PCollectionImpl<?>, Set<Target>> targetDeps = Maps.newTreeMap(DEPTH_COMPARATOR);
    for (PCollectionImpl<?> pcollect : outputs.keySet()) {
        targetDeps.put(pcollect, pcollect.getTargetDependencies());
    }

    Multimap<Target, JobPrototype> assignments = HashMultimap.create();

    while (!targetDeps.isEmpty()) {
        Set<Target> allTargets = Sets.newHashSet();
        for (PCollectionImpl<?> pcollect : targetDeps.keySet()) {
            allTargets.addAll(outputs.get(pcollect));
        }
        GraphBuilder graphBuilder = new GraphBuilder();

        // Walk the current plan tree and build a graph in which the vertices are
        // sources, targets, and GBK operations.
        Set<PCollectionImpl<?>> currentStage = Sets.newHashSet();
        for (PCollectionImpl<?> output : targetDeps.keySet()) {
            Set<Target> deps = Sets.intersection(allTargets, targetDeps.get(output));
            if (deps.isEmpty()) {
                graphBuilder.visitOutput(output);
                currentStage.add(output);
            }
        }

        Graph baseGraph = graphBuilder.getGraph();
        boolean hasInputs = false;
        for (Vertex v : baseGraph) {
            if (v.isInput()) {
                hasInputs = true;
                break;
            }
        }
        if (!hasInputs) {
            LOG.warn("No input sources for pipeline, nothing to do...");
            return new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets, pipelineCallables);
        }

        // Create a new graph that splits up dependent GBK nodes.
        Graph graph = prepareFinalGraph(baseGraph);

        // Break the graph up into connected components.
        List<List<Vertex>> components = graph.connectedComponents();

        // Generate the debug graph dotfiles (if configuration is enabled)
        dotfileUtil.buildBaseGraphDotfile(outputs, graph);
        dotfileUtil.buildSplitGraphDotfile(outputs, graph, components);

        // For each component, we will create one or more job prototypes,
        // depending on its profile.
        // For dependency handling, we only need to care about which
        // job prototype a particular GBK is assigned to.
        Multimap<Vertex, JobPrototype> newAssignments = HashMultimap.create();
        for (List<Vertex> component : components) {
            newAssignments.putAll(constructJobPrototypes(component));
        }

        // Add in the job dependency information here.
        for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
            JobPrototype current = e.getValue();
            for (Vertex parent : graph.getParents(e.getKey())) {
                for (JobPrototype parentJobProto : newAssignments.get(parent)) {
                    current.addDependency(parentJobProto);
                }
            }
        }

        ImmutableMultimap<Target, JobPrototype> previousStages = ImmutableMultimap.copyOf(assignments);
        for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
            if (e.getKey().isOutput()) {
                PCollectionImpl<?> pcollect = e.getKey().getPCollection();
                JobPrototype current = e.getValue();

                // Add in implicit dependencies via SourceTargets that are read into memory
                for (Target pt : pcollect.getTargetDependencies()) {
                    for (JobPrototype parentJobProto : assignments.get(pt)) {
                        current.addDependency(parentJobProto);
                    }
                }

                // Add this to the set of output assignments
                for (Target t : outputs.get(pcollect)) {
                    assignments.put(t, e.getValue());
                }
            } else {
                Source source = e.getKey().getSource();
                if (source != null && source instanceof Target) {
                    JobPrototype current = e.getValue();
                    Collection<JobPrototype> parentJobPrototypes = previousStages.get((Target) source);
                    if (parentJobPrototypes != null) {
                        for (JobPrototype parentJobProto : parentJobPrototypes) {
                            current.addDependency(parentJobProto);
                        }
                    }
                }
            }
        }

        // Remove completed outputs and mark materialized output locations
        // for subsequent job processing.
        for (PCollectionImpl<?> output : currentStage) {
            if (toMaterialize.containsKey(output)) {
                MaterializableIterable mi = toMaterialize.get(output);
                if (mi.isSourceTarget()) {
                    output.materializeAt((SourceTarget) mi.getSource());
                }
            }
            targetDeps.remove(output);
        }
    }

    // Finally, construct the jobs from the prototypes and return.
    MRExecutor exec = new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets, pipelineCallables);

    // Generate the debug Plan dotfiles
    dotfileUtil.buildPlanDotfile(exec, assignments, pipeline, lastJobID);

    for (JobPrototype proto : Sets.newHashSet(assignments.values())) {
        exec.addJob(proto.getCrunchJob(jarClass, conf, pipeline, lastJobID));
    }

    // Generate the debug RTNode dotfiles (if configuration is enabled)
    dotfileUtil.buildRTNodesDotfile(exec);

    // Attach the dotfiles to the MRExecutor context
    dotfileUtil.addDotfilesToContext(exec);

    return exec;
}
From source file:com.netflix.genie.client.BaseGenieClient.java
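Iterates the entries of an HTTP parameter multimap to append each name/value pair as a query parameter on the outgoing request.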
/**
 * Executes HTTP request based on user params, and performs
 * marshaling/unmarshaling.
 *
 * @param verb
 *            GET, POST or DELETE
 * @param baseRestUri
 *            the base Uri to use in the request, e.g. genie/v0/jobs
 * @param uuid
 *            the id to append to the baseRestUri, if any (e.g. job ID)
 * @param params
 *            HTTP params (e.g. userName="foo")
 * @param request
 *            Genie request if applicable (for POST), null otherwise
 * @param responseClass
 *            class name of expected response to be used for unmarshalling
 *
 * @return extracted and unmarshalled response from the Genie Execution Service
 * @throws CloudServiceException
 */
protected <T extends BaseResponse> T executeRequest(Verb verb, String baseRestUri, String uuid,
        Multimap<String, String> params, Object request, Class<T> responseClass) throws CloudServiceException {
    HttpResponse response = null;
    String requestUri = buildRequestUri(baseRestUri, uuid);
    try {
        // execute an HTTP request on Genie using load balancer
        RestClient genieClient = (RestClient) ClientFactory.getNamedClient(NIWS_CLIENT_NAME_GENIE);
        HttpRequest.Builder builder = HttpRequest.newBuilder().verb(verb).header("Accept", "application/json")
                .uri(new URI(requestUri)).entity(request);
        if (params != null) {
            Iterator<Entry<String, String>> it = params.entries().iterator();
            while (it.hasNext()) {
                Entry<String, String> next = it.next();
                builder.queryParams(next.getKey(), next.getValue());
            }
        }
        HttpRequest req = builder.build();
        response = genieClient.executeWithLoadBalancer(req);

        // basic error checking
        if (response == null) {
            String msg = "Received NULL response from Genie service";
            logger.error(msg);
            throw new CloudServiceException(HttpURLConnection.HTTP_INTERNAL_ERROR, msg);
        }

        // extract/cast/unmarshal and return entity
        return extractEntityFromClientResponse(response, responseClass);
    } catch (URISyntaxException e) {
        logger.error("Exception caught while executing request", e);
        throw new CloudServiceException(HttpURLConnection.HTTP_INTERNAL_ERROR, e);
    } catch (ClientException e) {
        logger.error("Exception caught while executing request", e);
        throw new CloudServiceException(HttpURLConnection.HTTP_INTERNAL_ERROR, e);
    } finally {
        // release resources after we are done
        // this is really really important - or we run out of connections
        if (response != null) {
            response.close();
        }
    }
}
From source file:ai.grakn.graql.internal.reasoner.atom.binary.RelationAtom.java
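A variant of the Relation.java pattern above; the entry stream is additionally sorted by the label of each variable's concept before building the role-to-concept multimap.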
private Multimap<Role, OntologyConcept> getRoleTypeMap() {
    Multimap<Role, OntologyConcept> roleTypeMap = ArrayListMultimap.create();
    Multimap<Role, Var> roleMap = getRoleVarMap();
    Map<Var, OntologyConcept> varTypeMap = getParentQuery().getVarOntologyConceptMap();
    roleMap.entries().stream()
            .filter(e -> varTypeMap.containsKey(e.getValue()))
            .sorted(Comparator.comparing(e -> varTypeMap.get(e.getValue()).getLabel()))
            .forEach(e -> roleTypeMap.put(e.getKey(), varTypeMap.get(e.getValue())));
    return roleTypeMap;
}
From source file:org.apache.accumulo.examples.wikisearch.ingest.WikipediaPartitionedMapper.java
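The partitioned variant of the WikipediaMapper example above; index entries are iterated the same way, but global index, reverse index, and metadata mutations are routed through MutationInfo output collectors instead of being written directly.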
@Override
protected void map(Text language, Article article, Context context) throws IOException, InterruptedException {
    String NULL_BYTE = "\u0000";
    String colfPrefix = language.toString() + NULL_BYTE;
    String indexPrefix = "fi" + NULL_BYTE;
    ColumnVisibility cv = new ColumnVisibility(cvPrefix + language);

    if (article != null) {
        Text partitionId = new Text(Integer.toString(WikipediaMapper.getPartitionId(article, numPartitions)));

        // Create the mutations for the document.
        // Row is partition id, colf is language\0articleid, colq is fieldName\0fieldValue
        Mutation m = new Mutation(partitionId);
        for (Entry<String, Object> entry : article.getFieldValues().entrySet()) {
            m.put(colfPrefix + article.getId(), entry.getKey() + NULL_BYTE + entry.getValue().toString(), cv,
                    article.getTimestamp(), NULL_VALUE);
            // Create mutations for the metadata table.
            MutationInfo mm = new MutationInfo(entry.getKey(), METADATA_EVENT_COLUMN_FAMILY, language.toString(),
                    cv, article.getTimestamp());
            wikiMetadataOutput.put(mm, NULL_VALUE);
        }

        // Tokenize the content
        Set<String> tokens = WikipediaMapper.getTokens(article);

        // We are going to put the fields to be indexed into a multimap. This allows us to iterate
        // over the entire set once.
        Multimap<String, String> indexFields = HashMultimap.create();
        // Add the normalized field values
        LcNoDiacriticsNormalizer normalizer = new LcNoDiacriticsNormalizer();
        for (Entry<String, String> index : article.getNormalizedFieldValues().entrySet())
            indexFields.put(index.getKey(), index.getValue());
        // Add the tokens
        for (String token : tokens)
            indexFields.put(TOKENS_FIELD_NAME, normalizer.normalizeFieldValue("", token));

        for (Entry<String, String> index : indexFields.entries()) {
            // Create mutations for the in partition index
            // Row is partition id, colf is 'fi'\0fieldName, colq is fieldValue\0language\0article id
            m.put(indexPrefix + index.getKey(), index.getValue() + NULL_BYTE + colfPrefix + article.getId(), cv,
                    article.getTimestamp(), NULL_VALUE);

            // Create mutations for the global index
            // Row is field value, colf is field name, colq is partitionid\0language, value is Uid.List object
            MutationInfo gm = new MutationInfo(index.getValue(), index.getKey(),
                    partitionId + NULL_BYTE + language, cv, article.getTimestamp());
            wikiIndexOutput.put(gm, new CountAndSet(Integer.toString(article.getId())));

            // Create mutations for the global reverse index
            MutationInfo grm = new MutationInfo(StringUtils.reverse(index.getValue()), index.getKey(),
                    partitionId + NULL_BYTE + language, cv, article.getTimestamp());
            wikiReverseIndexOutput.put(grm, new CountAndSet(Integer.toString(article.getId())));

            // Create mutations for the metadata table.
            MutationInfo mm = new MutationInfo(index.getKey(), METADATA_INDEX_COLUMN_FAMILY,
                    language + NULL_BYTE + LcNoDiacriticsNormalizer.class.getName(), cv, article.getTimestamp());
            wikiMetadataOutput.put(mm, NULL_VALUE);
        }

        // Add the entire text to the document section of the table.
        // row is the partition, colf is 'd', colq is language\0articleid, value is Base64 encoded GZIP'd document
        m.put(DOCUMENT_COLUMN_FAMILY, colfPrefix + article.getId(), cv, article.getTimestamp(),
                new Value(Base64.encodeBase64(article.getText().getBytes())));
        context.write(tablename, m);
    } else {
        context.getCounter("wikipedia", "invalid articles").increment(1);
    }
    context.progress();
}
From source file:com.palantir.atlasdb.keyvalue.rocksdb.impl.RocksDbKeyValueService.java
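Iterates cell-to-timestamp entries to build a RocksDB WriteBatch of deletes, then applies the batch in a single write.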
@Override
public void delete(String tableName, Multimap<Cell, Long> keys) {
    try (Disposer d = new Disposer();
            ColumnFamily table = columnFamilies.get(tableName)) {
        WriteOptions options = d.register(new WriteOptions().setSync(writeOptions.fsyncPut()));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<Cell, Long> entry : keys.entries()) {
            byte[] key = RocksDbKeyValueServices.getKey(entry.getKey(), entry.getValue());
            batch.remove(table.getHandle(), key);
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}