List of usage examples for com.google.common.collect Multimap values
Collection<V> values();
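Before the project examples, here is a minimal, self-contained sketch (class and variable names are invented) of what values() returns: a live, flattened view of every value across all keys, duplicates included. Removing from the view removes the backing entry; adding to it is not supported.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;

public class MultimapValuesDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 20);
        scores.put("bob", 10); // the same value under a different key is kept

        // values() flattens all values across all keys
        Collection<Integer> values = scores.values();
        System.out.println(values);        // e.g. [10, 20, 10]; grouped per key, key order not guaranteed
        System.out.println(values.size()); // 3, always equal to scores.size()

        // The collection is a live view: removing from it mutates the multimap
        values.remove(10);
        System.out.println(scores.size()); // 2
    }
}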
From source file: org.killbill.billing.plugin.analytics.dao.factory.BusinessInvoiceFactory.java
/**
 * Create current business invoices and invoice items.
 *
 * @return all business invoice and invoice items to create
 * @throws org.killbill.billing.plugin.analytics.AnalyticsRefreshException
 */
public Map<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>> createBusinessInvoicesAndInvoiceItems(
        final BusinessContextFactory businessContextFactory) throws AnalyticsRefreshException {
    // Pre-fetch these, to avoid contention on BusinessContextFactory
    final Account account = businessContextFactory.getAccount();
    final Long accountRecordId = businessContextFactory.getAccountRecordId();
    final Long tenantRecordId = businessContextFactory.getTenantRecordId();
    final ReportGroup reportGroup = businessContextFactory.getReportGroup();
    final CurrencyConverter currencyConverter = businessContextFactory.getCurrencyConverter();

    // Lookup the invoices for that account
    final Iterable<Invoice> invoices = businessContextFactory.getAccountInvoices();

    // All invoice items across all invoices for that account (we need to be able to reference items across multiple invoices)
    final Multimap<UUID, InvoiceItem> allInvoiceItems = ArrayListMultimap.<UUID, InvoiceItem>create();
    // Convenient mapping invoiceId -> invoice
    final Map<UUID, Invoice> invoiceIdToInvoiceMappings = new LinkedHashMap<UUID, Invoice>();
    for (final Invoice invoice : invoices) {
        invoiceIdToInvoiceMappings.put(invoice.getId(), invoice);
        allInvoiceItems.get(invoice.getId()).addAll(invoice.getInvoiceItems());
    }

    // Lookup once all SubscriptionBundle for that account (this avoids expensive lookups for each item)
    final Iterable<SubscriptionBundle> bundlesForAccount = businessContextFactory.getAccountBundles();
    final Map<UUID, SubscriptionBundle> bundles = new LinkedHashMap<UUID, SubscriptionBundle>();
    for (final SubscriptionBundle bundle : bundlesForAccount) {
        bundles.put(bundle.getId(), bundle);
    }

    // Create the business invoice items
    // We build them in parallel as invoice items are directly proportional to subscriptions (@see BusinessSubscriptionTransitionFactory)
    final CompletionService<BusinessInvoiceItemBaseModelDao> completionService = new ExecutorCompletionService<BusinessInvoiceItemBaseModelDao>(
            executor);
    final Multimap<UUID, BusinessInvoiceItemBaseModelDao> businessInvoiceItemsForInvoiceId = ArrayListMultimap
            .<UUID, BusinessInvoiceItemBaseModelDao>create();
    for (final InvoiceItem invoiceItem : allInvoiceItems.values()) {
        // Fetch audit logs in the main thread as AccountAuditLogs is not thread safe
        final AuditLog creationAuditLog = invoiceItem.getId() != null
                ? businessContextFactory.getInvoiceItemCreationAuditLog(invoiceItem.getId())
                : null;
        completionService.submit(new Callable<BusinessInvoiceItemBaseModelDao>() {
            @Override
            public BusinessInvoiceItemBaseModelDao call() throws Exception {
                return createBusinessInvoiceItem(businessContextFactory, invoiceItem, allInvoiceItems,
                        invoiceIdToInvoiceMappings, account, bundles, currencyConverter, creationAuditLog,
                        accountRecordId, tenantRecordId, reportGroup);
            }
        });
    }
    for (int i = 0; i < allInvoiceItems.values().size(); ++i) {
        try {
            final BusinessInvoiceItemBaseModelDao businessInvoiceItemModelDao = completionService.take().get();
            if (businessInvoiceItemModelDao != null) {
                businessInvoiceItemsForInvoiceId.get(businessInvoiceItemModelDao.getInvoiceId())
                        .add(businessInvoiceItemModelDao);
            }
        } catch (InterruptedException e) {
            throw new AnalyticsRefreshException(e);
        } catch (ExecutionException e) {
            throw new AnalyticsRefreshException(e);
        }
    }

    // Now, create the business invoices
    final Map<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>> businessRecords = new HashMap<BusinessInvoiceModelDao, Collection<BusinessInvoiceItemBaseModelDao>>();
    for (final Invoice invoice : invoices) {
        final Collection<BusinessInvoiceItemBaseModelDao> businessInvoiceItems = businessInvoiceItemsForInvoiceId
                .get(invoice.getId());
        if (businessInvoiceItems == null) {
            continue;
        }

        final Long invoiceRecordId = businessContextFactory.getInvoiceRecordId(invoice.getId());
        final AuditLog creationAuditLog = businessContextFactory.getInvoiceCreationAuditLog(invoice.getId());

        final BusinessInvoiceModelDao businessInvoice = new BusinessInvoiceModelDao(account, accountRecordId,
                invoice, invoiceRecordId, currencyConverter, creationAuditLog, tenantRecordId, reportGroup);
        businessRecords.put(businessInvoice, businessInvoiceItems);
    }

    return businessRecords;
}
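The values() pattern to note above: allInvoiceItems.values() flattens items across every invoice so a single loop can submit all the work, and values().size() (always equal to Multimap.size()) bounds the result-collection loop. A distilled sketch of just that pattern, with invented task and key names:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FanOutOverValues {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletionService<String> completionService = new ExecutorCompletionService<>(executor);

        Multimap<String, String> itemsByInvoice = ArrayListMultimap.create();
        itemsByInvoice.put("inv-1", "item-a");
        itemsByInvoice.put("inv-1", "item-b");
        itemsByInvoice.put("inv-2", "item-c");

        // Submit one task per value, regardless of which key it belongs to
        for (String item : itemsByInvoice.values()) {
            completionService.submit(() -> "processed " + item);
        }
        // values().size() == itemsByInvoice.size(), so it safely bounds the take() loop
        for (int i = 0; i < itemsByInvoice.values().size(); i++) {
            System.out.println(completionService.take().get()); // completion order, not submit order
        }
        executor.shutdown();
    }
}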
From source file: org.artifactory.build.BuildServiceImpl.java
@Override
public Map<Dependency, FileInfo> getBuildDependenciesFileInfos(Build build) {
    AqlBase.AndClause<AqlApiBuild> and = AqlApiBuild.and(AqlApiBuild.name().equal(build.getName()),
            AqlApiBuild.number().equal(build.getNumber()));
    log.debug("Executing dependencies search for build {}:{}", build.getName(), build.getNumber());
    AqlBase buildDependenciesQuery = AqlApiBuild.create().filter(and);
    buildDependenciesQuery.include(AqlApiBuild.module().dependecy().name(),
            AqlApiBuild.module().dependecy().item().sha1Actual(),
            AqlApiBuild.module().dependecy().item().md5Actual(),
            AqlApiBuild.module().dependecy().item().sha1Orginal(),
            AqlApiBuild.module().dependecy().item().md5Orginal(),
            AqlApiBuild.module().dependecy().item().created(),
            AqlApiBuild.module().dependecy().item().modifiedBy(),
            AqlApiBuild.module().dependecy().item().createdBy(),
            AqlApiBuild.module().dependecy().item().updated(),
            AqlApiBuild.module().dependecy().item().repo(),
            AqlApiBuild.module().dependecy().item().path(),
            AqlApiBuild.module().dependecy().item().name(),
            AqlApiBuild.module().dependecy().item().size()
            // Ordering by the last updated field, in case of duplicates with the same checksum.
    ).addSortElement(AqlApiBuild.module().dependecy().item().updated()).asc();

    AqlEagerResult<AqlBaseFullRowImpl> results = aqlService.executeQueryEager(buildDependenciesQuery);
    log.debug("Search returned {} dependencies", results.getSize());

    Multimap<String, Dependency> buildDependencies = BuildServiceUtils.getBuildDependencies(build);
    log.debug("This build contains {} dependencies (taken from build info)", buildDependencies.size());

    Map<Dependency, FileInfo> matchedDependencies = matchDependenciesToFileInfos(results.getResults(),
            buildDependencies);
    log.debug("Matched {} build dependencies to actual paths returned by search", matchedDependencies.size());

    // Lastly, populate matchedDependencies with all remaining unmatched dependencies with null values to help
    // users of this function know if all build artifacts were found.
    log.debug("{} dependencies were not matched to actual paths", buildDependencies.size());
    for (Dependency dependency : buildDependencies.values()) {
        if (!matchedDependencies.containsKey(dependency)) {
            matchedDependencies.put(dependency, null);
        }
    }
    return matchedDependencies;
}
From source file: com.cloudant.sync.datastore.BasicDatastore.java
/**
 * Removes revisions present in the datastore from the input map.
 *
 * @param revisions a multimap from document id to set of revisions. The
 *                  map is modified in place for performance consideration.
 */
void revsDiffBatch(Multimap<String, String> revisions) {
    final String sql = String.format(
            "SELECT docs.docid, revs.revid FROM docs, revs "
                    + "WHERE docs.doc_id = revs.doc_id AND docs.docid IN (%s) AND revs.revid IN (%s) "
                    + "ORDER BY docs.docid",
            SQLDatabaseUtils.makePlaceholders(revisions.keySet().size()),
            SQLDatabaseUtils.makePlaceholders(revisions.size()));

    String[] args = new String[revisions.keySet().size() + revisions.size()];
    String[] keys = revisions.keySet().toArray(new String[revisions.keySet().size()]);
    String[] values = revisions.values().toArray(new String[revisions.size()]);
    System.arraycopy(keys, 0, args, 0, revisions.keySet().size());
    System.arraycopy(values, 0, args, revisions.keySet().size(), revisions.size());

    Cursor cursor = null;
    try {
        cursor = this.sqlDb.rawQuery(sql, args);
        while (cursor.moveToNext()) {
            String docId = cursor.getString(0);
            String revId = cursor.getString(1);
            revisions.remove(docId, revId);
        }
    } catch (SQLException e) {
        e.printStackTrace();
    } finally {
        DatabaseUtils.closeCursorQuietly(cursor);
    }
}
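Worth noting in this example: Multimap.size() counts key-value pairs (here, total revisions), not keys, so keySet().size() + size() correctly sizes the combined bind-argument array, and values().toArray(...) flattens every revision across all document ids. A small sketch of just that sizing logic, with hypothetical data:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizing {
    public static void main(String[] args) {
        Multimap<String, String> revisions = HashMultimap.create();
        revisions.put("doc1", "1-a");
        revisions.put("doc1", "2-b");
        revisions.put("doc2", "1-c");

        // size() is the number of key-value pairs, not the number of keys
        System.out.println(revisions.keySet().size()); // 2 keys
        System.out.println(revisions.size());          // 3 entries
        System.out.println(revisions.values().size()); // 3, always equal to size()

        // Flatten all values into one array, as the SQL argument-building code does
        String[] values = revisions.values().toArray(new String[revisions.size()]);
        System.out.println(values.length); // 3
    }
}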
From source file: com.kurtraschke.nyctrtproxy.services.TripUpdateProcessor.java
public List<GtfsRealtime.TripUpdate> processFeed(Integer feedId, GtfsRealtime.FeedMessage fm,
        MatchMetrics totalMetrics) {

    long timestamp = fm.getHeader().getTimestamp();

    MatchMetrics feedMetrics = new MatchMetrics();
    feedMetrics.reportLatency(timestamp);

    if (_latencyLimit > 0 && feedMetrics.getLatency() > _latencyLimit) {
        _log.info("Feed {} ignored, too high latency = {}", feedId, feedMetrics.getLatency());
        if (_listener != null)
            _listener.reportMatchesForSubwayFeed(feedId.toString(), feedMetrics, _cloudwatchNamespace);
        return Collections.emptyList();
    }

    final Map<String, String> realtimeToStaticRouteMap = _realtimeToStaticRouteMapByFeed.getOrDefault(feedId,
            Collections.emptyMap());

    int nExpiredTus = 0, nTotalRecords = 0;

    // Read in trip updates per route. Skip trip updates that have too stale of data.
    Multimap<String, GtfsRealtime.TripUpdate> tripUpdatesByRoute = ArrayListMultimap.create();
    for (GtfsRealtime.FeedEntity entity : fm.getEntityList()) {
        if (entity.hasTripUpdate()) {
            GtfsRealtime.TripUpdate tu = entity.getTripUpdate();
            if (expiredTripUpdate(tu, fm.getHeader().getTimestamp())) {
                nExpiredTus++;
            } else {
                String routeId = tu.getTrip().getRouteId();
                routeId = realtimeToStaticRouteMap.getOrDefault(routeId, routeId);
                tripUpdatesByRoute.put(routeId, tu);
            }
            nTotalRecords++;
        }
    }
    reportRecordsIn(nTotalRecords, nExpiredTus, totalMetrics, feedMetrics);

    List<GtfsRealtime.TripUpdate> ret = Lists.newArrayList();

    for (GtfsRealtimeNYCT.TripReplacementPeriod trp : fm.getHeader()
            .getExtension(GtfsRealtimeNYCT.nyctFeedHeader).getTripReplacementPeriodList()) {
        if (_routeBlacklistByFeed.getOrDefault(feedId, Collections.emptySet()).contains(trp.getRouteId()))
            continue;
        GtfsRealtime.TimeRange range = trp.getReplacementPeriod();

        Date start = range.hasStart() ? new Date(range.getStart() * 1000)
                : earliestTripStart(tripUpdatesByRoute.values());
        Date end = range.hasEnd() ? new Date(range.getEnd() * 1000)
                : new Date(fm.getHeader().getTimestamp() * 1000);

        // All route IDs in this trip replacement period
        Set<String> routeIds = Arrays.stream(trp.getRouteId().split(", ?"))
                .map(routeId -> realtimeToStaticRouteMap.getOrDefault(routeId, routeId))
                .collect(Collectors.toSet());

        for (String routeId : routeIds) {
            String newRouteId = _addToTripReplacementPeriodByRoute.get(routeId);
            if (newRouteId != null)
                routeIds.add(newRouteId);
        }

        // Kurt's trip matching algorithm (ActivatedTripMatcher) requires calculating currently-active
        // static trips at this point.
        _tripMatcher.initForFeed(start, end, routeIds);

        for (String routeId : routeIds) {

            MatchMetrics routeMetrics = new MatchMetrics();

            Multimap<String, TripMatchResult> matchesByTrip = ArrayListMultimap.create();
            Collection<GtfsRealtime.TripUpdate> tripUpdates = tripUpdatesByRoute.get(routeId);
            routeMetrics.reportRecordsIn(tripUpdates.size());

            for (GtfsRealtime.TripUpdate tu : tripUpdates) {
                GtfsRealtime.TripUpdate.Builder tub = GtfsRealtime.TripUpdate.newBuilder(tu);
                GtfsRealtime.TripDescriptor.Builder tb = tub.getTripBuilder();

                // rewrite route ID for some routes
                tb.setRouteId(realtimeToStaticRouteMap.getOrDefault(tb.getRouteId(), tb.getRouteId()));

                // remove timepoints not in GTFS... in some cases this means there may be no STUs left
                // (ex. H shuttle at H19S.)
                removeTimepoints(tub);

                // get ID which consists of route, direction, origin-departure time, possibly a path
                // identifier (for feed 1.)
                NyctTripId rtid = NyctTripId.buildFromTripDescriptor(tb, _routesWithReverseRTDirections);

                // If we were able to parse the trip ID, there are various fixes
                // we may need to apply.
                if (rtid != null) {
                    // Fix stop IDs which don't include direction
                    tub.getStopTimeUpdateBuilderList().forEach(stub -> {
                        if (!(stub.getStopId().endsWith("N") || stub.getStopId().endsWith("S"))) {
                            stub.setStopId(stub.getStopId() + rtid.getDirection());
                        } else if (_routesWithReverseRTDirections.contains(tb.getRouteId())) {
                            String stopId = stub.getStopId();
                            stub.setStopId(stopId.substring(0, stopId.length() - 1) + rtid.getDirection());
                        }
                        if (_stopIdTransformStrategy != null) {
                            String stopId = stub.getStopId();
                            stopId = _stopIdTransformStrategy.transform(rtid.getRouteId(), rtid.getDirection(),
                                    stopId);
                            stub.setStopId(stopId);
                        }
                    });

                    // Re-set the trip ID to the parsed trip ID; coerces IDs to a uniform format.
                    // If the trip is matched, the ID will be rewritten again to the corresponding
                    // static trip ID below.
                    tb.setTripId(rtid.toString());
                } else {
                    _log.error("invalid trip_id={} train_id={}", tb.getTripId(),
                            tb.getExtension(GtfsRealtimeNYCT.nyctTripDescriptor).getTrainId());
                }

                // Some routes have start date set incorrectly
                if (tb.getStartDate().length() > 8) {
                    tb.setStartDate(fixedStartDate(tb));
                }

                TripMatchResult result = _tripMatcher.match(tub, rtid, fm.getHeader().getTimestamp());
                matchesByTrip.put(result.getTripId(), result);
            }

            // For TUs that match to same trip - possible they should be merged (route D has mid-line
            // relief points where trip ID changes)
            // If they are NOT merged, then drop the matches for the worse ones
            for (Collection<TripMatchResult> matches : matchesByTrip.asMap().values()) {
                if (!tryMergeResult(matches) && matches.size() > 1 && !_allowDuplicates) {
                    List<TripMatchResult> dups = new ArrayList<>(matches);
                    dups.sort(Collections.reverseOrder());
                    TripMatchResult best = dups.get(0);
                    for (int i = 1; i < dups.size(); i++) {
                        TripMatchResult result = dups.get(i);
                        _log.debug("dropping duplicate in static trip={}, RT trip={} ({}). Better trip is {} ({})",
                                best.getTripId(), result.getRtTripId(), result.getStatus(),
                                best.getRtTripId(), best.getStatus());
                        result.setStatus(Status.NO_MATCH);
                        result.setResult(null);
                    }
                }
            }

            Set<String> matchedTripIds = new HashSet<>();

            // Read out results of matching. If there is a match, rewrite TU's trip ID. Add TU to return list.
            for (TripMatchResult result : matchesByTrip.values()) {
                if (!result.getStatus().equals(Status.MERGED)) {
                    GtfsRealtime.TripUpdate.Builder tub = result.getTripUpdateBuilder();
                    GtfsRealtime.TripDescriptor.Builder tb = tub.getTripBuilder();
                    if (result.hasResult() && (result.getTripUpdate().getStopTimeUpdateCount() == 0
                            || !result.stopsMatchToEnd())) {
                        _log.info("no stop match rt={} static={} {}",
                                result.getTripUpdate().getTrip().getTripId(),
                                result.getResult().getTrip().getId().getId(),
                                (result.getResult().getStopTimes().get(0).getDepartureTime() / 60) * 100);
                        result.setStatus(Status.NO_MATCH);
                        result.setResult(null);
                    }
                    if (result.hasResult()) {
                        ActivatedTrip at = result.getResult();
                        String staticTripId = at.getTrip().getId().getId();
                        _log.debug("matched {} -> {}", tb.getTripId(), staticTripId);
                        tb.setTripId(staticTripId);
                        removeTimepoints(at, tub);
                        matchedTripIds.add(staticTripId);
                    } else {
                        _log.debug("unmatched: {} due to {}", tub.getTrip().getTripId(), result.getStatus());
                        tb.setScheduleRelationship(GtfsRealtime.TripDescriptor.ScheduleRelationship.ADDED);
                        // ignore ADDED trips without stops
                        if (tub.getStopTimeUpdateCount() == 0)
                            continue;
                        // Trip Headsign and direction
                        String stopId = result.getRtLastStop();
                        String tripHeadsign = _tripActivator.getStopNameForId(stopId);
                        String nsDirection = NyctTripId
                                .buildFromTripDescriptor(tub.getTrip(), _routesWithReverseRTDirections)
                                .getDirection();
                        String tripDirection = "S".equals(nsDirection) ? "1" : "0";
                        GtfsRealtimeOneBusAway.OneBusAwayTripUpdate.Builder obaTripUpdate = GtfsRealtimeOneBusAway.OneBusAwayTripUpdate
                                .newBuilder();
                        if (StringUtils.isNotBlank(tripHeadsign)) {
                            obaTripUpdate.setTripHeadsign(tripHeadsign);
                            // Stop Headsign
                            if (_directionsService != null)
                                _directionsService.fillStopHeadSigns(tub.getStopTimeUpdateBuilderList());
                        }
                        obaTripUpdate.setTripDirection(tripDirection);
                        tub.setExtension(GtfsRealtimeOneBusAway.obaTripUpdate, obaTripUpdate.build());
                    }
                    tub.setTimestamp(timestamp);
                    TripUpdate tripUpdate = tub.build();
                    ret.add(tripUpdate);
                }
                routeMetrics.add(result);
                feedMetrics.add(result);
                totalMetrics.add(result);
            }

            if (_cancelUnmatchedTrips) {
                Iterator<ActivatedTrip> staticTrips = _tripActivator
                        .getTripsForRangeAndRoute(start, end, routeId).iterator();
                while (staticTrips.hasNext()) {
                    ActivatedTrip at = staticTrips.next();
                    if (!matchedTripIds.contains(at.getTrip().getId().getId())) {
                        long time = fm.getHeader().getTimestamp();
                        if (at.activeFor(trp, time)) {
                            TripUpdate.Builder tub = TripUpdate.newBuilder();
                            TripDescriptor.Builder tdb = tub.getTripBuilder();
                            tdb.setTripId(at.getTrip().getId().getId());
                            tdb.setRouteId(at.getTrip().getRoute().getId().getId());
                            tdb.setStartDate(at.getServiceDate().getAsString());
                            tdb.setScheduleRelationship(ScheduleRelationship.CANCELED);
                            ret.add(tub.build());
                            routeMetrics.addCancelled();
                            feedMetrics.addCancelled();
                            totalMetrics.addCancelled();
                        }
                    }
                }
            }

            if (_listener != null)
                _listener.reportMatchesForRoute(routeId, routeMetrics, _cloudwatchNamespace);
        }
    }

    if (_listener != null)
        _listener.reportMatchesForSubwayFeed(feedId.toString(), feedMetrics, _cloudwatchNamespace);

    _log.info("feed={}, expired TUs={}", feedId, nExpiredTus);

    return ret;
}
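This method uses both views of the same multimap: matchesByTrip.asMap().values() yields one Collection<TripMatchResult> per trip ID (so duplicate matches for a trip can be compared against each other), while matchesByTrip.values() yields every result individually. A minimal sketch of that difference, with placeholder data:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;

public class ValuesVersusAsMapValues {
    public static void main(String[] args) {
        Multimap<String, String> matchesByTrip = ArrayListMultimap.create();
        matchesByTrip.put("trip-1", "match-a");
        matchesByTrip.put("trip-1", "match-b"); // duplicate match for the same trip
        matchesByTrip.put("trip-2", "match-c");

        // asMap().values(): one collection per key - useful for per-key deduplication
        for (Collection<String> matches : matchesByTrip.asMap().values()) {
            System.out.println("per-key group: " + matches); // [match-a, match-b] and [match-c], key order not guaranteed
        }

        // values(): every entry individually, across all keys
        for (String match : matchesByTrip.values()) {
            System.out.println("flat: " + match);
        }
    }
}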
From source file: org.eclipse.papyrus.uml.diagram.activity.activitygroup.GroupRequestAdvisor.java
protected Multimap<EReference, IGroupNotifier> fillReqestWithReferendedElement(IGroupRequest request,
        boolean lookingForParent, boolean onlyContainment) {
    final Rectangle newBounds = getInitalTargetRequestNewBounds(request);
    final Multimap<EReference, IGroupNotifier> result = ArrayListMultimap.create();
    if (request.getNodeDescpitor() == null) {
        return result;
    }
    List<EReference> references = null;
    if (lookingForParent) {
        references = request.getNodeDescpitor().getParentReferences();
    } else {
        references = request.getNodeDescpitor().getChildrenReferences();
    }
    final Multimap<EReference, IGroupNotifier> auxResult = ArrayListMultimap.create();
    final Multimap<EReference, Element> eReferenceLookedForMap = ArrayListMultimap.create();
    getReferenceElements(request, newBounds, references, eReferenceLookedForMap, auxResult, lookingForParent,
            onlyContainment,
            lookingForParent ? request.getNodeDescpitor().getParentEOppositeReferences() : null);
    /*
     * Filter ancestors
     */
    for (EReference ref : eReferenceLookedForMap.keySet()) {
        /*
         * Filter descendant
         * Example :
         * 1 - ActPart1 include in Act1 then Act1 disappear
         * 2 - ActPart1 include in ActPart2 then ActPart1 disappear
         */
        Object adapter = request.getTargetElement().getAdapter(EObject.class);
        if (adapter instanceof Element) {
            Element element = (Element) adapter;
            Predicate<Element> composedPredicate = Predicates.and(new SameContainerFilter(element),
                    lookingForParent ? new DescendantsFilter(eReferenceLookedForMap.values())
                            : new AncestorFilter(eReferenceLookedForMap.values()));
            Collection<Element> filteredCollection = Collections2.filter(eReferenceLookedForMap.get(ref),
                    composedPredicate);
            if (lookingForParent) {
                request.getParentEReferenceMap().putAll(ref, filteredCollection);
            } else {
                request.getChildrenEReferenceMap().putAll(ref, filteredCollection);
            }
        }
    }
    for (EReference ref : auxResult.keySet()) {
        /*
         * Filter descendant
         * Example :
         * 1 - ActPart1 include in Act1 then Act1 disappear
         * 2 - ActPart1 include in ActPart2 then ActPart1 disappear
         */
        Iterable<IGroupNotifier> resultCollection = Iterables.filter(auxResult.get(ref),
                new DescendantsFilterIGroupNotifier(auxResult.values()));
        result.putAll(ref, resultCollection);
    }
    return result;
}
From source file: ca.sqlpower.object.annotation.SPAnnotationProcessor.java
/**
 * Generates and returns source code for importing packages that are
 * required by the persister helper this class is generating.
 *
 * @param visitedClass
 *            The {@link SPObject} class that is being visited by the
 *            annotation processor.
 * @param constructorImports
 *            The {@link Set} of packages that visitedClass uses in its
 *            {@link Constructor} annotated constructor and need to be
 *            imported.
 * @param mutatorImports
 *            The {@link Multimap} of setter methods to packages that
 *            visitedClass uses in its {@link Mutator} annotated methods and
 *            needs to be imported.
 * @return The source code for the generated imports.
 */
private String generateImports(Class<? extends SPObject> visitedClass, Set<String> constructorImports,
        Multimap<String, String> mutatorImports) {
    final String helperPackage = visitedClass.getPackage().getName() + "."
            + PersisterHelperFinder.GENERATED_PACKAGE_NAME;

    // Using a TreeSet here to sort imports alphabetically.
    Set<String> allImports = new TreeSet<String>();
    if (!Modifier.isAbstract(visitedClass.getModifiers())) {
        allImports.addAll(constructorImports);
    }
    allImports.addAll(mutatorImports.values());

    StringBuilder sb = new StringBuilder();

    // XXX Need to import any additional classes this generated persister helper
    // class requires, aside from those needed in visitedClass.
    allImports.add(List.class.getName());
    allImports.add(visitedClass.getName());
    allImports.add(SPPersistenceException.class.getName());
    allImports.add(SPPersister.class.getName());
    allImports.add(SessionPersisterSuperConverter.class.getName());
    allImports.add(SPObject.class.getName());
    allImports.add(DataType.class.getName());
    allImports.addAll(importedClassNames);

    for (String pkg : allImports) {
        // No need to import java.lang as it is automatically imported.
        // No need to import package if the persister helper is already
        // in the package.
        // Also want to keep array classes out
        if (!pkg.startsWith("java.lang") && !pkg.startsWith("[L")) {
            // Nested classes, enums, etc. will be separated by the "$"
            // character but we need to change them to "." so it can be
            // imported correctly.
            String pkgName = pkg.replaceAll("\\$", ".");

            // Only import the package if it is not the same one
            // that the persister helper exists in.
            int index = pkgName.lastIndexOf(".");
            if (index == -1) {
                index = pkgName.length();
            }
            if (!pkgName.substring(0, index).equals(helperPackage)) {
                niprintln(sb, "import " + pkgName + ";");
            }
        }
    }

    return sb.toString();
}
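The detail to note: a Multimap's values() may contain the same package name under several setter methods, so copying it into the TreeSet both deduplicates and sorts the imports. A reduced sketch with invented package names:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.Set;
import java.util.TreeSet;

public class DedupeImports {
    public static void main(String[] args) {
        Multimap<String, String> mutatorImports = ArrayListMultimap.create();
        mutatorImports.put("setName", "java.util.List");
        mutatorImports.put("setItems", "java.util.List"); // same package, different setter
        mutatorImports.put("setDate", "java.util.Date");

        // values() keeps both occurrences of java.util.List ...
        System.out.println(mutatorImports.values().size()); // 3

        // ... but adding into a TreeSet dedupes and sorts them for the import block
        Set<String> allImports = new TreeSet<>(mutatorImports.values());
        System.out.println(allImports); // [java.util.Date, java.util.List]
    }
}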
From source file: org.jboss.errai.ioc.rebind.ioc.graph.impl.DependencyGraphBuilderImpl.java
private Injectable resolveDependency(final BaseDependency dep, final Injectable concrete,
        final Collection<String> problems, final Map<String, Injectable> customProvidedInjectables) {
    if (dep.injectable.resolution != null) {
        return dep.injectable.resolution;
    }

    final Multimap<ResolutionPriority, ConcreteInjectable> resolvedByPriority = HashMultimap.create();
    final Queue<AbstractInjectable> resolutionQueue = new LinkedList<AbstractInjectable>();
    resolutionQueue.add(dep.injectable);
    resolutionQueue.add(addMatchingExactTypeInjectables(dep.injectable));

    processResolutionQueue(resolutionQueue, resolvedByPriority);

    // Iterates through priorities from highest to lowest.
    for (final ResolutionPriority priority : ResolutionPriority.values()) {
        if (resolvedByPriority.containsKey(priority)) {
            final Collection<ConcreteInjectable> resolved = resolvedByPriority.get(priority);
            if (resolved.size() > 1) {
                problems.add(
                        ambiguousDependencyMessage(dep, concrete, new ArrayList<ConcreteInjectable>(resolved)));

                return null;
            } else {
                Injectable injectable = resolved.iterator().next();
                if (injectable.isExtension()) {
                    final ExtensionInjectable providedInjectable = (ExtensionInjectable) injectable;
                    final Collection<Injectable> otherResolvedInjectables = new ArrayList<Injectable>(
                            resolvedByPriority.values());
                    otherResolvedInjectables.remove(injectable);

                    final InjectionSite site = new InjectionSite(concrete.getInjectedType(), getAnnotated(dep),
                            otherResolvedInjectables);
                    injectable = providedInjectable.provider.getInjectable(site, nameGenerator);
                    customProvidedInjectables.put(injectable.getFactoryName(), injectable);
                    dep.injectable = copyAbstractInjectable(dep.injectable);
                }

                return (dep.injectable.resolution = injectable);
            }
        }
    }

    problems.add(unsatisfiedDependencyMessage(dep, concrete));
    return null;
}
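One subtlety here: new ArrayList<Injectable>(resolvedByPriority.values()) takes a snapshot before otherResolvedInjectables.remove(injectable) runs, because values() is a live view and removing through it directly would mutate the multimap. A small sketch, using stand-in strings for the injectables:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayList;
import java.util.Collection;

public class SnapshotBeforeRemove {
    public static void main(String[] args) {
        Multimap<Integer, String> resolvedByPriority = HashMultimap.create();
        resolvedByPriority.put(1, "providerInjectable");
        resolvedByPriority.put(2, "fallbackInjectable");

        // Copy first: removing from the copy leaves the multimap untouched
        Collection<String> others = new ArrayList<>(resolvedByPriority.values());
        others.remove("providerInjectable");
        System.out.println(others);                    // [fallbackInjectable]
        System.out.println(resolvedByPriority.size()); // still 2

        // By contrast, removing via the live view drops the backing entry
        resolvedByPriority.values().remove("providerInjectable");
        System.out.println(resolvedByPriority.size()); // 1
    }
}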
From source file: org.apache.beam.runners.core.construction.graph.GreedyPipelineFuser.java
/**
 * Produce the set of sets of {@link CollectionConsumer consumers} that can be fused into a single
 * {@link ExecutableStage}. This identifies available siblings for sibling fusion.
 *
 * <p>For each set in the returned collection, each of {@link CollectionConsumer consumers}
 * present consumes from the same {@link PCollection} and is compatible, as determined by {@link
 * GreedyPCollectionFusers#isCompatible(PTransformNode, PTransformNode, QueryablePipeline)}.
 *
 * <p>Each input {@link CollectionConsumer} must have an associated {@link Environment}.
 */
private NavigableSet<NavigableSet<CollectionConsumer>> groupSiblings(
        NavigableSet<CollectionConsumer> newConsumers /* Use a navigable set for consistent iteration order */) {
    Multimap<SiblingKey, NavigableSet<CollectionConsumer>> compatibleConsumers = HashMultimap.create();
    // This is O(N**2) with the number of siblings we consider, which is generally the number of
    // parallel consumers of a PCollection. This usually is unlikely to be high,
    // but has potential to be a pretty significant slowdown.
    for (CollectionConsumer newConsumer : newConsumers) {
        SiblingKey key = new AutoValue_GreedyPipelineFuser_SiblingKey(newConsumer.consumedCollection(),
                pipeline.getEnvironment(newConsumer.consumingTransform()).get());
        boolean foundSiblings = false;
        for (Set<CollectionConsumer> existingConsumers : compatibleConsumers.get(key)) {
            if (existingConsumers.stream().allMatch(
                    // The two consume the same PCollection and can exist in the same stage.
                    collectionConsumer -> GreedyPCollectionFusers.isCompatible(
                            collectionConsumer.consumingTransform(), newConsumer.consumingTransform(),
                            pipeline))) {
                existingConsumers.add(newConsumer);
                foundSiblings = true;
                break;
            }
        }
        if (!foundSiblings) {
            NavigableSet<CollectionConsumer> newConsumerSet = new TreeSet<>();
            newConsumerSet.add(newConsumer);
            compatibleConsumers.put(key, newConsumerSet);
        }
    }
    // Order sibling sets by their least siblings. This is stable across the order siblings are
    // generated, given stable IDs.
    @SuppressWarnings("JdkObsolete")
    NavigableSet<NavigableSet<CollectionConsumer>> orderedSiblings = new TreeSet<>(
            Comparator.comparing(NavigableSet::first));
    orderedSiblings.addAll(compatibleConsumers.values());
    return orderedSiblings;
}
From source file: org.apache.cassandra.service.StorageService.java
/**
 * Remove a node that has died, attempting to restore the replica count.
 * If the node is alive, decommission should be attempted. If decommission
 * fails, then removeToken should be called. If we fail while trying to
 * restore the replica count, finally forceRemoveCompletion should be
 * called to forcibly remove the node without regard to replica count.
 *
 * @param tokenString token for the node
 */
public void removeToken(String tokenString) {
    InetAddress myAddress = FBUtilities.getLocalAddress();
    Token localToken = tokenMetadata_.getToken(myAddress);
    Token token = partitioner.getTokenFactory().fromString(tokenString);
    InetAddress endpoint = tokenMetadata_.getEndpoint(token);

    if (endpoint == null)
        throw new UnsupportedOperationException("Token not found.");

    if (endpoint.equals(myAddress))
        throw new UnsupportedOperationException("Cannot remove node's own token");

    if (Gossiper.instance.getLiveMembers().contains(endpoint))
        throw new UnsupportedOperationException("Node " + endpoint
                + " is alive and owns this token. Use decommission command to remove it from the ring");

    // A leaving endpoint that is dead is already being removed.
    if (tokenMetadata_.isLeaving(endpoint))
        logger_.warn("Node " + endpoint + " is already being removed, continuing removal anyway");

    if (!replicatingNodes.isEmpty())
        throw new UnsupportedOperationException(
                "This node is already processing a removal. Wait for it to complete, or use 'removetoken force' if this has failed.");

    // Find the endpoints that are going to become responsible for data
    for (String table : DatabaseDescriptor.getNonSystemTables()) {
        // if the replication factor is 1 the data is lost so we shouldn't wait for confirmation
        if (Table.open(table).getReplicationStrategy().getReplicationFactor() == 1)
            continue;

        // get all ranges that change ownership (that is, a node needs
        // to take responsibility for new range)
        Multimap<Range, InetAddress> changedRanges = getChangedRangesForLeaving(table, endpoint);
        IFailureDetector failureDetector = FailureDetector.instance;
        for (InetAddress ep : changedRanges.values()) {
            if (failureDetector.isAlive(ep))
                replicatingNodes.add(ep);
            else
                logger_.warn("Endpoint " + ep + " is down and will not receive data for re-replication of "
                        + endpoint);
        }
    }
    removingNode = endpoint;

    tokenMetadata_.addLeavingEndpoint(endpoint);
    calculatePendingRanges();

    // bundle two states together. include this nodes state to keep the status quo,
    // but indicate the leaving token so that it can be dealt with.
    Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS,
            valueFactory.removingNonlocal(localToken, token));

    // kick off streaming commands
    restoreReplicaCount(endpoint, myAddress);

    // wait for ReplicationFinishedVerbHandler to signal we're done
    while (!replicatingNodes.isEmpty()) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            throw new AssertionError(e);
        }
    }

    excise(token, endpoint);

    // indicate the token has left
    Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS,
            valueFactory.removedNonlocal(localToken, token));

    replicatingNodes.clear();
    removingNode = null;
}
From source file: com.github.pms1.tppt.p2.FeatureXmlComparator.java
private void comparePlugins2(Map<String, Multimap<String, Element>> baseline,
        Map<String, Multimap<String, Element>> current, ElementDeltaReporter elementDeltaReporter,
        DeltaReporter deltaReporter) {
    for (String id : Sets.union(baseline.keySet(), current.keySet())) {
        Multimap<String, Element> b = baseline.get(id);
        if (b == null)
            b = HashMultimap.create();
        else
            b = HashMultimap.create(b);

        Multimap<String, Element> c = current.get(id);
        if (c == null)
            c = HashMultimap.create();
        else
            c = HashMultimap.create(c);

        AttributesDeltaReporter r = new AttributesDeltaReporter() {

            @Override
            public void removed(String key) {
                deltaReporter.fileDelta("Plugin {0} attribute {1} removed", id, key);
            }

            @Override
            public void changed(String key, String left, String right) {
                if (key.equals("version")) {
                    deltaReporter.pluginVersionDelta(id, left, right);
                } else {
                    deltaReporter.fileDelta("Plugin {0} attribute {1} changed {2} -> {3}", id, key, left,
                            right);
                }
            }

            @Override
            public void added(String key, String value) {
                deltaReporter.fileDelta("Plugin {0} attribute {1} / {2} added", id, key, value);
            }
        };

        Set<String> intersection = new HashSet<>(b.keys());
        intersection.retainAll(c.keys());

        for (String v : intersection) {
            Collection<Element> be = b.get(v);
            Collection<Element> ce = c.get(v);

            if (be.size() == 1 && ce.size() == 1) {
                compareAttributes(Iterables.getOnlyElement(be), Iterables.getOnlyElement(ce), r);
                b.removeAll(v);
                c.removeAll(v);
            }
        }

        if (b.size() == 1 && c.size() == 1) {
            compareAttributes(Iterables.getOnlyElement(b.values()), Iterables.getOnlyElement(c.values()), r);
        } else {
            for (Element e : b.values())
                deltaReporter.fileDelta("Plugin removed: {0}", domRenderer.render(e));

            for (Element e : c.values())
                deltaReporter.fileDelta("Plugin added: {0}", domRenderer.render(e));
        }
    }
}
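The last comparison relies on Iterables.getOnlyElement(multimap.values()), which returns the single value when the multimap holds exactly one entry and throws otherwise; the surrounding size() == 1 checks make that safe. A minimal sketch with placeholder elements:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;

public class OnlyElementFromValues {
    public static void main(String[] args) {
        Multimap<String, String> b = HashMultimap.create();
        b.put("1.0.0", "<plugin id=\"x\" version=\"1.0.0\"/>");

        // Safe only because we checked size() == 1 first, as comparePlugins2 does
        if (b.size() == 1) {
            String only = Iterables.getOnlyElement(b.values());
            System.out.println(only);
        }

        b.put("2.0.0", "<plugin id=\"x\" version=\"2.0.0\"/>");
        // With more than one entry, getOnlyElement(b.values()) would throw
        // IllegalArgumentException instead of picking one arbitrarily.
    }
}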