Example usage for com.google.common.collect Multimap containsKey

Introduction

This page collects usage examples for com.google.common.collect Multimap containsKey.

Prototype

boolean containsKey(@Nullable Object key);

Document

Returns true if this multimap contains at least one key-value pair with the given key.
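
Before the real-world examples, here is a minimal self-contained sketch of the containsKey semantics. Note that Multimap.get never returns null; an absent key yields an empty collection, which is why code often checks containsKey rather than comparing get's result to null.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapContainsKeyDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = HashMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2);

        System.out.println(multimap.containsKey("a")); // true
        System.out.println(multimap.containsKey("b")); // false

        // get(key) never returns null: an absent key yields an empty collection
        System.out.println(multimap.get("b").isEmpty()); // true

        // removing the last value for a key also removes the key itself
        multimap.removeAll("a");
        System.out.println(multimap.containsKey("a")); // false
    }
}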

Usage

From source file: com.github.rinde.rinsim.central.rt.ScheduleUtil.java

static List<List<Parcel>> fixSchedule(ImmutableList<ImmutableList<Parcel>> schedule, GlobalStateObject state) {

    checkArgument(schedule.size() == state.getVehicles().size(),
            "The number of routes (%s) and the number of vehicles (%s) must " + "be equal.", schedule.size(),
            state.getVehicles().size());
    checkArgument(!state.getVehicles().get(0).getRoute().isPresent(),
            "A state object without routes is expected.");

    // only parcels in this set may occur in the schedule
    final Set<Parcel> undeliveredParcels = new HashSet<>();
    undeliveredParcels.addAll(state.getAvailableParcels());

    // for each vehicle, we create a multiset that represents the number of
    // times each parcel is REQUIRED to occur in the route of that vehicle
    final List<Multiset<Parcel>> expectedRoutes = new ArrayList<>();
    for (int i = 0; i < state.getVehicles().size(); i++) {
        expectedRoutes.add(HashMultiset.<Parcel>create());
        final VehicleStateObject vehicle = state.getVehicles().get(i);
        expectedRoutes.get(i).addAll(vehicle.getContents());
        if (vehicle.getDestination().isPresent()
                && !vehicle.getContents().contains(vehicle.getDestination().get())) {
            expectedRoutes.get(i).add(vehicle.getDestination().get(), 2);
        }
        undeliveredParcels.addAll(vehicle.getContents());
    }

    // create map of parcel -> vehicle index
    final Multimap<Parcel, Integer> parcelOwner = LinkedHashMultimap.create();
    for (int i = 0; i < schedule.size(); i++) {
        final List<Parcel> route = schedule.get(i);
        final Set<Parcel> routeSet = ImmutableSet.copyOf(route);
        for (final Parcel p : routeSet) {
            parcelOwner.put(p, i);
        }
    }

    // copy schedule into a modifiable structure
    final List<List<Parcel>> newSchedule = new ArrayList<>();
    for (final ImmutableList<Parcel> route : schedule) {
        newSchedule.add(new ArrayList<>(route));
    }
    // compare with current vehicle cargo
    for (int i = 0; i < state.getVehicles().size(); i++) {
        final VehicleStateObject vehicle = state.getVehicles().get(i);
        final Multiset<Parcel> routeSet = ImmutableMultiset.copyOf(schedule.get(i));

        final Set<Parcel> test = Sets.union(routeSet.elementSet(), expectedRoutes.get(i).elementSet());

        for (final Parcel p : test) {
            final int actualOccurences = routeSet.count(p);
            checkState(actualOccurences <= 2);
            final int expectedOccurrences = expectedRoutes.get(i).count(p);

            if (!undeliveredParcels.contains(p)) {
                // it is already delivered, remove all occurrences
                newSchedule.get(i).removeAll(Collections.singleton(p));
            } else if (actualOccurences != expectedOccurrences && expectedOccurrences > 0) {
                if (expectedOccurrences == 1 && actualOccurences == 2) {
                    newSchedule.get(i).remove(p);
                } else {
                    // expected occurrences = 1 or 2
                    final boolean destinationIsCurrent = vehicle.getDestination().asSet().contains(p);

                    int toAdd = expectedOccurrences - actualOccurences;

                    // add it once at the front of the route
                    if (destinationIsCurrent) {
                        newSchedule.get(i).add(0, p);
                        toAdd--;
                    }

                    // add it once to the end of the route
                    if (toAdd > 0) {
                        newSchedule.get(i).add(p);
                    }
                }
            }

            // if the parcel is expected in the current vehicle, but it also appears
            // in (an) other vehicle(s), we have to remove it there
            if (expectedOccurrences > 0 && parcelOwner.containsKey(p)) {
                for (final Integer v : parcelOwner.get(p)) {
                    if (!v.equals(i)) {
                        newSchedule.get(v).removeAll(Collections.singleton(p));
                    }
                }
            }
        }

        if (vehicle.getDestination().isPresent()
                && !newSchedule.get(i).get(0).equals(vehicle.getDestination().get())) {
            newSchedule.get(i).remove(vehicle.getDestination().get());
            newSchedule.get(i).add(0, vehicle.getDestination().get());
        }
    }
    return newSchedule;
}
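
The example above builds a reverse index (parcel to the indices of the vehicles whose schedule contains it) and guards the eviction loop with containsKey. Below is a stripped-down sketch of that ownership-index pattern; the String parcels, route contents, and the keeper index are illustrative stand-ins, not values from the example.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Multimap;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class OwnershipIndexSketch {
    public static void main(String[] args) {
        List<List<String>> routes = new ArrayList<>();
        routes.add(new ArrayList<>(Arrays.asList("p1", "p2")));
        routes.add(new ArrayList<>(Arrays.asList("p1", "p3", "p1")));

        // index: parcel -> indices of the routes that contain it
        Multimap<String, Integer> owner = LinkedHashMultimap.create();
        for (int i = 0; i < routes.size(); i++) {
            for (String p : ImmutableSet.copyOf(routes.get(i))) {
                owner.put(p, i);
            }
        }

        // "p1" is expected in route 0 only; evict it from every other route
        int keeper = 0;
        if (owner.containsKey("p1")) {
            for (Integer v : owner.get("p1")) {
                if (v != keeper) {
                    routes.get(v).removeAll(Collections.singleton("p1"));
                }
            }
        }
        System.out.println(routes); // [[p1, p2], [p3]]
    }
}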

From source file: org.apache.druid.java.util.http.client.NettyHttpClient.java

@Override
public <Intermediate, Final> ListenableFuture<Final> go(final Request request,
        final HttpResponseHandler<Intermediate, Final> handler, final Duration requestReadTimeout) {
    final HttpMethod method = request.getMethod();
    final URL url = request.getUrl();
    final Multimap<String, String> headers = request.getHeaders();

    final String requestDesc = StringUtils.format("%s %s", method, url);
    if (log.isDebugEnabled()) {
        log.debug("[%s] starting", requestDesc);
    }

    // Block while acquiring a channel from the pool, then complete the request asynchronously.
    final Channel channel;
    final String hostKey = getPoolKey(url);
    final ResourceContainer<ChannelFuture> channelResourceContainer = pool.take(hostKey);
    final ChannelFuture channelFuture = channelResourceContainer.get().awaitUninterruptibly();
    if (!channelFuture.isSuccess()) {
        channelResourceContainer.returnResource(); // Some other poor sap will have to deal with it...
        return Futures.immediateFailedFuture(
                new ChannelException("Faulty channel in resource pool", channelFuture.getCause()));
    } else {
        channel = channelFuture.getChannel();

        // In case we get a channel that never had its readability turned back on.
        channel.setReadable(true);
    }
    final String urlFile = StringUtils.nullToEmptyNonDruidDataString(url.getFile());
    final HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, method,
            urlFile.isEmpty() ? "/" : urlFile);

    if (!headers.containsKey(HttpHeaders.Names.HOST)) {
        httpRequest.headers().add(HttpHeaders.Names.HOST, getHost(url));
    }

    // If Accept-Encoding is set in the Request, use that. Otherwise use the default from "compressionCodec".
    if (!headers.containsKey(HttpHeaders.Names.ACCEPT_ENCODING)) {
        httpRequest.headers().set(HttpHeaders.Names.ACCEPT_ENCODING, compressionCodec.getEncodingString());
    }

    for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
        String key = entry.getKey();

        for (String obj : entry.getValue()) {
            httpRequest.headers().add(key, obj);
        }
    }

    if (request.hasContent()) {
        httpRequest.setContent(request.getContent());
    }

    final long readTimeout = getReadTimeout(requestReadTimeout);
    final SettableFuture<Final> retVal = SettableFuture.create();

    if (readTimeout > 0) {
        channel.getPipeline().addLast(READ_TIMEOUT_HANDLER_NAME,
                new ReadTimeoutHandler(timer, readTimeout, TimeUnit.MILLISECONDS));
    }

    channel.getPipeline().addLast(LAST_HANDLER_NAME, new SimpleChannelUpstreamHandler() {
        private volatile ClientResponse<Intermediate> response = null;

        // Chunk number most recently assigned.
        private long currentChunkNum = 0;

        // Suspend and resume watermarks (respectively: last chunk number that triggered a suspend, and that was
        // provided to the TrafficCop's resume method). Synchronized access since they are not always accessed
        // from an I/O thread. (TrafficCops can be called from any thread.)
        private final Object watermarkLock = new Object();
        private long suspendWatermark = -1;
        private long resumeWatermark = -1;

        @Override
        public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
            if (log.isDebugEnabled()) {
                log.debug("[%s] messageReceived: %s", requestDesc, e.getMessage());
            }
            try {
                Object msg = e.getMessage();

                if (msg instanceof HttpResponse) {
                    HttpResponse httpResponse = (HttpResponse) msg;
                    if (log.isDebugEnabled()) {
                        log.debug("[%s] Got response: %s", requestDesc, httpResponse.getStatus());
                    }

                    HttpResponseHandler.TrafficCop trafficCop = resumeChunkNum -> {
                        synchronized (watermarkLock) {
                            resumeWatermark = Math.max(resumeWatermark, resumeChunkNum);

                            if (suspendWatermark >= 0 && resumeWatermark >= suspendWatermark) {
                                suspendWatermark = -1;
                                channel.setReadable(true);
                                long backPressureDuration = System.nanoTime() - backPressureStartTimeNs;
                                log.debug("[%s] Resumed reads from channel (chunkNum = %,d).", requestDesc,
                                        resumeChunkNum);
                                return backPressureDuration;
                            }
                        }

                        return 0; //If we didn't resume, don't know if backpressure was happening
                    };
                    response = handler.handleResponse(httpResponse, trafficCop);
                    if (response.isFinished()) {
                        retVal.set((Final) response.getObj());
                    }

                    assert currentChunkNum == 0;
                    possiblySuspendReads(response);

                    if (!httpResponse.isChunked()) {
                        finishRequest();
                    }
                } else if (msg instanceof HttpChunk) {
                    HttpChunk httpChunk = (HttpChunk) msg;
                    if (log.isDebugEnabled()) {
                        log.debug("[%s] Got chunk: %sB, last=%s", requestDesc,
                                httpChunk.getContent().readableBytes(), httpChunk.isLast());
                    }

                    if (httpChunk.isLast()) {
                        finishRequest();
                    } else {
                        response = handler.handleChunk(response, httpChunk, ++currentChunkNum);
                        if (response.isFinished() && !retVal.isDone()) {
                            retVal.set((Final) response.getObj());
                        }
                        possiblySuspendReads(response);
                    }
                } else {
                    throw new IllegalStateException(
                            StringUtils.format("Unknown message type[%s]", msg.getClass()));
                }
            } catch (Exception ex) {
                log.warn(ex, "[%s] Exception thrown while processing message, closing channel.", requestDesc);

                if (!retVal.isDone()) {
                    retVal.set(null);
                }
                channel.close();
                channelResourceContainer.returnResource();

                throw ex;
            }
        }

        private void possiblySuspendReads(ClientResponse<?> response) {
            if (!response.isContinueReading()) {
                synchronized (watermarkLock) {
                    suspendWatermark = Math.max(suspendWatermark, currentChunkNum);
                    if (suspendWatermark > resumeWatermark) {
                        channel.setReadable(false);
                        backPressureStartTimeNs = System.nanoTime();
                        log.debug("[%s] Suspended reads from channel (chunkNum = %,d).", requestDesc,
                                currentChunkNum);
                    }
                }
            }
        }

        private void finishRequest() {
            ClientResponse<Final> finalResponse = handler.done(response);

            if (!finalResponse.isFinished() || !finalResponse.isContinueReading()) {
                throw new ISE(
                        "[%s] Didn't get a completed ClientResponse Object from [%s] (finished = %s, continueReading = %s)",
                        requestDesc, handler.getClass(), finalResponse.isFinished(),
                        finalResponse.isContinueReading());
            }
            if (!retVal.isDone()) {
                retVal.set(finalResponse.getObj());
            }
            removeHandlers();
            channel.setReadable(true);
            channelResourceContainer.returnResource();
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext context, ExceptionEvent event) {
            if (log.isDebugEnabled()) {
                final Throwable cause = event.getCause();
                if (cause == null) {
                    log.debug("[%s] Caught exception", requestDesc);
                } else {
                    log.debug(cause, "[%s] Caught exception", requestDesc);
                }
            }

            retVal.setException(event.getCause());
            // response is non-null if we received initial chunk and then exception occurs
            if (response != null) {
                handler.exceptionCaught(response, event.getCause());
            }
            try {
                if (channel.isOpen()) {
                    channel.close();
                }
            } catch (Exception e) {
                log.warn(e, "Error while closing channel");
            } finally {
                channelResourceContainer.returnResource();
            }
        }

        @Override
        public void channelDisconnected(ChannelHandlerContext context, ChannelStateEvent event) {
            if (log.isDebugEnabled()) {
                log.debug("[%s] Channel disconnected", requestDesc);
            }
            // response is non-null if we received initial chunk and then exception occurs
            if (response != null) {
                handler.exceptionCaught(response, new ChannelException("Channel disconnected"));
            }
            channel.close();
            channelResourceContainer.returnResource();
            if (!retVal.isDone()) {
                log.warn("[%s] Channel disconnected before response complete", requestDesc);
                retVal.setException(new ChannelException("Channel disconnected"));
            }
        }

        private void removeHandlers() {
            if (readTimeout > 0) {
                channel.getPipeline().remove(READ_TIMEOUT_HANDLER_NAME);
            }
            channel.getPipeline().remove(LAST_HANDLER_NAME);
        }
    });

    channel.write(httpRequest).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (!future.isSuccess()) {
                channel.close();
                channelResourceContainer.returnResource();
                if (!retVal.isDone()) {
                    retVal.setException(new ChannelException(
                            StringUtils.format("[%s] Failed to write request to channel", requestDesc),
                            future.getCause()));
                }
            }
        }
    });

    return retVal;
}
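
In the client above, containsKey lets the caller-supplied headers win: the default Host and Accept-Encoding values are only installed when the request did not already carry them. Here is a stand-alone sketch of that defaults-only-if-absent pattern; the header names and values are illustrative.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.LinkedHashMap;
import java.util.Map;

public class DefaultHeadersSketch {
    public static void main(String[] args) {
        Multimap<String, String> headers = ArrayListMultimap.create();
        headers.put("Accept-Encoding", "identity"); // set by the caller

        Map<String, String> defaults = new LinkedHashMap<>();
        defaults.put("Host", "example.com");
        defaults.put("Accept-Encoding", "gzip");

        // install a default only when the caller has not set that header
        defaults.forEach((name, value) -> {
            if (!headers.containsKey(name)) {
                headers.put(name, value);
            }
        });
        // the caller's "identity" encoding is preserved; only "Host" is added
        System.out.println(headers);
    }
}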

From source file: com.b2international.snowowl.snomed.validation.detail.SnomedValidationIssueDetailExtension.java

private void extendIssueDetails(BranchContext context, Collection<ValidationIssue> issues) {
    final RevisionSearcher searcher = context.service(RevisionSearcher.class);

    final Multimap<String, ValidationIssue> issuesByComponentId = Multimaps.index(issues,
            issue -> issue.getAffectedComponent().getComponentId());

    final Multimap<ComponentCategory, String> issueComponentIdsByComponentCategory = HashMultimap.create();
    issues.stream().forEach(issue -> {
        final ComponentCategory componentCategory = getComponentCategory(
                issue.getAffectedComponent().getTerminologyComponentId());
        issueComponentIdsByComponentCategory.put(componentCategory,
                issue.getAffectedComponent().getComponentId());
    });

    final Multimap<String, String> issueIdsByConceptIds = HashMultimap.create();
    final Set<String> alreadyFetchedConceptIds = Sets.newHashSet();
    for (ComponentCategory category : issueComponentIdsByComponentCategory.keySet()) {
        final Query<String[]> query = buildQuery(category, issueComponentIdsByComponentCategory.get(category));

        for (Hits<String[]> hits : searcher.scroll(query)) {
            for (String[] hit : hits) {
                String id = hit[0];
                String status = hit[1];
                String moduleId = hit[2];
                issuesByComponentId.get(id).forEach(validationIssue -> {
                    validationIssue.setDetails(COMPONENT_STATUS, status);
                    validationIssue.setDetails(COMPONENT_MODULE_ID, moduleId);
                    if (CONCEPT == category) {
                        validationIssue.setDetails(CONCEPT_STATUS, status);
                        validationIssue.setDetails(SnomedDocument.Fields.EFFECTIVE_TIME,
                                Long.parseLong(hit[3]));
                        alreadyFetchedConceptIds.add(id);
                    } else if (DESCRIPTION == category || RELATIONSHIP == category) {
                        validationIssue.setDetails(SnomedDocument.Fields.EFFECTIVE_TIME,
                                Long.parseLong(hit[3]));
                        final String containerConceptId = hit[4];
                        if (!Strings.isNullOrEmpty(containerConceptId)
                                && (!issueIdsByConceptIds.containsKey(containerConceptId)
                                        || !alreadyFetchedConceptIds.contains(containerConceptId))) {
                            issueIdsByConceptIds.put(containerConceptId, id);
                        }
                        // in case of a description, just add the already fetched term as label to the issue; concepts and relationships will get their labels later
                        if (DESCRIPTION == category) {
                            validationIssue.setAffectedComponentLabels(Collections.singletonList(hit[5]));
                        }
                    }
                });
            }
        }
    }

    if (!issueIdsByConceptIds.isEmpty()) {
        final Query<String[]> conceptStatusQuery = Query.select(String[].class)
                .from(SnomedConceptDocument.class)
                .fields(SnomedConceptDocument.Fields.ID, SnomedConceptDocument.Fields.ACTIVE)
                .where(SnomedConceptDocument.Expressions.ids(issueIdsByConceptIds.keySet())).limit(SCROLL_SIZE)
                .build();

        for (Hits<String[]> hits : searcher.scroll(conceptStatusQuery)) {
            for (String[] hit : hits) {
                Collection<String> issueIds = issueIdsByConceptIds.get(hit[0]);
                issueIds.stream().forEach(id -> {
                    issuesByComponentId.get(id)
                            .forEach(validationIssue -> validationIssue.setDetails(CONCEPT_STATUS, hit[1]));
                });
            }
        }
    }
}

From source file: de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java

public String getmax(Collection<Multimap<Double, String>> collection,
        Multimap<Integer, Multimap<Double, String>> reverse) {
    String geneToBeCompared = "";
    String maxScoringGene = ""; // can be one gene or combination of many genes
    double max = 0;
    boolean isGenePresent = true;

    if (collection.size() > 0) {
        while (isGenePresent) {
            for (Multimap<Double, String> gene : collection) {
                try {
                    max = gene.keySet().iterator().next();
                    finalscore = max;
                    for (String geneCombinationWithBestScore : gene.get(max)) {
                        if (geneCombinationWithBestScore.contains("+")) {
                            String[] individualGenes = geneCombinationWithBestScore.split("\\+");
                            for (String individualGene : individualGenes) {
                                if (reverse.containsKey(Integer.parseInt(individualGene))) {
                                    if (geneToBeCompared.equals("")) {
                                        geneToBeCompared = individualGene;
                                    } else {
                                        geneToBeCompared += "+" + individualGene;
                                    }
                                }
                            }
                        } else {
                            if (reverse.containsKey(Integer.parseInt(geneCombinationWithBestScore))) {
                                if (geneToBeCompared.equals("")) {
                                    geneToBeCompared = geneCombinationWithBestScore;
                                } else {
                                    geneToBeCompared += ";" + geneCombinationWithBestScore;
                                }
                            }
                        }
                    }
                    if (geneToBeCompared.trim().equals("")) {
                        gene.asMap().remove(max);
                    }
                } catch (Exception ds) {
                    ds.printStackTrace();
                }
            }
            if (!geneToBeCompared.trim().equals("")) {
                isGenePresent = false;
            }
            double roundOff = (double) Math.round(max * 100) / 100; // set score precision to two decimal places
            maxScoringGene = roundOff + "=" + geneToBeCompared;
        }
    }
    return maxScoringGene;
}

From source file: org.dllearner.algorithms.qtl.qald.QALDExperiment.java

/**
 * Split the SPARQL query and join the result set of each split. This
 * allows for the execution of more complex queries.
 * @param sparqlQuery
 * @return
 */
private List<String> getResultSplitted(String sparqlQuery) {
    Query query = QueryFactory.create(sparqlQuery);
    logger.trace("Getting result set for\n" + query);

    QueryUtils queryUtils = new QueryUtils();
    Set<Triple> triplePatterns = queryUtils.extractTriplePattern(query);

    // remove triple patterns with unbound object vars
    if (triplePatterns.size() > 10) {
        query = removeUnboundObjectVarTriples(query);
        triplePatterns = queryUtils.extractTriplePattern(query);
    }

    //  Virtuoso bug workaround with literals of type xsd:float and xsd:double
    for (Iterator<Triple> iterator = triplePatterns.iterator(); iterator.hasNext();) {
        Node object = iterator.next().getObject();
        if (object.isLiteral() && object.getLiteralDatatype() != null
                && (object.getLiteralDatatype().equals(XSDDatatype.XSDfloat)
                        || object.getLiteralDatatype().equals(XSDDatatype.XSDdouble))) {
            iterator.remove();
        }
    }

    Var targetVar = query.getProjectVars().get(0); // should be ?x0

    Multimap<Var, Triple> var2TriplePatterns = HashMultimap.create();
    for (Triple tp : triplePatterns) {
        var2TriplePatterns.put(Var.alloc(tp.getSubject()), tp);
    }

    // we keep only the most specific types for each var
    filterOutGeneralTypes(var2TriplePatterns);

    // 1. get the outgoing triple patterns of the target var that do not have
    // outgoing triple patterns
    Set<Triple> fixedTriplePatterns = new HashSet<>();
    Set<Set<Triple>> clusters = new HashSet<>();
    Collection<Triple> targetVarTriplePatterns = var2TriplePatterns.get(targetVar);
    boolean useSplitting = false;
    for (Triple tp : targetVarTriplePatterns) {
        Node object = tp.getObject();
        if (object.isConcrete() || !var2TriplePatterns.containsKey(Var.alloc(object))) {
            fixedTriplePatterns.add(tp);
        } else {
            Set<Triple> cluster = new TreeSet<>((Comparator<Triple>) (o1, o2) -> {
                return ComparisonChain.start().compare(o1.getSubject().toString(), o2.getSubject().toString())
                        .compare(o1.getPredicate().toString(), o2.getPredicate().toString())
                        .compare(o1.getObject().toString(), o2.getObject().toString()).result();
            });
            cluster.add(tp);
            clusters.add(cluster);
            useSplitting = true;
        }
    }

    if (!useSplitting) {
        clusters.add(Sets.newHashSet(fixedTriplePatterns));
    } else {
        logger.trace("Query too complex. Splitting...");
        // 2. build clusters for the other triple patterns
        for (Set<Triple> cluster : clusters) {
            Triple representative = cluster.iterator().next();
            cluster.addAll(var2TriplePatterns.get(Var.alloc(representative.getObject())));
            cluster.addAll(fixedTriplePatterns);
        }
    }

    // again split clusters to have only a maximum number of triple patterns
    int maxNrOfTriplePatternsPerQuery = 20; // number of outgoing triple patterns from the target var in each executed query
    Set<Set<Triple>> newClusters = new HashSet<>();
    for (Set<Triple> cluster : clusters) {
        int cnt = 0;
        for (Triple triple : cluster) {
            if (triple.getSubject().matches(targetVar)) {
                cnt++;
            }
        }

        if (cnt > maxNrOfTriplePatternsPerQuery) {
            Set<Triple> newCluster = new HashSet<>();
            for (Triple triple : cluster) {
                if (triple.getSubject().matches(targetVar)) {
                    newCluster.add(triple);
                }
                if (newCluster.size() == maxNrOfTriplePatternsPerQuery) {
                    newClusters.add(newCluster);
                    newCluster = new HashSet<>();
                }
            }
            if (!newCluster.isEmpty()) {
                newClusters.add(newCluster);
            }
        }
    }
    for (Set<Triple> cluster : newClusters) {
        Set<Triple> additionalTriples = new HashSet<>();
        for (Triple triple : cluster) {
            if (triple.getObject().isVariable()) {
                additionalTriples.addAll(var2TriplePatterns.get(Var.alloc(triple.getObject())));
            }
        }
        cluster.addAll(additionalTriples);
    }
    //      clusters = newClusters;

    Set<String> resources = null;
    // 3. run query for each cluster
    for (Set<Triple> cluster : clusters) {
        Query q = new Query();
        q.addProjectVars(Collections.singleton(targetVar));
        ElementTriplesBlock el = new ElementTriplesBlock();
        for (Triple triple : cluster) {
            el.addTriple(triple);
        }
        q.setQuerySelectType();
        q.setDistinct(true);
        q.setQueryPattern(el);

        q = rewriteForVirtuosoDateLiteralBug(q);
        //         q = rewriteForVirtuosoFloatingPointIssue(q);
        logger.trace(q);
        //         sparqlQuery = getPrefixedQuery(sparqlQuery);
        System.out.println(q);
        List<String> partialResult = getResult(q.toString());
        Set<String> resourcesTmp = new HashSet<>(partialResult);

        if (resourcesTmp.isEmpty()) {
            System.err.println("Empty query result");
            System.err.println(q);
            //            System.exit(0);
            return Collections.emptyList();
        }

        if (resources == null) {
            resources = resourcesTmp;
        } else {
            resources.retainAll(resourcesTmp);
        }
    }

    return new ArrayList<>(resources);
}

From source file: org.opennms.features.topology.plugins.topo.linkd.internal.LldpLinkStatusProvider.java

@Override
protected List<EdgeAlarmStatusSummary> getEdgeAlarmSummaries(List<Integer> linkIds) {
    List<LldpLink> links = m_lldpLinkDao.findLinksForIds(linkIds);

    Multimap<String, EdgeAlarmStatusSummary> summaryMap = HashMultimap.create();
    for (LldpLink sourceLink : links) {

        OnmsNode sourceNode = sourceLink.getNode();
        LldpElement sourceElement = sourceNode.getLldpElement();

        for (LldpLink targetLink : links) {
            OnmsNode targetNode = targetLink.getNode();
            LldpElement targetLldpElement = targetNode.getLldpElement();

            //Compare the remote data to the targetNode element data
            boolean bool1 = sourceLink.getLldpRemPortId().equals(targetLink.getLldpPortId())
                    && targetLink.getLldpRemPortId().equals(sourceLink.getLldpPortId());
            boolean bool2 = sourceLink.getLldpRemPortDescr().equals(targetLink.getLldpPortDescr())
                    && targetLink.getLldpRemPortDescr().equals(sourceLink.getLldpPortDescr());
            boolean bool3 = sourceLink.getLldpRemChassisId().equals(targetLldpElement.getLldpChassisId())
                    && targetLink.getLldpRemChassisId().equals(sourceElement.getLldpChassisId());
            boolean bool4 = sourceLink.getLldpRemSysname().equals(targetLldpElement.getLldpSysname())
                    && targetLink.getLldpRemSysname().equals(sourceElement.getLldpSysname());
            boolean bool5 = sourceLink.getLldpRemPortIdSubType() == targetLink.getLldpPortIdSubType()
                    && targetLink.getLldpRemPortIdSubType() == sourceLink.getLldpPortIdSubType();

            if (bool1 && bool2 && bool3 && bool4 && bool5) {

                summaryMap.put(sourceNode.getNodeId() + ":" + sourceLink.getLldpPortIfindex(),
                        new EdgeAlarmStatusSummary(sourceLink.getId(), targetLink.getId(), null));

            }
        }
    }

    List<OnmsAlarm> alarms = getLinkDownAlarms();

    for (OnmsAlarm alarm : alarms) {
        String key = alarm.getNodeId() + ":" + alarm.getIfIndex();
        if (summaryMap.containsKey(key)) {
            Collection<EdgeAlarmStatusSummary> summaries = summaryMap.get(key);
            for (EdgeAlarmStatusSummary summary : summaries) {
                summary.setEventUEI(alarm.getUei());
            }

        }

    }
    return new ArrayList<EdgeAlarmStatusSummary>(summaryMap.values());
}

From source file: org.apache.pulsar.broker.loadbalance.impl.DeviationShedder.java

/**
 * Recommend that all of the returned bundles be unloaded based on observing excessive standard deviations according
 * to some metric.
 *
 * @param loadData
 *            The load data used to make the unloading decision.
 * @param conf
 *            The service configuration.
 * @return A map from all selected bundles to the brokers on which they reside.
 */
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
        final ServiceConfiguration conf) {
    final Multimap<String, String> result = ArrayListMultimap.create();
    bundleTreeSetCache.clear();
    metricTreeSetCache.clear();
    double sum = 0;
    double squareSum = 0;
    final Map<String, BrokerData> brokerDataMap = loadData.getBrokerData();

    // Treating each broker as a data point, calculate the sum and squared
    // sum of the evaluated broker metrics.
    // These may be used to calculate the standard deviation.
    for (Map.Entry<String, BrokerData> entry : brokerDataMap.entrySet()) {
        final double value = brokerValue(entry.getValue(), conf);
        sum += value;
        squareSum += value * value;
        metricTreeSetCache.add(new ImmutablePair<>(value, entry.getKey()));
    }
    // Mean cannot change by just moving around bundles.
    final double mean = sum / brokerDataMap.size();
    double standardDeviation = Math.sqrt(squareSum / brokerDataMap.size() - mean * mean);
    final double deviationThreshold = getDeviationThreshold(conf);
    String lastMostOverloaded = null;
    // While the most loaded broker is above the standard deviation
    // threshold, continue to move bundles.
    while ((metricTreeSetCache.last().getKey() - mean) / standardDeviation > deviationThreshold) {
        final Pair<Double, String> mostLoadedPair = metricTreeSetCache.last();
        final double highestValue = mostLoadedPair.getKey();
        final String mostLoaded = mostLoadedPair.getValue();

        final Pair<Double, String> leastLoadedPair = metricTreeSetCache.first();
        final double leastValue = leastLoadedPair.getKey();
        final String leastLoaded = metricTreeSetCache.first().getValue();

        if (!mostLoaded.equals(lastMostOverloaded)) {
            // Reset the bundle tree set now that a different broker is
            // being considered.
            bundleTreeSetCache.clear();
            for (String bundle : brokerDataMap.get(mostLoaded).getLocalData().getBundles()) {
                if (!result.containsKey(bundle)) {
                    // Don't consider bundles that are already going to be
                    // moved.
                    bundleTreeSetCache.add(new ImmutablePair<>(
                            bundleValue(bundle, brokerDataMap.get(mostLoaded), conf), bundle));
                }
            }
            lastMostOverloaded = mostLoaded;
        }
        boolean selected = false;
        while (!(bundleTreeSetCache.isEmpty() || selected)) {
            Pair<Double, String> mostExpensivePair = bundleTreeSetCache.pollLast();
            double loadIncurred = mostExpensivePair.getKey();
            // When the bundle is moved, we want the previously least loaded
            // broker to end up with a lower overall load than the previously
            // most loaded broker. Thus, we only consider moving the bundle if
            // this condition holds, and otherwise we try the next bundle.
            if (loadIncurred + leastValue < highestValue) {
                // Update the standard deviation and replace the old load
                // values in the broker tree set with the
                // load values assuming this move took place.
                final String bundleToMove = mostExpensivePair.getValue();
                result.put(bundleToMove, mostLoaded);
                metricTreeSetCache.remove(mostLoadedPair);
                metricTreeSetCache.remove(leastLoadedPair);
                final double newHighLoad = highestValue - loadIncurred;
                final double newLowLoad = leastValue + loadIncurred;
                squareSum -= highestValue * highestValue + leastValue * leastValue;
                squareSum += newHighLoad * newHighLoad + newLowLoad * newLowLoad;
                standardDeviation = Math.sqrt(squareSum / brokerDataMap.size() - mean * mean);
                metricTreeSetCache.add(new ImmutablePair<>(newLowLoad, leastLoaded));
                metricTreeSetCache.add(new ImmutablePair<>(newHighLoad, mostLoaded));
                selected = true;
            }
        }
        if (!selected) {
            // Move on to the next broker if no bundle could be moved.
            metricTreeSetCache.pollLast();
        }
    }
    return result;
}

From source file: org.sosy_lab.cpachecker.util.predicates.AssignmentToPathAllocator.java

/**
 * We need the variableEnvoirment and functionEnvoirment for their SSA indices.
 */
private void createAssignments(Model pModel, Collection<AssignableTerm> terms, Set<Assignment> termSet,
        Map<String, Assignment> variableEnvoirment, Map<LeftHandSide, Object> pVariables,
        Multimap<String, Assignment> functionEnvoirment, Map<String, Map<Address, Object>> memory) {

    for (AssignableTerm term : terms) {

        Assignment assignment = new Assignment(term, pModel.get(term));

        if (term instanceof Variable) {

            Variable variable = (Variable) term;
            String name = variable.getName();

            if (variableEnvoirment.containsKey(name)) {
                Variable oldVariable = (Variable) variableEnvoirment.get(name).getTerm();
                int oldIndex = oldVariable.getSSAIndex();
                int newIndex = variable.getSSAIndex();
                if (oldIndex < newIndex) {

                    //update variableEnvoirment for subsequent calculation
                    variableEnvoirment.remove(name);
                    variableEnvoirment.put(name, assignment);

                    LeftHandSide oldlhs = createLeftHandSide(oldVariable);
                    LeftHandSide lhs = createLeftHandSide(variable);
                    pVariables.remove(oldlhs);
                    pVariables.put(lhs, assignment.getValue());
                }
            } else {
                //update variableEnvoirment for subsequent calculation
                variableEnvoirment.put(name, assignment);

                LeftHandSide lhs = createLeftHandSide(variable);
                pVariables.put(lhs, assignment.getValue());
            }

        } else if (term instanceof Function) {

            Function function = (Function) term;
            String name = getName(function);

            if (functionEnvoirment.containsKey(name)) {

                boolean replaced = false;

                Set<Assignment> assignments = new HashSet<>(functionEnvoirment.get(name));

                for (Assignment oldAssignment : assignments) {
                    Function oldFunction = (Function) oldAssignment.getTerm();

                    if (isLessSSA(oldFunction, function)) {

                        //update functionEnvoirment for subsequent calculation
                        functionEnvoirment.remove(name, oldAssignment);
                        functionEnvoirment.put(name, assignment);
                        replaced = true;
                        removeHeapValue(memory, assignment);
                        addHeapValue(memory, assignment);

                    }
                }

                if (!replaced) {
                    functionEnvoirment.put(name, assignment);
                    addHeapValue(memory, assignment);
                }
            } else {
                functionEnvoirment.put(name, assignment);
                addHeapValue(memory, assignment);
            }
        }
        termSet.add(assignment);
    }
}
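
The method above uses containsKey on functionEnvoirment to decide between replacing stale assignments (those with a lower SSA index) and recording the first assignment for a name. The following simplified sketch shows the same keep-the-newest idea; it uses a hypothetical "payload@index" encoding in place of real SSA-indexed terms, and it simply drops older incoming values rather than reproducing the original's replaced flag.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.HashSet;

public class KeepNewestSketch {

    // value = "payload@index"; the number after '@' stands in for the SSA index
    static int indexOf(String value) {
        return Integer.parseInt(value.substring(value.indexOf('@') + 1));
    }

    public static void main(String[] args) {
        Multimap<String, String> env = HashMultimap.create();
        for (String value : new String[] { "x@1", "x@3", "x@2" }) {
            String name = "x";
            if (env.containsKey(name)) {
                // replace any stored entry that has a lower index than the new one
                for (String old : new HashSet<>(env.get(name))) {
                    if (indexOf(old) < indexOf(value)) {
                        env.remove(name, old);
                        env.put(name, value);
                    }
                }
            } else {
                env.put(name, value);
            }
        }
        System.out.println(env.get("x")); // [x@3]
    }
}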

From source file: uk.ac.ebi.mdk.service.loader.name.ChEBINameLoader.java

/**
 * {@inheritDoc}
 */
@Override
public void update() throws IOException {

    // get the preferred names first
    Map<String, String> preferredNameMap = getPreferredNameMap();

    ResourceFileLocation location = getLocation("ChEBI Names");
    CSVReader csv = new CSVReader(new InputStreamReader(location.open()), '\t', '\0');

    List<String> header = Arrays.asList(csv.readNext());
    int nameIndex = header.indexOf("NAME");
    int typeIndex = header.indexOf("TYPE");
    int accessionIndex = header.indexOf("COMPOUND_ID");
    int langIndex = header.indexOf("LANGUAGE");

    // what we treat as synonyms
    Set<String> synonymType = new HashSet<String>(Arrays.asList("SYSTEMATIC NAME", "SYNONYM"));

    Multimap<String, String> synonyms = HashMultimap.create();
    Multimap<String, String> iupac = HashMultimap.create();
    Multimap<String, String> inn = HashMultimap.create();
    Multimap<String, String> brand = HashMultimap.create();

    fireProgressUpdate("preloading primary identifiers...");
    createMap();
    fireProgressUpdate("done");

    int count = 0;
    String[] row = null;
    while (!isCancelled() && (row = csv.readNext()) != null) {

        if (row.length < header.size()) {
            LOGGER.error("malformed entry: " + Joiner.on(", ").join(row));
            continue;
        }

        String accession = getPrimaryIdentifier(row[accessionIndex]);
        String name = row[nameIndex];
        String type = row[typeIndex];
        String lang = row[langIndex];

        // only keep latin and english compound names
        if (!lang.equals("en") && !lang.equals("la")) {
            continue;
        }

        /* SYNONYM
         * Type matches anything we are considering a synonym
         */
        if (synonymType.contains(type))
            synonyms.put(accession, name);

        /* PREFERRED NAME
         * If there is already a preferred name in the map, check
         * whether the preferred name matches the 'NAME'. If it
         * doesn't add the 'NAME' to the synonym map
         */
        if (type.equals("NAME")) {
            if (preferredNameMap.containsKey(accession)) {
                String preferredName = preferredNameMap.get(accession);
                if (!preferredName.equals(name)) {
                    synonyms.put(accession, name);
                }
            } else {
                preferredNameMap.put(accession, name);
            }
        }

        /* IUPAC
         * Add the iupac name; if there is already an iupac
         * name and they do not match, add the new name
         * to the synonyms list
         */
        if (type.equals("IUPAC NAME")) {
            if (!iupac.containsKey(accession)) {
                iupac.put(accession, name);
            } else {
                if (!iupac.get(accession).contains(name))
                    synonyms.put(accession, name);
            }

        }

        if (type.equals("BRAND NAME")) {
            if (!brand.containsKey(accession)) {
                brand.put(accession, name);
            } else {
                if (!brand.get(accession).contains(name))
                    synonyms.put(accession, name);
            }
        }

        if (type.equals("INN")) {
            if (!inn.containsKey(accession)) {
                inn.put(accession, name);
            } else {
                if (!inn.get(accession).contains(name))
                    synonyms.put(accession, name);
            }
        }

        // update progress (note this is only the first step, so the file progress counts for half)
        if (++count % 150 == 0)
            fireProgressUpdate(location.progress() * 0.5);

    }

    // if not cancelled write the index
    if (!isCancelled()) {

        // get all the accessions to iterate through
        Set<String> accessions = new HashSet<String>(preferredNameMap.size() * 2);
        accessions.addAll(synonyms.keySet());
        accessions.addAll(iupac.keySet());
        accessions.addAll(preferredNameMap.keySet());
        accessions.addAll(brand.keySet());
        accessions.addAll(inn.keySet());

        DefaultNameIndexWriter writer = new DefaultNameIndexWriter(getIndex());

        double size = accessions.size();
        count = 0;

        // for each accession write the names to the index
        for (String accession : accessions) {

            if (isActive(accession)) {

                String preferredName = preferredNameMap.containsKey(accession) ? preferredNameMap.get(accession)
                        : "";
                String iupacName = iupac.containsKey(accession) ? iupac.get(accession).iterator().next() : "";
                String brandName = brand.containsKey(accession) ? brand.get(accession).iterator().next() : "";
                String innName = inn.containsKey(accession) ? inn.get(accession).iterator().next() : "";

                writer.write(accession, preferredName, iupacName, brandName, innName, synonyms.get(accession));

            }

            if (++count % 150 == 0)
                fireProgressUpdate(0.5 + ((count / size) * 0.5));

        }

        writer.close();

    }

    location.close();

}
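
The loader above applies a first-wins policy per name type: containsKey decides whether an incoming name becomes the canonical IUPAC/brand/INN name or is demoted to a synonym. A compact sketch of that routing, with illustrative accession and name values:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class FirstWinsSketch {
    public static void main(String[] args) {
        Multimap<String, String> iupac = HashMultimap.create();
        Multimap<String, String> synonyms = HashMultimap.create();

        // illustrative rows: accession, name
        String[][] rows = { { "CHEBI:15377", "oxidane" }, { "CHEBI:15377", "water" } };
        for (String[] row : rows) {
            String accession = row[0];
            String name = row[1];
            if (!iupac.containsKey(accession)) {
                iupac.put(accession, name); // the first name of this type wins
            } else if (!iupac.get(accession).contains(name)) {
                synonyms.put(accession, name); // later distinct names become synonyms
            }
        }
        System.out.println(iupac.get("CHEBI:15377")); // [oxidane]
        System.out.println(synonyms.get("CHEBI:15377")); // [water]
    }
}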

From source file: com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields, like multi-node, can only be changed if the bucket status is set to suspended, to keep
    // the control logic simple
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}