Example usage for java.util Map getOrDefault

List of usage examples for java.util Map getOrDefault

Introduction

This page lists example usages of java.util Map getOrDefault, drawn from several open-source projects.

Prototype

default V getOrDefault(Object key, V defaultValue) 

Document

Returns the value to which the specified key is mapped, or defaultValue if this map contains no mapping for the key.
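
Under the hood, the default implementation behaves like a get followed by a containsKey check, so a key that is explicitly mapped to null returns null rather than the default, and the default value is never inserted into the map. A minimal, self-contained demonstration (class and variable names are ours):

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);

        // Key present: the mapped value is returned
        System.out.println(ages.getOrDefault("alice", -1)); // 30

        // Key absent: the default is returned, and is NOT inserted
        System.out.println(ages.getOrDefault("bob", -1));   // -1
        System.out.println(ages.containsKey("bob"));        // false

        // Key explicitly mapped to null: null is returned, not the default
        ages.put("carol", null);
        System.out.println(ages.getOrDefault("carol", -1)); // null
    }
}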

Usage

From source file:nlmt.topicmodels.HierarchicalLDAModel.java

/**
 * Given the specified document, calculates the complete log likelihood for
 * that document given the words in this path component, and the number of
 * documents that have visited the node.
 *
 * @param wordCountsAtLevel the total word counts for all vocabulary words in the document at this level
 * @param documentWords the set of Word objects appearing in this document
 * @param eta the value of eta for this level
 * @param node the node with the words
 * @return the log likelihood of choosing this node
 */
protected double getPathWordsLikelihood(Map<Integer, Integer> wordCountsAtLevel, Set<Word> documentWords,
        double eta, HierarchicalLDANode node) {
    double etaTotal = eta * vocabulary.size();
    int nodeTotalWordCount = node.getTotalWordCount();
    double result = logGamma(
            etaTotal + nodeTotalWordCount - wordCountsAtLevel.values().stream().mapToInt(v -> v).sum());
    result -= logGamma(etaTotal + nodeTotalWordCount);

    for (Word word : documentWords) {
        int vocabIndex = word.getVocabularyId();
        int wordCountAllDocuments = node.getWordCount(vocabIndex);
        result -= logGamma(eta + wordCountAllDocuments - wordCountsAtLevel.getOrDefault(vocabIndex, 0));
        result += logGamma(eta + wordCountAllDocuments);
    }
    return result;
}
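
In the loop above, getOrDefault(vocabIndex, 0) treats a word that is absent from the level's count map as a zero count, avoiding a containsKey branch inside the loop. The same zero-default idiom pairs naturally with merge when building sparse counts; a small sketch (the method and data are illustrative, not part of the model above):

import java.util.HashMap;
import java.util.Map;

static Map<Integer, Integer> countTokens(int[] tokens) {
    Map<Integer, Integer> counts = new HashMap<>();
    for (int vocabIndex : tokens) {
        counts.merge(vocabIndex, 1, Integer::sum); // increment, starting at 1
    }
    return counts;
}

// countTokens(new int[]{3, 7, 3, 3}).getOrDefault(3, 0) == 3
// countTokens(new int[]{3, 7, 3, 3}).getOrDefault(9, 0) == 0 (unseen word)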

From source file:com.homeadvisor.kafdrop.service.CuratorKafkaMonitor.java

private Stream<ConsumerPartitionVO> getConsumerPartitionStream(String groupId, String topicName,
        TopicVO topicOpt) {
    ZKGroupTopicDirs groupTopicDirs = new ZKGroupTopicDirs(groupId, topicName);

    if (topicOpt == null || topicOpt.getName().equals(topicName)) {
        topicOpt = getTopic(topicName).orElse(null);
    }

    if (topicOpt != null) {
        final TopicVO topic = topicOpt;

        Map<Integer, Long> consumerOffsets = getConsumerOffsets(groupId, topic);

        return topic.getPartitions().stream().map(partition -> {
            int partitionId = partition.getId();

            final ConsumerPartitionVO consumerPartition = new ConsumerPartitionVO(groupId, topicName,
                    partitionId);
            consumerPartition.setOwner(Optional
                    .ofNullable(consumerTreeCache
                            .getCurrentData(groupTopicDirs.consumerOwnerDir() + "/" + partitionId))
                    .map(data -> new String(data.getData())).orElse(null));

            consumerPartition.setOffset(consumerOffsets.getOrDefault(partitionId, -1L));

            final Optional<TopicPartitionVO> topicPartition = topic.getPartition(partitionId);
            consumerPartition.setSize(topicPartition.map(TopicPartitionVO::getSize).orElse(-1L));
            consumerPartition.setFirstOffset(topicPartition.map(TopicPartitionVO::getFirstOffset).orElse(-1L));

            return consumerPartition;
        });
    } else {
        return Stream.empty();
    }
}
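
The -1L default in setOffset above is a sentinel: a partition with no committed offset yields a value downstream code can recognize, instead of the null that Map.get would return. The pattern in isolation (names are ours):

import java.util.Map;

static long committedOffsetOrUnknown(Map<Integer, Long> consumerOffsets, int partitionId) {
    // -1 signals "no offset committed yet" without a null check at the call site
    return consumerOffsets.getOrDefault(partitionId, -1L);
}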

From source file:io.stallion.dataAccess.db.DB.java

private Schema metaDataModelToSchema(Class cls, Map<String, Object> meta) {
    Schema schema = new Schema(meta.get("tableName").toString(), cls);
    for (Map colData : (List<Map>) meta.getOrDefault("columns", list())) {
        Col col = new Col();
        col.setPropertyName(colData.getOrDefault("propertyName", "").toString());
        col.setName(colData.getOrDefault("name", "").toString());
        col.setUpdateable((boolean) colData.getOrDefault("updateable", true));
        col.setInsertable((boolean) colData.getOrDefault("insertable", true));

        schema.getColumns().add(col);
    }

    return schema;
}
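
Because colData is a raw Map here, each getOrDefault returns Object, forcing the toString() calls and the (boolean) casts. With a typed map the default fixes the return type and the casts disappear; a sketch under that assumption (the keys are illustrative):

import java.util.HashMap;
import java.util.Map;

public class TypedDefaults {
    public static void main(String[] args) {
        Map<String, Boolean> colFlags = new HashMap<>();
        colFlags.put("insertable", false);

        boolean insertable = colFlags.getOrDefault("insertable", true); // false, no cast
        boolean updateable = colFlags.getOrDefault("updateable", true); // true: key absent
        System.out.println(insertable + " " + updateable);
    }
}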

From source file:org.dita.dost.AbstractIntegrationTest.java

private Document rewriteIds(final Document doc, final Map<String, Pattern> patterns) {
    final Map<String, String> idMap = new HashMap<>();
    AtomicInteger counter = new AtomicInteger();
    final NodeList ns = doc.getElementsByTagName("*");
    for (int i = 0; i < ns.getLength(); i++) {
        final Element e = (Element) ns.item(i);
        for (Map.Entry<String, Pattern> p : patterns.entrySet()) {
            final Attr id = e.getAttributeNode(p.getKey());
            if (id != null) {
                if (p.getKey().equals("headers")) {// split value
                    final List<String> res = new ArrayList<>();
                    for (final String v : id.getValue().trim().split("\\s+")) {
                        rewriteId(v, idMap, counter, p.getValue());
                        res.add(idMap.getOrDefault(v, v));
                    }
                    id.setNodeValue(res.stream().collect(Collectors.joining(" ")));

                } else {
                    final String v = id.getValue();
                    rewriteId(v, idMap, counter, p.getValue());
                    if (idMap.containsKey(v)) {
                        id.setNodeValue(idMap.get(v));
                    }
                }
            }
        }
    }
    return doc;
}
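
idMap.getOrDefault(v, v) above is the identity-default idiom: look up a rewritten value and fall back to the original when no rewrite exists, doing in one call what the containsKey branch below it does in three lines. In isolation (the method name is ours):

import java.util.Map;

static String rewriteOrKeep(Map<String, String> idMap, String id) {
    // Return the rewritten id, or the id itself when no rewrite is recorded
    return idMap.getOrDefault(id, id);
}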

From source file:nlmt.topicmodels.HierarchicalLDAModel.java

/**
 * Infer what the topic distributions should be for an unseen document.
 * Returns a Pair with the left-most item containing a list of nodes that
 * the document belongs to in the topic hierarchy. The right-most item in
 * the Pair contains the distribution of the document over the topics. Both
 * lists will contain <code>maxDepth</code> number of items. If there are
 * no words in the document, empty lists will be returned for both items. If
 * <code>overrideGamma</code> is <code>true</code>, then gamma
 * will be temporarily set to a very low number to prevent new nodes
 * from being created.
 *
 * @param document the List of Strings that represents the document
 * @param numIterations the number of times to perform gibbs sampling on the inference
 * @param overrideGamma if true, will prevent new node generation by making gamma very small
 * @return a Pair of Lists, the left being the topic numbers, the right being the distributions
 */
public Pair<List<Integer>, List<Double>> inference(List<String> document, int numIterations,
        boolean overrideGamma) {
    if (numIterations < 1) {
        throw new IllegalArgumentException("numIterations must be >= 1");
    }

    if (document.size() == 0) {
        return Pair.of(new ArrayList<>(), new ArrayList<>());
    }

    // If the caller doesn't want new nodes generated, set gamma to a very low value
    double oldGamma = gamma;
    gamma = overrideGamma ? 1e-17 : gamma;

    // Read the document
    SparseDocument newDocument = new SparseDocument(vocabulary);
    newDocument.readDocument(document, false);
    int temporaryDocumentId = documents.length;

    // Allocate the new document to a random path, and allocate words in the document to the path
    HierarchicalLDAPath newDocumentPath = new HierarchicalLDAPath(rootNode, maxDepth);
    List<Integer> newPath = chooseRandomPath(false);
    newDocumentPath.addPath(newPath, nodeMapper);
    newDocumentPath.addDocument(temporaryDocumentId);
    chooseLevelsForDocument(newDocument, newDocumentPath, true);

    // Perform gibbs sampling but only for the current document
    for (int iteration = 0; iteration < numIterations; iteration++) {
        doGibbsSamplingSingleDocument(temporaryDocumentId, newDocument, newDocumentPath);
    }

    // Restore the previous value of gamma
    gamma = oldGamma;

    // Figure out the word distributions
    List<Integer> pathNodeIds = Arrays.stream(newDocumentPath.getNodes()).map(HierarchicalLDANode::getId)
            .collect(Collectors.toList());
    List<Double> wordDistributions = new ArrayList<>();
    Map<Integer, Integer> wordCountsByTopic = newDocument.getTopicCounts();
    int totalWords = wordCountsByTopic.values().stream().mapToInt(value -> value).sum();

    // If the document contained no valid words, return empty distributions and a
    // path of -1 node ids
    if (totalWords == 0) {
        wordDistributions = new ArrayList<>();
        pathNodeIds = new ArrayList<>();
        for (int level = 0; level < maxDepth; level++) {
            wordDistributions.add(0.0);
            pathNodeIds.add(-1);
        }
    } else {
        for (int level = 0; level < maxDepth; level++) {
            // Cast before dividing so the division is floating-point rather than integer
            wordDistributions.add((double) wordCountsByTopic.getOrDefault(level, 0) / totalWords);
        }

        // Reset the old counts for the nodes and their documents by de-allocating the node from
        // the path that it was associated with
        Set<Word> wordSet = newDocument.getWordSet();
        for (int level = 0; level < maxDepth; level++) {
            HierarchicalLDANode node = newDocumentPath.getNode(level);
            node.removeVisited(temporaryDocumentId);
            for (Word word : wordSet) {
                if (word.getTopic() == level) {
                    node.removeWord(word);
                    word.setTopic(-1);
                }
            }
        }
    }

    // Delete any empty nodes that exist after removing the words and documents
    HierarchicalLDANode.deleteEmptyNodes(nodeMapper);
    return Pair.of(pathNodeIds, wordDistributions);
}

From source file:com.kurtraschke.nyctrtproxy.services.TripUpdateProcessor.java

public List<GtfsRealtime.TripUpdate> processFeed(Integer feedId, GtfsRealtime.FeedMessage fm,
        MatchMetrics totalMetrics) {

    long timestamp = fm.getHeader().getTimestamp();

    MatchMetrics feedMetrics = new MatchMetrics();
    feedMetrics.reportLatency(timestamp);

    if (_latencyLimit > 0 && feedMetrics.getLatency() > _latencyLimit) {
        _log.info("Feed {} ignored, too high latency = {}", feedId, feedMetrics.getLatency());
        if (_listener != null)
            _listener.reportMatchesForSubwayFeed(feedId.toString(), feedMetrics, _cloudwatchNamespace);
        return Collections.emptyList();
    }

    final Map<String, String> realtimeToStaticRouteMap = _realtimeToStaticRouteMapByFeed.getOrDefault(feedId,
            Collections.emptyMap());

    int nExpiredTus = 0, nTotalRecords = 0;

    // Read in trip updates per route, skipping trip updates whose data is too stale.
    Multimap<String, GtfsRealtime.TripUpdate> tripUpdatesByRoute = ArrayListMultimap.create();
    for (GtfsRealtime.FeedEntity entity : fm.getEntityList()) {
        if (entity.hasTripUpdate()) {
            GtfsRealtime.TripUpdate tu = entity.getTripUpdate();
            if (expiredTripUpdate(tu, fm.getHeader().getTimestamp())) {
                nExpiredTus++;
            } else {
                String routeId = tu.getTrip().getRouteId();
                routeId = realtimeToStaticRouteMap.getOrDefault(routeId, routeId);
                tripUpdatesByRoute.put(routeId, tu);
            }
            nTotalRecords++;
        }
    }
    reportRecordsIn(nTotalRecords, nExpiredTus, totalMetrics, feedMetrics);

    List<GtfsRealtime.TripUpdate> ret = Lists.newArrayList();

    for (GtfsRealtimeNYCT.TripReplacementPeriod trp : fm.getHeader()
            .getExtension(GtfsRealtimeNYCT.nyctFeedHeader).getTripReplacementPeriodList()) {
        if (_routeBlacklistByFeed.getOrDefault(feedId, Collections.emptySet()).contains(trp.getRouteId()))
            continue;
        GtfsRealtime.TimeRange range = trp.getReplacementPeriod();

        Date start = range.hasStart() ? new Date(range.getStart() * 1000)
                : earliestTripStart(tripUpdatesByRoute.values());
        Date end = range.hasEnd() ? new Date(range.getEnd() * 1000)
                : new Date(fm.getHeader().getTimestamp() * 1000);

        // All route IDs in this trip replacement period
        Set<String> routeIds = Arrays.stream(trp.getRouteId().split(", ?"))
                .map(routeId -> realtimeToStaticRouteMap.getOrDefault(routeId, routeId))
                .collect(Collectors.toSet());

        // Collect additions separately: adding to routeIds while iterating over it
        // would throw a ConcurrentModificationException
        Set<String> extraRouteIds = new HashSet<>();
        for (String routeId : routeIds) {
            String newRouteId = _addToTripReplacementPeriodByRoute.get(routeId);
            if (newRouteId != null)
                extraRouteIds.add(newRouteId);
        }
        routeIds.addAll(extraRouteIds);

        // Kurt's trip matching algorithm (ActivatedTripMatcher) requires calculating currently-active static trips at this point.
        _tripMatcher.initForFeed(start, end, routeIds);

        for (String routeId : routeIds) {

            MatchMetrics routeMetrics = new MatchMetrics();

            Multimap<String, TripMatchResult> matchesByTrip = ArrayListMultimap.create();
            Collection<GtfsRealtime.TripUpdate> tripUpdates = tripUpdatesByRoute.get(routeId);
            routeMetrics.reportRecordsIn(tripUpdates.size());
            for (GtfsRealtime.TripUpdate tu : tripUpdates) {
                GtfsRealtime.TripUpdate.Builder tub = GtfsRealtime.TripUpdate.newBuilder(tu);
                GtfsRealtime.TripDescriptor.Builder tb = tub.getTripBuilder();

                // rewrite route ID for some routes
                tb.setRouteId(realtimeToStaticRouteMap.getOrDefault(tb.getRouteId(), tb.getRouteId()));

                // Remove timepoints not in GTFS; in some cases this means there may be no STUs left (e.g. the H shuttle at H19S).
                removeTimepoints(tub);

                // Get the ID, which consists of route, direction, origin-departure time, and possibly a path identifier (for feed 1).
                NyctTripId rtid = NyctTripId.buildFromTripDescriptor(tb, _routesWithReverseRTDirections);

                // If we were able to parse the trip ID, there are various fixes
                // we may need to apply.
                if (rtid != null) {

                    // Fix stop IDs which don't include direction
                    tub.getStopTimeUpdateBuilderList().forEach(stub -> {
                        if (!(stub.getStopId().endsWith("N") || stub.getStopId().endsWith("S"))) {
                            stub.setStopId(stub.getStopId() + rtid.getDirection());
                        } else if (_routesWithReverseRTDirections.contains(tb.getRouteId())) {
                            String stopId = stub.getStopId();
                            stub.setStopId(stopId.substring(0, stopId.length() - 1) + rtid.getDirection());
                        }
                        if (_stopIdTransformStrategy != null) {
                            String stopId = stub.getStopId();
                            stopId = _stopIdTransformStrategy.transform(rtid.getRouteId(), rtid.getDirection(),
                                    stopId);
                            stub.setStopId(stopId);
                        }
                    });

                    // Re-set the trip ID to the parsed trip ID; coerces IDs to a uniform format.
                    // If the trip is matched, the ID will be rewritten again to the corresponding static trip ID below.
                    tb.setTripId(rtid.toString());
                } else {
                    _log.error("invalid trip_id={} train_id={}", tb.getTripId(),
                            tb.getExtension(GtfsRealtimeNYCT.nyctTripDescriptor).getTrainId());
                }

                // Some routes have start date set incorrectly
                if (tb.getStartDate().length() > 8) {
                    tb.setStartDate(fixedStartDate(tb));
                }

                TripMatchResult result = _tripMatcher.match(tub, rtid, fm.getHeader().getTimestamp());
                matchesByTrip.put(result.getTripId(), result);
            }

            // TUs that match to the same trip may need to be merged (route D has mid-line relief points where the trip ID changes).
            // If they are NOT merged, drop the matches for the worse ones.
            for (Collection<TripMatchResult> matches : matchesByTrip.asMap().values()) {
                if (!tryMergeResult(matches) && matches.size() > 1 && !_allowDuplicates) {
                    List<TripMatchResult> dups = new ArrayList<>(matches);
                    dups.sort(Collections.reverseOrder());
                    TripMatchResult best = dups.get(0);
                    for (int i = 1; i < dups.size(); i++) {
                        TripMatchResult result = dups.get(i);
                        _log.debug(
                                "dropping duplicate in static trip={}, RT trip={} ({}). Better trip is {} ({})",
                                best.getTripId(), result.getRtTripId(), result.getStatus(), best.getRtTripId(),
                                best.getStatus());
                        result.setStatus(Status.NO_MATCH);
                        result.setResult(null);
                    }
                }
            }

            Set<String> matchedTripIds = new HashSet<>();
            // Read out results of matching. If there is a match, rewrite TU's trip ID. Add TU to return list.
            for (TripMatchResult result : matchesByTrip.values()) {
                if (!result.getStatus().equals(Status.MERGED)) {
                    GtfsRealtime.TripUpdate.Builder tub = result.getTripUpdateBuilder();
                    GtfsRealtime.TripDescriptor.Builder tb = tub.getTripBuilder();
                    if (result.hasResult() && (result.getTripUpdate().getStopTimeUpdateCount() == 0
                            || !result.stopsMatchToEnd())) {
                        _log.info("no stop match rt={} static={} {}",
                                result.getTripUpdate().getTrip().getTripId(),
                                result.getResult().getTrip().getId().getId(),
                                (result.getResult().getStopTimes().get(0).getDepartureTime() / 60) * 100);
                        result.setStatus(Status.NO_MATCH);
                        result.setResult(null);
                    }
                    if (result.hasResult()) {
                        ActivatedTrip at = result.getResult();
                        String staticTripId = at.getTrip().getId().getId();
                        _log.debug("matched {} -> {}", tb.getTripId(), staticTripId);
                        tb.setTripId(staticTripId);
                        removeTimepoints(at, tub);
                        matchedTripIds.add(staticTripId);
                    } else {
                        _log.debug("unmatched: {} due to {}", tub.getTrip().getTripId(), result.getStatus());
                        tb.setScheduleRelationship(GtfsRealtime.TripDescriptor.ScheduleRelationship.ADDED);
                        // ignore ADDED trips without stops
                        if (tub.getStopTimeUpdateCount() == 0)
                            continue;
                        // Trip Headsign and direction
                        String stopId = result.getRtLastStop();
                        String tripHeadsign = _tripActivator.getStopNameForId(stopId);
                        String nsDirection = NyctTripId
                                .buildFromTripDescriptor(tub.getTrip(), _routesWithReverseRTDirections)
                                .getDirection();
                        String tripDirection = "S".equals(nsDirection) ? "1" : "0";
                        GtfsRealtimeOneBusAway.OneBusAwayTripUpdate.Builder obaTripUpdate = GtfsRealtimeOneBusAway.OneBusAwayTripUpdate
                                .newBuilder();
                        if (StringUtils.isNotBlank(tripHeadsign)) {
                            obaTripUpdate.setTripHeadsign(tripHeadsign);
                            //Stop Headsign
                            if (_directionsService != null)
                                _directionsService.fillStopHeadSigns(tub.getStopTimeUpdateBuilderList());
                        }
                        obaTripUpdate.setTripDirection(tripDirection);
                        tub.setExtension(GtfsRealtimeOneBusAway.obaTripUpdate, obaTripUpdate.build());
                    }
                    tub.setTimestamp(timestamp);
                    TripUpdate tripUpdate = tub.build();
                    ret.add(tripUpdate);
                }

                routeMetrics.add(result);
                feedMetrics.add(result);
                totalMetrics.add(result);
            }

            if (_cancelUnmatchedTrips) {
                Iterator<ActivatedTrip> staticTrips = _tripActivator
                        .getTripsForRangeAndRoute(start, end, routeId).iterator();
                while (staticTrips.hasNext()) {
                    ActivatedTrip at = staticTrips.next();
                    if (!matchedTripIds.contains(at.getTrip().getId().getId())) {
                        long time = fm.getHeader().getTimestamp();
                        if (at.activeFor(trp, time)) {
                            TripUpdate.Builder tub = TripUpdate.newBuilder();
                            TripDescriptor.Builder tdb = tub.getTripBuilder();
                            tdb.setTripId(at.getTrip().getId().getId());
                            tdb.setRouteId(at.getTrip().getRoute().getId().getId());
                            tdb.setStartDate(at.getServiceDate().getAsString());
                            tdb.setScheduleRelationship(ScheduleRelationship.CANCELED);
                            ret.add(tub.build());

                            routeMetrics.addCancelled();
                            feedMetrics.addCancelled();
                            totalMetrics.addCancelled();
                        }
                    }
                }
            }

            if (_listener != null)
                _listener.reportMatchesForRoute(routeId, routeMetrics, _cloudwatchNamespace);
        }
    }

    if (_listener != null)
        _listener.reportMatchesForSubwayFeed(feedId.toString(), feedMetrics, _cloudwatchNamespace);

    _log.info("feed={}, expired TUs={}", feedId, nExpiredTus);
    return ret;
}
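
Both getOrDefault calls near the top of this method fall back to Collections.emptyMap() and Collections.emptySet(). That is safe precisely because the results are only ever read; the shared immutable empty instances cost nothing to hand out. A compact sketch (the feed data is invented):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class EmptyDefaultDemo {
    public static void main(String[] args) {
        Map<Integer, Set<String>> routeBlacklistByFeed = new HashMap<>();
        routeBlacklistByFeed.put(1, Collections.singleton("SI"));

        // Feed 2 has no entry; the shared immutable empty set stands in,
        // so contains() works with no null check (and is never mutated)
        Set<String> blacklist = routeBlacklistByFeed.getOrDefault(2, Collections.emptySet());
        System.out.println(blacklist.contains("SI")); // false
    }
}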

From source file:org.pentaho.googlecloudstorage.vfs.GoogleCloudStorageFileObject.java

/**
 * Gets the list of child files of the current directory
 *
 * @return A list of files/folders
 * @throws Exception
 */
@Override
protected String[] doListChildren() throws Exception {
    List<String> items = new ArrayList<>();
    Map<String, List<String>> folders = new HashMap<>();
    Bucket bucket = storage.get(getBucketName());
    if (bucket != null && bucket.exists()) {
        folders.put("", new ArrayList<>());
        for (Blob blob : bucket.list().iterateAll()) {
            String path = blob.getName();
            boolean isDirectory = path.endsWith(DELIMITER);
            String parent = "";
            if (path.contains(DELIMITER)) {
                parent = path.substring(0, path.lastIndexOf(DELIMITER));
            }
            String[] parts = path.split(DELIMITER);
            String name = parts[parts.length - 1];
            if (isDirectory) {
                folders.put(fixSlashes(path, false), new ArrayList<>());
                name = name.concat(DELIMITER);
                parent = path.substring(0, path.lastIndexOf(DELIMITER));
                if (parent.contains(DELIMITER)) {
                    parent = parent.substring(0, parent.lastIndexOf(DELIMITER));
                } else {
                    parent = "";
                }
            }
            List<String> folderList = folders.getOrDefault(fixSlashes(parent, true), new ArrayList<>());
            folderList.add(name);
            folders.put(fixSlashes(parent, true), folderList);
        }
        items = folders.getOrDefault(fixSlashes(getName().getPath(), true), Collections.emptyList());
    }

    return items.toArray(new String[items.size()]);
}
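
One subtlety in the grouping loop above: the new ArrayList<>() default is constructed on every call whether or not the key is present, and getOrDefault never stores it, which is why the code must put(...) the list back into the map. computeIfAbsent performs the create-and-insert in a single step; a sketch of the equivalent grouping (the paths are invented):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupingDemo {
    public static void main(String[] args) {
        Map<String, List<String>> folders = new HashMap<>();
        // The list is created and inserted only when the key is absent
        folders.computeIfAbsent("bucket/dir", k -> new ArrayList<>()).add("a.txt");
        folders.computeIfAbsent("bucket/dir", k -> new ArrayList<>()).add("b.txt");
        System.out.println(folders.get("bucket/dir")); // [a.txt, b.txt]
    }
}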

From source file:com.formkiq.core.service.generator.pdfbox.PdfEditorServiceImpl.java

/**
 * Gets Text for Page and merges text together where appropriate.
 *
 * @param textsMap
 *            {@link Map} of Page Number and {@link List} of
 *            {@link PdfTextField}
 * @param pageNum
 *            {@link Integer}
 * @return {@link List} of {@link PdfTextField}
 */
private List<PdfTextField> getTextForPage(final Map<Integer, List<PdfTextField>> textsMap,
        final Integer pageNum) {

    List<PdfTextField> texts = new ArrayList<>(textsMap.getOrDefault(pageNum, emptyList()));

    texts = joinHorizontalSimilarText(texts);

    return texts;
}
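
The new ArrayList<>(...) wrapper here is deliberate: on a miss getOrDefault returns the shared immutable emptyList(), and on a hit it returns the live list stored in the map, so copying is what makes the result safe to modify. In isolation (names are ours):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

static List<String> textsForPage(Map<Integer, List<String>> textsMap, int pageNum) {
    // Copy so callers may mutate freely: emptyList() itself throws on add()
    return new ArrayList<>(textsMap.getOrDefault(pageNum, Collections.emptyList()));
}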

From source file:org.apache.bookkeeper.client.TestRackawareEnsemblePlacementPolicy.java

static BookiesHealthInfo getBookiesHealthInfo(Map<BookieSocketAddress, Long> bookieFailureHistory,
        Map<BookieSocketAddress, Long> bookiePendingRequests) {
    return new BookiesHealthInfo() {
        @Override
        public long getBookieFailureHistory(BookieSocketAddress bookieSocketAddress) {
            return bookieFailureHistory.getOrDefault(bookieSocketAddress, -1L);
        }

        @Override
        public long getBookiePendingRequests(BookieSocketAddress bookieSocketAddress) {
            return bookiePendingRequests.getOrDefault(bookieSocketAddress, 0L);
        }
    };
}
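
Note the two different defaults in this helper: -1L stands in for a bookie with no recorded failure history, while 0L is a genuine zero for a bookie with no pending requests. The default value carries domain meaning here, not just a null guard.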