Example usage for java.util.Map.getOrDefault

List of usage examples for java.util.Map.getOrDefault

Introduction

On this page you can find example usages of java.util.Map.getOrDefault.

Prototype

default V getOrDefault(Object key, V defaultValue) 

Document

Returns the value to which the specified key is mapped, or defaultValue if this map contains no mapping for the key.
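
As a quick illustration of the behavior described above, here is a minimal, self-contained sketch (not taken from any of the projects below) showing that getOrDefault only reads from the map and never inserts the default value:

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultExample {
    public static void main(String[] args) {
        Map<String, Integer> ports = new HashMap<>();
        ports.put("http", 80);

        // Key present: the mapped value is returned.
        System.out.println(ports.getOrDefault("http", 8080));  // 80

        // Key absent: the default is returned, but the map is not modified.
        System.out.println(ports.getOrDefault("https", 443));  // 443
        System.out.println(ports.containsKey("https"));        // false
    }
}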

Usage

From source file:de.csdev.ebus.utils.EBusConsoleUtils.java

/**
 * Returns device table information.
 *
 * @return a formatted device table as a string
 */
public static String getDeviceTableInformation(Collection<IEBusCommandCollection> collections,
        EBusDeviceTable deviceTable) {

    StringBuilder sb = new StringBuilder();

    Map<String, String> mapping = new HashMap<String, String>();

    for (IEBusCommandCollection collection : collections) {
        for (String identification : collection.getIdentification()) {
            mapping.put(identification, collection.getId());
        }
    }

    EBusDevice ownDevice = deviceTable.getOwnDevice();

    sb.append(String.format("%-2s | %-2s | %-14s | %-14s | %-25s | %-2s | %-10s | %-10s | %-20s\n", "MA", "SA",
            "Identifier", "Device", "Manufacture", "ID", "Firmware", "Hardware", "Last Activity"));

    sb.append(String.format("%-2s-+-%-2s-+-%-14s-+-%-14s-+-%-20s-+-%-2s-+-%-10s-+-%-10s-+-%-20s\n",
            StringUtils.repeat("-", 2), StringUtils.repeat("-", 2), StringUtils.repeat("-", 14),
            StringUtils.repeat("-", 14), StringUtils.repeat("-", 20), StringUtils.repeat("-", 2),
            StringUtils.repeat("-", 10), StringUtils.repeat("-", 10), StringUtils.repeat("-", 20)));

    for (EBusDevice device : deviceTable.getDeviceTable()) {

        boolean isBridge = device.equals(ownDevice);
        String masterAddress = EBusUtils.toHexDumpString(device.getMasterAddress());
        String slaveAddress = EBusUtils.toHexDumpString(device.getSlaveAddress());

        String activity = device.getLastActivity() == 0 ? "---" : new Date(device.getLastActivity()).toString();
        String id = EBusUtils.toHexDumpString(device.getDeviceId()).toString();
        String deviceName = isBridge ? "<interface>" : mapping.getOrDefault(id, "---");
        String manufacture = isBridge ? "eBUS Library" : device.getManufacturerName();

        sb.append(String.format("%-2s | %-2s | %-14s | %-14s | %-25s | %-2s | %-10s | %-10s | %-20s\n",
                masterAddress, slaveAddress, id, deviceName, manufacture,
                EBusUtils.toHexDumpString(device.getManufacturer()), device.getSoftwareVersion(),
                device.getHardwareVersion(), activity));

    }

    sb.append(StringUtils.repeat("-", 118) + "\n");
    sb.append("MA = Master Address / SA = Slave Address / ID = Manufacture ID\n");

    return sb.toString();
}

From source file:org.codice.ddf.catalog.ui.util.EndpointUtil.java

@SuppressWarnings("unchecked")
private void mergeMetacardTypeIntoResults(Map<String, Object> resultTypes, InjectableAttribute attribute,
        Map<String, Object> attributeProperties, String type) {
    Map<String, Object> attributes = (Map) resultTypes.getOrDefault(type, new HashMap<String, Object>());
    attributes.put(attribute.attribute(), attributeProperties);
    resultTypes.put(type, attributes);
}
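
A side note on the pattern above: the getOrDefault-then-put sequence used to accumulate attributes into a nested map can usually be collapsed into a single computeIfAbsent call, which creates and inserts the inner map only when the key is missing. A minimal sketch under that assumption, with made-up type and attribute names rather than the ones from the project above:

import java.util.HashMap;
import java.util.Map;

public class NestedMapAccumulation {
    public static void main(String[] args) {
        Map<String, Map<String, Object>> resultTypes = new HashMap<>();

        // Same effect as: getOrDefault(type, new HashMap<>()), put the attribute, put the map back.
        resultTypes.computeIfAbsent("example.type", k -> new HashMap<>())
                .put("example-attribute", "STRING");

        System.out.println(resultTypes);  // {example.type={example-attribute=STRING}}
    }
}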

From source file:org.apache.samza.system.kafka.KafkaSystemAdmin.java

/**
 * A helper method that takes oldest, newest, and upcoming offsets for each
 * system stream partition, and creates a single map from stream name to
 * SystemStreamMetadata.
 *
 * @param newestOffsets map of SSP to newest offset
 * @param oldestOffsets map of SSP to oldest offset
 * @param upcomingOffsets map of SSP to upcoming offset
 * @return a {@link Map} from {@code system} to {@link SystemStreamMetadata}
 */
@VisibleForTesting
static Map<String, SystemStreamMetadata> assembleMetadata(Map<SystemStreamPartition, String> oldestOffsets,
        Map<SystemStreamPartition, String> newestOffsets, Map<SystemStreamPartition, String> upcomingOffsets) {
    HashSet<SystemStreamPartition> allSSPs = new HashSet<>();
    allSSPs.addAll(oldestOffsets.keySet());
    allSSPs.addAll(newestOffsets.keySet());
    allSSPs.addAll(upcomingOffsets.keySet());

    Map<String, SystemStreamMetadata> assembledMetadata = allSSPs.stream()
            .collect(Collectors.groupingBy(SystemStreamPartition::getStream)).entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = entry
                        .getValue().stream()
                        .collect(Collectors.toMap(SystemStreamPartition::getPartition,
                                ssp -> new SystemStreamMetadata.SystemStreamPartitionMetadata(
                                        oldestOffsets.getOrDefault(ssp, null),
                                        newestOffsets.getOrDefault(ssp, null), upcomingOffsets.get(ssp))));
                return new SystemStreamMetadata(entry.getKey(), partitionMetadata);
            }));

    return assembledMetadata;
}

From source file:org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java

@Test
public void testUnevenLoadOverTime() throws Exception {
    LoadAwareShuffleGrouping grouping = new LoadAwareShuffleGrouping();
    WorkerTopologyContext context = mockContext(Arrays.asList(1, 2));
    grouping.prepare(context, new GlobalStreamId("a", "default"), Arrays.asList(1, 2));
    double expectedOneWeight = 100.0;
    double expectedTwoWeight = 100.0;

    Map<Integer, Double> localLoad = new HashMap<>();
    localLoad.put(1, 1.0);
    localLoad.put(2, 0.0);
    LoadMapping lm = new LoadMapping();
    lm.setLocal(localLoad);
    //First verify that if something has a high load its distribution will drop over time
    for (int i = 9; i >= 0; i--) {
        grouping.refreshLoad(lm);
        expectedOneWeight -= 10.0;
        Map<Integer, Double> countByType = count(grouping.choices, grouping.rets);
        LOG.info("countByType = {}", countByType);
        double expectedOnePercentage = expectedOneWeight / (expectedOneWeight + expectedTwoWeight);
        double expectedTwoPercentage = expectedTwoWeight / (expectedOneWeight + expectedTwoWeight);
        assertEquals("i = " + i, expectedOnePercentage,
                countByType.getOrDefault(1, 0.0) / LoadAwareShuffleGrouping.CAPACITY, 0.01);
        assertEquals("i = " + i, expectedTwoPercentage,
                countByType.getOrDefault(2, 0.0) / LoadAwareShuffleGrouping.CAPACITY, 0.01);
    }

    //Now verify that when it is switched we can recover
    localLoad.put(1, 0.0);
    localLoad.put(2, 1.0);
    lm.setLocal(localLoad);

    while (expectedOneWeight < 100.0) {
        grouping.refreshLoad(lm);
        expectedOneWeight += 1.0;
        expectedTwoWeight = Math.max(0.0, expectedTwoWeight - 10.0);
        Map<Integer, Double> countByType = count(grouping.choices, grouping.rets);
        LOG.info("countByType = {}", countByType);
        double expectedOnePercentage = expectedOneWeight / (expectedOneWeight + expectedTwoWeight);
        double expectedTwoPercentage = expectedTwoWeight / (expectedOneWeight + expectedTwoWeight);
        assertEquals(expectedOnePercentage,
                countByType.getOrDefault(1, 0.0) / LoadAwareShuffleGrouping.CAPACITY, 0.01);
        assertEquals(expectedTwoPercentage,
                countByType.getOrDefault(2, 0.0) / LoadAwareShuffleGrouping.CAPACITY, 0.01);
    }
}

From source file:net.acesinc.data.json.generator.log.TranquilityLogger.java

public TranquilityLogger(Map<String, Object> props) {
    this.jsonUtils = new JsonUtils();
    this.mapper = new ObjectMapper();

    this.indexService = (String) props.get(OVERLORD_NAME_PROP_NAME);
    this.firehosePattern = (String) props.get(FIREHOSE_PATTERN_PROP_NAME);
    this.discoveryPath = (String) props.get(DISCOVERY_PATH_PROP_NAME);
    this.dataSourceName = (String) props.get(DATASOURCE_NAME_PROP_NAME);
    this.dimensionNames = (String) props.get(DIMENSIONS_PROP_NAME);
    this.geoSpatialDims = (String) props.get(GEOSPATIAL_DIMENSIONS_PROP_NAME);
    this.timestampName = (String) props.get(TIMESTAMP_NAME_PROP_NAME);
    this.timestampFormat = (String) props.getOrDefault(TIMESTAMP_FORMAT_PROP_NAME, "auto");
    this.segmentGranularity = ((String) props.getOrDefault(SEGMENT_GRANULARITY_PROP_NAME, "hour"))
            .toUpperCase();
    this.queryGranularity = ((String) props.getOrDefault(QUERY_GRANULARITY_PROP_NAME, "minute")).toUpperCase();
    this.zookeeperHost = (String) props.get(ZOOKEEPER_HOST_PROP_NAME);
    this.zookeeperPort = (Integer) props.get(ZOOKEEPER_PORT_PROP_NAME);
    this.flatten = (Boolean) props.getOrDefault(FLATTEN_PROP_NAME, true);
    this.sync = (Boolean) props.getOrDefault(SYNC_PROP_NAME, false);

    dimensions = new ArrayList<>();
    if (dimensionNames != null && !dimensionNames.isEmpty()) {
        String[] dims = dimensionNames.split(",");
        for (String s : dims) {
            dimensions.add(s.trim());
        }
    }
    if (dimensions.isEmpty()) {
        log.debug("Configuring Tranquility with Schemaless ingestion");
        druidDimensions = DruidDimensions.schemaless();
    } else {
        log.debug("Configuring Tranquility with the following dimensions: " + dimensions.toString());
        druidDimensions = DruidDimensions.specific(dimensions);
    }

    List<String> geoDims = new ArrayList<>();
    if (geoSpatialDims != null && !geoSpatialDims.isEmpty()) {
        String[] dims = geoSpatialDims.split(",");
        for (String s : dims) {
            geoDims.add(s.trim());
        }
    }
    if (!geoDims.isEmpty()) {
        log.debug("Adding Geospatial Dimensions: " + geoDims.toString());
        druidDimensions = druidDimensions
                .withSpatialDimensions(Lists.newArrayList(DruidSpatialDimension.multipleField("geo", geoDims)));
    }

    aggregators = ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("events"));

    // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
    timestamper = new Timestamper<Map<String, Object>>() {
        @Override
        public DateTime timestamp(Map<String, Object> theMap) {
            return new DateTime(theMap.get(timestampName));
        }
    };

    // Tranquility uses ZooKeeper (through Curator) for coordination.
    curator = CuratorFrameworkFactory.builder().connectString(zookeeperHost + ":" + zookeeperPort.toString())
            .retryPolicy(new ExponentialBackoffRetry(1000, 20, 30000)).build();
    curator.start();

    // The JSON serialization of your object must have a timestamp field in a format that Druid understands. By default,
    // Druid expects the field to be called "timestamp" and to be an ISO8601 timestamp.
    log.debug("Configuring Tranquility Timestamp Spec with { name: " + timestampName + ", format: "
            + timestampFormat + " }");
    timestampSpec = new TimestampSpec(timestampName, timestampFormat);

    // Tranquility needs to be able to serialize your object type to JSON for transmission to Druid. By default this is
    // done with Jackson. If you want to provide an alternate serializer, you can provide your own via ```.objectWriter(...)```.
    // In this case, we won't provide one, so we're just using Jackson.
    log.debug("Creating Druid Beam for DataSource [ " + dataSourceName + " ]");
    druidService = DruidBeams.builder(timestamper).curator(curator).discoveryPath(discoveryPath)
            .location(DruidLocation.create(indexService, firehosePattern, dataSourceName))
            .timestampSpec(timestampSpec)
            .rollup(DruidRollup.create(druidDimensions, aggregators,
                    QueryGranularity.fromString(queryGranularity)))
            .tuning(ClusteredBeamTuning.builder().segmentGranularity(Granularity.valueOf(segmentGranularity))
                    .windowPeriod(new Period("PT10M")).partitions(1).replicants(1).build())
            .buildJavaService();

}

From source file:io.gravitee.repository.elasticsearch.analytics.ElasticAnalyticsRepository.java

private HealthResponse toHealthResponse(SearchResponse searchResponse) {
    HealthResponse healthResponse = new HealthResponse();

    if (searchResponse.getAggregations() == null) {
        return healthResponse;
    }

    // First aggregation is always a date histogram aggregation
    Histogram histogram = searchResponse.getAggregations().get("by_date");

    Map<Boolean, long[]> values = new HashMap<>(2);
    long[] timestamps = new long[histogram.getBuckets().size()];

    // Prepare data
    int idx = 0;
    for (Histogram.Bucket bucket : histogram.getBuckets()) {
        timestamps[idx] = ((DateTime) bucket.getKey()).getMillis();

        Terms terms = bucket.getAggregations().get("by_result");

        for (Terms.Bucket termBucket : terms.getBuckets()) {
            long[] valuesByStatus = values.getOrDefault(Integer.parseInt(termBucket.getKeyAsString()) == 1,
                    new long[timestamps.length]);

            valuesByStatus[idx] = termBucket.getDocCount();

            values.put(Integer.parseInt(termBucket.getKeyAsString()) == 1, valuesByStatus);
        }

        idx++;
    }

    healthResponse.timestamps(timestamps);
    healthResponse.buckets(values);

    return healthResponse;
}

From source file:com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.distributed.kubernetes.KubernetesDistributedService.java

default List<ConfigSource> stageProfiles(AccountDeploymentDetails<KubernetesAccount> details,
        GenerateService.ResolvedConfiguration resolvedConfiguration) {
    SpinnakerService thisService = getService();
    ServiceSettings thisServiceSettings = resolvedConfiguration.getServiceSettings(thisService);
    SpinnakerRuntimeSettings runtimeSettings = resolvedConfiguration.getRuntimeSettings();
    Integer version = getRunningServiceDetails(details, runtimeSettings).getLatestEnabledVersion();
    if (version == null) {
        version = 0;
    } else {
        version++;
    }

    String namespace = getNamespace(thisServiceSettings);
    KubernetesProviderUtils.createNamespace(details, namespace);

    String name = getServiceName();
    Map<String, String> env = new HashMap<>();
    List<ConfigSource> configSources = new ArrayList<>();

    Map<String, Profile> serviceProfiles = resolvedConfiguration.getProfilesForService(thisService.getType());
    Set<String> requiredFiles = new HashSet<>();

    for (SidecarService sidecarService : getSidecars(runtimeSettings)) {
        for (Profile profile : sidecarService.getSidecarProfiles(resolvedConfiguration, thisService)) {
            if (profile == null) {
                throw new HalException(Problem.Severity.FATAL,
                        "Service " + sidecarService.getService().getCanonicalName()
                                + " is required but was not supplied for deployment.");
            }

            serviceProfiles.put(profile.getName(), profile);
            requiredFiles.addAll(profile.getRequiredFiles());
        }
    }

    Map<String, Set<Profile>> collapseByDirectory = new HashMap<>();

    for (Map.Entry<String, Profile> entry : serviceProfiles.entrySet()) {
        Profile profile = entry.getValue();
        String mountPoint = Paths.get(profile.getOutputFile()).getParent().toString();
        Set<Profile> profiles = collapseByDirectory.getOrDefault(mountPoint, new HashSet<>());
        profiles.add(profile);
        requiredFiles.addAll(profile.getRequiredFiles());
        collapseByDirectory.put(mountPoint, profiles);
    }

    String stagingPath = getSpinnakerStagingPath(details.getDeploymentName());
    if (!requiredFiles.isEmpty()) {
        String secretName = KubernetesProviderUtils.componentDependencies(name, version);
        String mountPoint = null;
        for (String file : requiredFiles) {
            String nextMountPoint = Paths.get(file).getParent().toString();
            if (mountPoint == null) {
                mountPoint = nextMountPoint;
            }
            assert (mountPoint.equals(nextMountPoint));
        }

        Set<Pair<File, String>> pairs = requiredFiles.stream().map(f -> {
            return new ImmutablePair<>(new File(f), new File(f).getName());
        }).collect(Collectors.toSet());

        KubernetesProviderUtils.upsertSecret(details, pairs, secretName, namespace);
        configSources.add(new ConfigSource().setId(secretName).setMountPath(mountPoint));
    }

    int ind = 0;
    for (Map.Entry<String, Set<Profile>> entry : collapseByDirectory.entrySet()) {
        env.clear();
        String mountPoint = entry.getKey();
        Set<Profile> profiles = entry.getValue();
        env.putAll(profiles.stream().reduce(new HashMap<>(), (acc, profile) -> {
            acc.putAll(profile.getEnv());
            return acc;
        }, (a, b) -> {
            a.putAll(b);
            return a;
        }));

        String secretName = KubernetesProviderUtils.componentSecret(name + ind, version);
        ind += 1;

        Set<Pair<File, String>> pairs = profiles.stream().map(p -> {
            return new ImmutablePair<>(new File(stagingPath, p.getName()),
                    new File(p.getOutputFile()).getName());
        }).collect(Collectors.toSet());

        KubernetesProviderUtils.upsertSecret(details, pairs, secretName, namespace);
        configSources.add(new ConfigSource().setId(secretName).setMountPath(mountPoint).setEnv(env));
    }

    return configSources;
}

From source file:org.apache.pulsar.client.impl.auth.AuthenticationAthenz.java

private void setAuthParams(Map<String, String> authParams) {
    this.tenantDomain = authParams.get("tenantDomain");
    this.tenantService = authParams.get("tenantService");
    this.providerDomain = authParams.get("providerDomain");
    // privateKeyPath is deprecated, this is for compatibility
    if (isBlank(authParams.get("privateKey")) && isNotBlank(authParams.get("privateKeyPath"))) {
        this.privateKey = loadPrivateKey(authParams.get("privateKeyPath"));
    } else {
        this.privateKey = loadPrivateKey(authParams.get("privateKey"));
    }

    if (this.privateKey == null) {
        throw new IllegalArgumentException(
                "Failed to load private key from privateKey or privateKeyPath field");
    }

    this.keyId = authParams.getOrDefault("keyId", "0");
    if (authParams.containsKey("athenzConfPath")) {
        System.setProperty("athenz.athenz_conf", authParams.get("athenzConfPath"));
    }
    if (authParams.containsKey("principalHeader")) {
        System.setProperty("athenz.auth.principal.header", authParams.get("principalHeader"));
    }
    if (authParams.containsKey("roleHeader")) {
        System.setProperty("athenz.auth.role.header", authParams.get("roleHeader"));
    }
    if (authParams.containsKey("ztsUrl")) {
        this.ztsUrl = authParams.get("ztsUrl");
    }
}

From source file:com.hortonworks.streamline.streams.metrics.storm.ambari.AmbariMetricsServiceWithStormQuerier.java

private Map<Long, List<Pair<String, Double>>> getMetricsStreamToValueMap(String topologyName,
        String componentId, String metricName, long from, long to) {
    List<Map<String, ?>> metrics = getMetricsMap(topologyName, componentId, metricName, from, to);
    Map<Long, List<Pair<String, Double>>> ret = new HashMap<>();
    if (metrics.size() > 0) {
        for (Map<String, ?> metric : metrics) {
            String retrievedMetricName = (String) metric.get("metricname");

            // exclude system streams
            if (!isMetricFromSystemStream(retrievedMetricName)) {
                Map<String, Number> points = (Map<String, Number>) metric.get("metrics");
                for (Map.Entry<String, Number> timestampToValue : points.entrySet()) {
                    Long timestamp = Long.valueOf(timestampToValue.getKey());
                    List<Pair<String, Double>> values = ret.getOrDefault(timestamp, new ArrayList<>());
                    if (values.isEmpty()) {
                        ret.put(timestamp, values);
                    }

                    values.add(Pair.of(retrievedMetricName, timestampToValue.getValue().doubleValue()));
                }
            }
        }
    }
    return ret;
}

From source file:com.serphacker.serposcope.task.google.GoogleTask.java

protected void initializeTargets() {
    Map<Integer, Integer> previousScorePercent = new HashMap<>();

    if (previousRun != null) {
        previousScorePercent = googleDB.targetSummary.getPreviousScore(previousRun.getId());
    }

    List<GoogleTarget> targets = googleDB.target.list();
    for (GoogleTarget target : targets) {
        targetsByGroup.putIfAbsent(target.getGroupId(), new ArrayList<>());
        targetsByGroup.get(target.getGroupId()).add(target);
        summariesByTarget.put(target.getId(), new GoogleTargetSummary(target.getGroupId(), target.getId(),
                run.getId(), previousScorePercent.getOrDefault(target.getId(), 0)));
    }

    if (updateRun) {
        List<GoogleTargetSummary> summaries = googleDB.targetSummary.list(run.getId());
        for (GoogleTargetSummary summary : summaries) {
            summariesByTarget.put(summary.getTargetId(), summary);
        }
    }
}