Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

On this page you can find usage examples for com.google.common.collect Maps newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
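
Before the project examples below, here is a minimal, self-contained sketch of the method in isolation (the class name and map contents are illustrative only; in current Guava the returned map is backed by a ConcurrentHashMap):

import java.util.concurrent.ConcurrentMap;

import com.google.common.collect.Maps;

public class NewConcurrentMapExample {
    public static void main(String[] args) {
        // Key and value types are inferred from the assignment target.
        ConcurrentMap<String, Integer> counts = Maps.newConcurrentMap();

        // All optional ConcurrentMap operations are supported.
        counts.putIfAbsent("requests", 0);
        counts.computeIfPresent("requests", (key, value) -> value + 1);

        System.out.println(counts); // prints {requests=1}
    }
}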

Usage

From source file: org.onosproject.cpman.impl.ControlPlaneMonitor.java

@Override
public void updateMetric(ControlMetric cm, int updateIntervalInMinutes, String resourceName) {
    // update disk metrics
    if (DISK_METRICS.contains(cm.metricType())) {
        diskBuf.putIfAbsent(resourceName, Maps.newConcurrentMap());

        availableResourceMap.putIfAbsent(Type.DISK, Sets.newHashSet());
        availableResourceMap.computeIfPresent(Type.DISK, (k, v) -> {
            v.add(resourceName);
            return v;
        });

        diskBuf.get(resourceName).putIfAbsent(cm.metricType(), (double) cm.metricValue().getLoad());
        if (diskBuf.get(resourceName).keySet().containsAll(DISK_METRICS)) {
            updateDiskMetrics(diskBuf.get(resourceName), resourceName);
            diskBuf.clear();
        }
    }

    // update network metrics
    if (NETWORK_METRICS.contains(cm.metricType())) {
        networkBuf.putIfAbsent(resourceName, Maps.newConcurrentMap());

        availableResourceMap.putIfAbsent(Type.NETWORK, Sets.newHashSet());
        availableResourceMap.computeIfPresent(Type.NETWORK, (k, v) -> {
            v.add(resourceName);
            return v;
        });

        networkBuf.get(resourceName).putIfAbsent(cm.metricType(), (double) cm.metricValue().getLoad());
        if (networkBuf.get(resourceName).keySet().containsAll(NETWORK_METRICS)) {
            updateNetworkMetrics(networkBuf.get(resourceName), resourceName);
            networkBuf.clear();
        }
    }
}
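
A side note, not part of the ONOS code above: putIfAbsent is atomic on a ConcurrentMap, so the idiom is correct, but the inner map built by Maps.newConcurrentMap() is allocated on every call and thrown away whenever the key is already present. computeIfAbsent only allocates on a miss and hands back the inner map, so the follow-up get becomes unnecessary; a rough equivalent (field and type names assumed, not taken from ONOS) is:

// Assumed shape of the buffer; the inner map is created atomically on first use.
ConcurrentMap<String, ConcurrentMap<ControlMetricType, Double>> diskBuf = Maps.newConcurrentMap();

diskBuf.computeIfAbsent(resourceName, name -> Maps.newConcurrentMap())
        .putIfAbsent(cm.metricType(), (double) cm.metricValue().getLoad());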

From source file: org.onosproject.incubator.store.virtual.impl.SimpleVirtualIntentStore.java

/**
 * Returns the pending intent map for a specific virtual network.
 *
 * @param networkId a virtual network identifier
 * @return the pending intent map for the requested virtual network
 */
private Map<Key, IntentData> getPendingMap(NetworkId networkId) {
    pendingByNetwork.computeIfAbsent(networkId, n -> Maps.newConcurrentMap());
    return pendingByNetwork.get(networkId);
}
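
Since Map.computeIfAbsent already returns the current (or newly created) mapping, the body above can typically be reduced to a single statement:

    return pendingByNetwork.computeIfAbsent(networkId, n -> Maps.newConcurrentMap());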

From source file: org.onosproject.store.primitives.impl.StorageManager.java

@Override
public Map<String, Long> getCounters() {
    Map<String, Long> counters = Maps.newConcurrentMap();
    federatedPrimitiveCreator.getAsyncAtomicCounterNames().forEach(name -> counters.put(name,
            federatedPrimitiveCreator.newAsyncCounter(name).asAtomicCounter().get()));
    return counters;
}

From source file: ratpack.health.HealthCheckHandler.java

private Promise<HealthCheckResults> execute(ExecControl execControl, Registry registry,
        Iterable<? extends HealthCheck> healthChecks) {
    Iterator<? extends HealthCheck> iterator = healthChecks.iterator();
    if (!iterator.hasNext()) {
        return execControl.promiseOf(new HealthCheckResults(ImmutableSortedMap.of()));
    }

    return execControl.<Map<String, HealthCheck.Result>>promise(f -> {
        AtomicInteger counter = new AtomicInteger();
        Map<String, HealthCheck.Result> results = Maps.newConcurrentMap();
        while (iterator.hasNext()) {
            counter.incrementAndGet();
            HealthCheck healthCheck = iterator.next();
            execControl.exec().start(e -> execute(e, registry, healthCheck).throttled(throttle).then(r -> {
                results.put(healthCheck.getName(), r);
                if (counter.decrementAndGet() == 0 && !iterator.hasNext()) {
                    f.success(results);
                }
            }));
        }
    }).map(ImmutableSortedMap::copyOf).map(HealthCheckResults::new);
}
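
The pattern here is a fan-out/fan-in: every health check runs on its own execution, drops its result into a shared ConcurrentMap, and the last one to finish completes the promise. A rough standalone sketch of the same idea using only java.util.concurrent (all names are illustrative, nothing here is Ratpack-specific):

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.collect.Maps;

public class FanOutFanInSketch {
    public static void main(String[] args) throws Exception {
        List<String> checks = List.of("db", "cache", "disk");
        ConcurrentMap<String, String> results = Maps.newConcurrentMap();
        CompletableFuture<ConcurrentMap<String, String>> all = new CompletableFuture<>();
        // Sizing the counter up front sidesteps the submit/complete race that the
        // increment-as-you-go variant above guards with the extra hasNext() check.
        AtomicInteger remaining = new AtomicInteger(checks.size());

        ExecutorService pool = Executors.newFixedThreadPool(checks.size());
        for (String name : checks) {
            pool.submit(() -> {
                results.put(name, "HEALTHY"); // the real check would run here
                if (remaining.decrementAndGet() == 0) {
                    all.complete(results);    // last task in completes the future
                }
            });
        }

        System.out.println(all.get(5, TimeUnit.SECONDS));
        pool.shutdown();
    }
}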

From source file: org.onosproject.store.primitives.impl.DefaultDatabaseState.java

private Map<String, Update> getLockMap(String mapName) {
    return locks.computeIfAbsent(mapName, name -> Maps.newConcurrentMap());
}

From source file: org.apache.kylin.cube.cuboid.algorithm.CuboidStats.java

private CuboidStats(String key, long baseCuboidId, Set<Long> mandatoryCuboids, Map<Long, Long> statistics,
        Map<Long, Double> size, Map<Long, Long> hitFrequencyMap,
        Map<Long, Map<Long, Long>> scanCountSourceMap) {

    this.key = key;
    this.baseCuboid = baseCuboidId;
    /** Initial mandatory cuboids */
    Set<Long> cuboidsForMandatory = Sets.newHashSet(mandatoryCuboids);
    //Always add base cuboid.
    if (!cuboidsForMandatory.contains(baseCuboid)) {
        cuboidsForMandatory.add(baseCuboid);
    }
    logger.info("Mandatory cuboids: " + cuboidsForMandatory);

    /** Initial selection cuboids */
    Set<Long> cuboidsForSelection = Sets.newHashSet(statistics.keySet());
    cuboidsForSelection.removeAll(cuboidsForMandatory);

    //There's no overlap between mandatoryCuboidSet and selectionCuboidSet
    this.mandatoryCuboidSet = ImmutableSet.<Long>builder().addAll(cuboidsForMandatory).build();
    this.selectionCuboidSet = ImmutableSet.<Long>builder().addAll(cuboidsForSelection).build();
    if (selectionCuboidSet.isEmpty()) {
        logger.warn("The selection set should not be empty!!!");
    }

    /** Initialize row count for mandatory cuboids */
    CuboidStatsUtil.complementRowCountForMandatoryCuboids(statistics, baseCuboid, mandatoryCuboidSet);

    this.cuboidCountMap = ImmutableMap.<Long, Long>builder().putAll(statistics).build();
    this.cuboidSizeMap = ImmutableMap.<Long, Double>builder().putAll(size).build();

    /** Initialize the hit probability for each selection cuboid */
    Map<Long, Double> tmpCuboidHitProbabilityMap = Maps.newHashMapWithExpectedSize(selectionCuboidSet.size());
    if (hitFrequencyMap != null) {
        long totalHitFrequency = 0L;
        for (Map.Entry<Long, Long> hitFrequency : hitFrequencyMap.entrySet()) {
            if (selectionCuboidSet.contains(hitFrequency.getKey())) {
                totalHitFrequency += hitFrequency.getValue();
            }
        }

        final double unitUncertainProb = WEIGHT_FOR_UN_QUERY / selectionCuboidSet.size();
        for (Long cuboid : selectionCuboidSet) {
            //Calculate hit probability for each cuboid
            if (hitFrequencyMap.get(cuboid) != null) {
                tmpCuboidHitProbabilityMap.put(cuboid, unitUncertainProb
                        + (1 - WEIGHT_FOR_UN_QUERY) * hitFrequencyMap.get(cuboid) / totalHitFrequency);
            } else {
                tmpCuboidHitProbabilityMap.put(cuboid, unitUncertainProb);
            }
        }
    } else {
        for (Long cuboid : selectionCuboidSet) {
            tmpCuboidHitProbabilityMap.put(cuboid, 1.0 / selectionCuboidSet.size());
        }
    }
    this.cuboidHitProbabilityMap = ImmutableMap.<Long, Double>builder().putAll(tmpCuboidHitProbabilityMap)
            .build();

    /** Initialize the scan count when query for each selection cuboid + one base cuboid */
    Map<Long, Long> tmpCuboidScanCountMap = Maps.newHashMapWithExpectedSize(1 + selectionCuboidSet.size());
    tmpCuboidScanCountMap.put(baseCuboid, getExpScanCount(baseCuboid, statistics, scanCountSourceMap));
    for (Long cuboid : selectionCuboidSet) {
        tmpCuboidScanCountMap.put(cuboid, getExpScanCount(cuboid, statistics, scanCountSourceMap));
    }
    this.cuboidScanCountMap = ImmutableMap.<Long, Long>builder().putAll(tmpCuboidScanCountMap).build();

    this.directChildrenCache = ImmutableMap.<Long, List<Long>>builder()
            .putAll(CuboidStatsUtil.createDirectChildrenCache(statistics.keySet())).build();

    this.allDescendantsCache = Maps.newConcurrentMap();
}
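
One detail worth noting (an inference, not stated in the Kylin source shown here): every other map in this constructor is frozen into an ImmutableMap, while allDescendantsCache starts empty and concurrent, which suggests it is a cache filled lazily from multiple threads, e.g. via a hypothetical helper like:

    List<Long> descendants = allDescendantsCache.computeIfAbsent(cuboid, this::computeAllDescendants);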

From source file: org.apache.gobblin.source.extractor.extract.kafka.KafkaSource.java

@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    this.metricContext = Instrumented.getMetricContext(state, KafkaSource.class);
    this.lineageInfo = LineageInfo.getLineageInfo(state.getBroker());

    Map<String, List<WorkUnit>> workUnits = Maps.newConcurrentMap();
    if (state.getPropAsBoolean(KafkaSource.GOBBLIN_KAFKA_EXTRACT_ALLOW_TABLE_TYPE_NAMESPACE_CUSTOMIZATION)) {
        String tableTypeStr = state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY,
                KafkaSource.DEFAULT_TABLE_TYPE.toString());
        tableType = Extract.TableType.valueOf(tableTypeStr);
        extractNamespace = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY,
                KafkaSource.DEFAULT_NAMESPACE_NAME);
    } else {
        // To be compatible, reject table type and namespace configuration keys as previous implementation
        tableType = KafkaSource.DEFAULT_TABLE_TYPE;
        extractNamespace = KafkaSource.DEFAULT_NAMESPACE_NAME;
    }
    isFullExtract = state.getPropAsBoolean(ConfigurationKeys.EXTRACT_IS_FULL_KEY);
    kafkaBrokers = state.getProp(ConfigurationKeys.KAFKA_BROKERS, "");
    this.shouldEnableDatasetStateStore = state.getPropAsBoolean(GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE,
            DEFAULT_GOBBLIN_KAFKA_SHOULD_ENABLE_DATASET_STATESTORE);

    try {
        Config config = ConfigUtils.propertiesToConfig(state.getProperties());
        GobblinKafkaConsumerClientFactory kafkaConsumerClientFactory = kafkaConsumerClientResolver
                .resolveClass(state.getProp(GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
                        DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS))
                .newInstance();

        this.kafkaConsumerClient.set(kafkaConsumerClientFactory.create(config));

        List<KafkaTopic> topics = getFilteredTopics(state);
        this.topicsToProcess = topics.stream().map(KafkaTopic::getName).collect(toSet());

        for (String topic : this.topicsToProcess) {
            LOG.info("Discovered topic " + topic);
        }
        Map<String, State> topicSpecificStateMap = DatasetUtils
                .getDatasetSpecificProps(Iterables.transform(topics, new Function<KafkaTopic, String>() {

                    @Override
                    public String apply(KafkaTopic topic) {
                        return topic.getName();
                    }
                }), state);

        int numOfThreads = state.getPropAsInt(ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
                ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT);
        ExecutorService threadPool = Executors.newFixedThreadPool(numOfThreads,
                ExecutorsUtils.newThreadFactory(Optional.of(LOG)));

        if (state.getPropAsBoolean(ConfigurationKeys.KAFKA_SOURCE_SHARE_CONSUMER_CLIENT,
                ConfigurationKeys.DEFAULT_KAFKA_SOURCE_SHARE_CONSUMER_CLIENT)) {
            this.sharedKafkaConsumerClient = this.kafkaConsumerClient.get();
        } else {
            // preallocate one client per thread
            for (int i = 0; i < numOfThreads; i++) {
                kafkaConsumerClientPool.offer(kafkaConsumerClientFactory.create(config));
            }
        }

        Stopwatch createWorkUnitStopwatch = Stopwatch.createStarted();

        for (KafkaTopic topic : topics) {
            threadPool.submit(new WorkUnitCreator(topic, state,
                    Optional.fromNullable(topicSpecificStateMap.get(topic.getName())), workUnits));
        }

        ExecutorsUtils.shutdownExecutorService(threadPool, Optional.of(LOG), 1L, TimeUnit.HOURS);
        LOG.info(String.format("Created workunits for %d topics in %d seconds", workUnits.size(),
                createWorkUnitStopwatch.elapsed(TimeUnit.SECONDS)));

        // Create empty WorkUnits for skipped partitions (i.e., partitions that have previous offsets,
        // but aren't processed).
        createEmptyWorkUnitsForSkippedPartitions(workUnits, topicSpecificStateMap, state);

        int numOfMultiWorkunits = state.getPropAsInt(ConfigurationKeys.MR_JOB_MAX_MAPPERS_KEY,
                ConfigurationKeys.DEFAULT_MR_JOB_MAX_MAPPERS);
        List<WorkUnit> workUnitList = KafkaWorkUnitPacker.getInstance(this, state).pack(workUnits,
                numOfMultiWorkunits);
        addTopicSpecificPropsToWorkUnits(workUnitList, topicSpecificStateMap);
        setLimiterReportKeyListToWorkUnits(workUnitList, getLimiterExtractorReportKeys());
        return workUnitList;
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            if (this.kafkaConsumerClient.get() != null) {
                this.kafkaConsumerClient.get().close();
            }

            // cleanup clients from pool
            for (GobblinKafkaConsumerClient client : kafkaConsumerClientPool) {
                client.close();
            }
        } catch (IOException e) {
            throw new RuntimeException("Exception closing kafkaConsumerClient");
        }
    }
}
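
A note on the role of the concurrent map here: workUnits is the shared accumulator that every WorkUnitCreator task writes into from the thread pool, so a ConcurrentMap is what makes those parallel writes safe. ExecutorsUtils.shutdownExecutorService then blocks until the pool drains, so every task's entries are in place before the map is packed into the final work-unit list.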

From source file: org.apache.giraph.comm.messages.SimpleMessageStore.java

@Override
public void readFieldsForPartition(DataInput in, int partitionId) throws IOException {
    if (in.readBoolean()) {
        ConcurrentMap<I, T> partitionMap = Maps.newConcurrentMap();
        int numVertices = in.readInt();
        for (int v = 0; v < numVertices; v++) {
            I vertexId = config.createVertexId();
            vertexId.readFields(in);
            partitionMap.put(vertexId, readFieldsForMessages(in));
        }
        map.put(partitionId, partitionMap);
    }
}

From source file: org.onosproject.store.primitives.impl.StorageManager.java

@Override
public Map<String, WorkQueueStats> getQueueStats() {
    Map<String, WorkQueueStats> workQueueStats = Maps.newConcurrentMap();
    federatedPrimitiveCreator.getWorkQueueNames()
            .forEach(name -> workQueueStats.put(name, federatedPrimitiveCreator
                    .newWorkQueue(name, Serializer.using(KryoNamespaces.BASIC)).stats().join()));
    return workQueueStats;
}

From source file: org.eclipse.hawkbit.ui.artifacts.state.ArtifactUploadState.java

private Map<FileUploadId, FileUploadProgress> getOverallFilesInUploadProcessMap() {
    if (overallFilesInUploadProcess == null) {
        overallFilesInUploadProcess = Maps.newConcurrentMap();
    }
    return overallFilesInUploadProcess;
}