List of usage examples for com.google.common.collect.Maps.transformEntries
@GwtIncompatible("NavigableMap") public static <K, V1, V2> NavigableMap<K, V2> transformEntries(NavigableMap<K, V1> fromMap, EntryTransformer<? super K, ? super V1, V2> transformer)
From source file:com.metamx.druid.master.DruidMasterLogger.java
@Override
public DruidMasterRuntimeParams run(DruidMasterRuntimeParams params) {
  DruidCluster cluster = params.getDruidCluster();
  MasterStats stats = params.getMasterStats();
  ServiceEmitter emitter = params.getEmitter();

  Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
  if (assigned != null) {
    for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
      log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
  if (dropped != null) {
    for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
      log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  emitTieredStats(emitter, "master/%s/cost/raw", stats.getPerTierStats().get("initialCost"));
  emitTieredStats(emitter, "master/%s/cost/normalization", stats.getPerTierStats().get("normalization"));
  emitTieredStats(emitter, "master/%s/moved/count", stats.getPerTierStats().get("movedCount"));
  emitTieredStats(emitter, "master/%s/deleted/count", stats.getPerTierStats().get("deletedCount"));

  Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
  if (normalized != null) {
    emitTieredStats(emitter, "master/%s/cost/normalized",
        Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
          @Override
          public Number transformEntry(String key, AtomicLong value) {
            return value.doubleValue() / 1000d;
          }
        }));
  }

  Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
  if (unneeded != null) {
    for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
      log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  emitter.emit(new ServiceMetricEvent.Builder().build("master/overShadowed/count",
      stats.getGlobalStats().get("overShadowedCount")));

  Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
  if (moved != null) {
    for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
      log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }

  log.info("Load Queues:");
  for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      DruidServer server = serverHolder.getServer();
      LoadQueuePeon queuePeon = serverHolder.getPeon();
      log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.",
          server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(),
          queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
      if (log.isDebugEnabled()) {
        for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
          log.debug("Segment to load[%s]", segment);
        }
        for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
          log.debug("Segment to drop[%s]", segment);
        }
      }
    }
  }

  // Emit master metrics
  final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
  for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
    String serverName = entry.getKey();
    LoadQueuePeon queuePeon = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("master/loadQueue/size", queuePeon.getLoadQueueSize()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("master/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("master/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("master/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
  }

  // Emit segment metrics
  CountingMap<String> segmentSizes = new CountingMap<String>();
  CountingMap<String> segmentCounts = new CountingMap<String>();
  for (DruidDataSource dataSource : params.getDataSources()) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentSizes.add(dataSource.getName(), segment.getSize());
      segmentCounts.add(dataSource.getName(), 1L);
    }
  }
  for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long size = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(dataSource).build("master/segment/size", size));
  }
  for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long count = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(dataSource).build("master/segment/count", count));
  }

  return params;
}
From source file:io.druid.server.coordinator.DruidCoordinatorLogger.java
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  DruidCluster cluster = params.getDruidCluster();
  CoordinatorStats stats = params.getCoordinatorStats();
  ServiceEmitter emitter = params.getEmitter();

  Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
  if (assigned != null) {
    for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
      log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
  if (dropped != null) {
    for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
      log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  emitTieredStats(emitter, "coordinator/%s/cost/raw", stats.getPerTierStats().get("initialCost"));
  emitTieredStats(emitter, "coordinator/%s/cost/normalization", stats.getPerTierStats().get("normalization"));
  emitTieredStats(emitter, "coordinator/%s/moved/count", stats.getPerTierStats().get("movedCount"));
  emitTieredStats(emitter, "coordinator/%s/deleted/count", stats.getPerTierStats().get("deletedCount"));

  Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
  if (normalized != null) {
    emitTieredStats(emitter, "coordinator/%s/cost/normalized",
        Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
          @Override
          public Number transformEntry(String key, AtomicLong value) {
            return value.doubleValue() / 1000d;
          }
        }));
  }

  Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
  if (unneeded != null) {
    for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
      log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }

  emitter.emit(new ServiceMetricEvent.Builder().build("coordinator/overShadowed/count",
      stats.getGlobalStats().get("overShadowedCount")));

  Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
  if (moved != null) {
    for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
      log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }

  log.info("Load Queues:");
  for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      DruidServer server = serverHolder.getServer();
      LoadQueuePeon queuePeon = serverHolder.getPeon();
      log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.",
          server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(),
          queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
      if (log.isDebugEnabled()) {
        for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
          log.debug("Segment to load[%s]", segment);
        }
        for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
          log.debug("Segment to drop[%s]", segment);
        }
      }
    }
  }

  // Emit coordinator metrics
  final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
  for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
    String serverName = entry.getKey();
    LoadQueuePeon queuePeon = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("coordinator/loadQueue/size", queuePeon.getLoadQueueSize()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("coordinator/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("coordinator/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(serverName)
        .build("coordinator/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
  }

  // Emit segment metrics
  CountingMap<String> segmentSizes = new CountingMap<String>();
  CountingMap<String> segmentCounts = new CountingMap<String>();
  for (DruidDataSource dataSource : params.getDataSources()) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentSizes.add(dataSource.getName(), segment.getSize());
      segmentCounts.add(dataSource.getName(), 1L);
    }
  }
  for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long size = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(dataSource).build("coordinator/segment/size", size));
  }
  for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long count = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setUser1(dataSource).build("coordinator/segment/count", count));
  }

  return params;
}
From source file:hudson.maven.MavenProbeAction.java
/**
 * Gets the environment variables of the JVM on this computer.
 * If this is the master, it returns the system property of the master computer.
 */
public Map<String, String> getEnvVars() throws IOException, InterruptedException {
  EnvVars vars = EnvVars.getRemote(channel);
  if (build != null) {
    final Set<String> sensitiveBuildVars = build.getSensitiveBuildVariables();
    vars = new EnvVars(Maps.transformEntries(vars, new Maps.EntryTransformer<String, String, String>() {
      public String transformEntry(String key, String value) {
        return sensitiveBuildVars.contains(key) ? "********" : value;
      }
    }));
  }
  return vars;
}
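Note that transformEntries returns a live view of the underlying map; wrapping the result in new EnvVars(...) above copies that view into a plain map, so the masking transformer runs once per variable rather than on every later lookup.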
From source file:com.google.devtools.build.skyframe.NotifyingGraph.java
@Override
public Map<SkyKey, NodeEntry> getBatch(Iterable<SkyKey> keys) {
  if (delegate instanceof ProcessableGraph) {
    return Maps.transformEntries(getProcessableDelegate().getBatch(keys), wrapEntry);
  } else {
    return Maps.transformEntries(delegate.getBatch(keys), wrapEntry);
  }
}
From source file:com.nesscomputing.tracking.MockedHttpServletRequest.java
@Override
public Map<String, String[]> getParameterMap() {
  return Maps.transformEntries(parameters, new EntryTransformer<String, List<String>, String[]>() {
    @Override
    public String[] transformEntry(String key, List<String> values) {
      return values == null ? null : values.toArray(new String[values.size()]);
    }
  });
}
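Because the returned map is a lazy view, the transformer runs on each access, so a fresh String[] is rebuilt every time a parameter is read; for a mocked request used in tests that is normally acceptable. Copy the result into another map if a one-shot conversion is preferred.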
From source file:io.druid.server.coordinator.helper.DruidCoordinatorLogger.java
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  DruidCluster cluster = params.getDruidCluster();
  CoordinatorStats stats = params.getCoordinatorStats();
  ServiceEmitter emitter = params.getEmitter();

  Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
  if (assigned != null) {
    for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
      log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/assigned/count", assigned);

  Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
  if (dropped != null) {
    for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
      log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/dropped/count", dropped);

  emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
  emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
  emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
  emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));

  Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
  if (normalized != null) {
    emitTieredStats(emitter, "segment/cost/normalized",
        Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
          @Override
          public Number transformEntry(String key, AtomicLong value) {
            return value.doubleValue() / 1000d;
          }
        }));
  }

  Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
  if (unneeded != null) {
    for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
      log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(),
          cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/unneeded/count", stats.getPerTierStats().get("unneededCount"));

  emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count",
      stats.getGlobalStats().get("overShadowedCount")));

  Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
  if (moved != null) {
    for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
      log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }

  log.info("Load Queues:");
  for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
    for (ServerHolder serverHolder : serverHolders) {
      ImmutableDruidServer server = serverHolder.getServer();
      LoadQueuePeon queuePeon = serverHolder.getPeon();
      log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.",
          server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(),
          queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
      if (log.isDebugEnabled()) {
        for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
          log.debug("Segment to load[%s]", segment);
        }
        for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
          log.debug("Segment to drop[%s]", segment);
        }
      }
    }
  }

  // Emit coordinator metrics
  final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
  for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
    String serverName = entry.getKey();
    LoadQueuePeon queuePeon = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName)
        .build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName)
        .build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName)
        .build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName)
        .build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
  }

  // Emit segment metrics
  CountingMap<String> segmentSizes = new CountingMap<String>();
  CountingMap<String> segmentCounts = new CountingMap<String>();
  for (DruidDataSource dataSource : params.getDataSources()) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentSizes.add(dataSource.getName(), segment.getSize());
      segmentCounts.add(dataSource.getName(), 1L);
    }
  }
  for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long size = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource)
        .build("segment/size", size));
  }
  for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long count = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource)
        .build("segment/count", count));
  }

  return params;
}
From source file:com.mind_era.knime_rapidminer.knime.nodes.util.KnimeExampleSet.java
private static Map<Integer, Map<String, Double>> createMapping(final BufferedDataTable inData) {
  final Iterable<Integer> keys = Iterables.transform(Iterables.filter(
      // add index for transform
      Zip.zipWithIndex(inData.getDataTableSpec(), 0),
      // use only the string valued columns
      new Predicate<Map.Entry<DataColumnSpec, Integer>>() {
        @Override
        public boolean apply(final Map.Entry<DataColumnSpec, Integer> input) {
          return input.getKey().getType().isCompatible(org.knime.core.data.StringValue.class);
        }
      }),
      // Project to the index
      new Function<Map.Entry<?, Integer>, Integer>() {
        @Override
        public Integer apply(final Entry<?, Integer> input) {
          return input.getValue();
        }
      });
  // Initialise helper and result maps
  final TreeMap<Integer, Integer> max = Maps.<Integer, Integer>newTreeMap();
  final Builder<Integer, Map<String, Double>> builder = ImmutableMap.<Integer, Map<String, Double>>builder();
  for (final Integer key : keys) {
    max.put(key, Integer.valueOf(0));
    builder.put(key, Maps.<String, Double>newHashMap());
  }
  final ImmutableMap<Integer, Map<String, Double>> ret = builder.build();
  // Go through the data
  final CloseableRowIterator it = inData.iterator();
  try {
    ForEach.consume(
        // Fill the result map values
        Iterators.transform(it, new Function<DataRow, Void>() {
          @Override
          public Void apply(final DataRow row) {
            // Updating max and ret maps.
            ForEach.consume(Maps.transformEntries(ret,
                new Maps.EntryTransformer<Integer, Map<String, Double>, Void>() {
                  @Override
                  public Void transformEntry(final Integer key, final Map<String, Double> value) {
                    final DataCell cell = row.getCell(key.intValue());
                    final String val = cell.isMissing() ? null
                        : ((org.knime.core.data.StringValue) cell).getStringValue();
                    if (!value.containsKey(val)) {
                      final Integer maxValue = max.get(key);
                      value.put(val, Double.valueOf(maxValue.doubleValue()));
                      max.put(key, Integer.valueOf(maxValue.intValue() + 1));
                    }
                    return null;
                  }
                }).entrySet());
            return null;
          }
        }));
    return ret;
  } finally {
    it.close();
  }
}
From source file:org.kie.workbench.common.screens.projecteditor.client.build.DeploymentPopupBuilder.java
private Map<String, Set<String>> containerNamesByServerTemplateIds(
    final Map<String, ServerTemplate> serverTemplatesIds) {
  return Maps.transformEntries(serverTemplatesIds,
      (id, server) -> FluentIterable.from(server.getContainersSpec())
          .transform(c -> c.getContainerName())
          .toSet());
}
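EntryTransformer declares a single abstract method (transformEntry), so from Java 8 onward it can be supplied as a lambda, as in this example. The returned map is still a lazy view, so the FluentIterable chain is re-evaluated each time an entry is read.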
From source file:org.graylog2.alarmcallbacks.twilio.TwilioSmsAlarmCallback.java
@Override
public Map<String, Object> getAttributes() {
  return Maps.transformEntries(configuration.getSource(),
      new Maps.EntryTransformer<String, Object, Object>() {
        @Override
        public Object transformEntry(String key, Object value) {
          if (SENSITIVE_CONFIGURATION_KEYS.contains(key)) {
            return "****";
          }
          return value;
        }
      });
}
From source file:com.facebook.buck.rules.TargetGraphHashing.java
/**
 * Given a {@link TargetGraph} and any number of root nodes to traverse,
 * returns a map of {@code (BuildTarget, HashCode)} pairs for all root
 * build targets and their dependencies.
 */
public ImmutableMap<BuildTarget, HashCode> hashTargetGraph() throws CycleException {
  try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus, PerfEventId.of("ShowTargetHashes"))) {
    AcyclicDepthFirstPostOrderTraversal<TargetNode<?, ?>> traversal =
        new AcyclicDepthFirstPostOrderTraversal<>(
            node -> targetGraph.getAll(node.getDeps()).iterator());

    final Map<BuildTarget, ForkJoinTask<HashCode>> buildTargetHashes = new HashMap<>();
    Queue<ForkJoinTask<HashCode>> tasksToSchedule = new ArrayDeque<>();
    // Create our mapping of build-rules to tasks and arrange in bottom-up order
    // Start all the node tasks, bottom up
    for (final TargetNode<?, ?> node : traversal.traverse(roots)) {
      HashNodeTask task = new HashNodeTask(node, buildTargetHashes);
      buildTargetHashes.put(node.getBuildTarget(), task);
      tasksToSchedule.add(task);
    }

    // Execute tasks in parallel
    ForkJoinPool pool = new ForkJoinPool(numThreads);
    for (ForkJoinTask<HashCode> task : tasksToSchedule) {
      pool.execute(task);
    }

    // Wait for all scheduled tasks to complete
    return ImmutableMap.copyOf(Maps.transformEntries(buildTargetHashes, (key, value) -> value.join()));
  }
}
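Here ImmutableMap.copyOf materializes the lazy transformEntries view right away, so value.join() is invoked once per task during the copy and the caller receives a fixed snapshot of the computed hashes.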