List of usage examples for com.google.common.collect.Maps#newConcurrentMap()
public static <K, V> ConcurrentMap<K, V> newConcurrentMap()
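Before the project examples, a minimal self-contained sketch of the call itself. Assumptions: Guava on the classpath and Java 8+; in current Guava versions this factory simply returns an empty java.util.concurrent.ConcurrentHashMap, so it rejects null keys and values.

import com.google.common.collect.Maps;
import java.util.concurrent.ConcurrentMap;

public class NewConcurrentMapExample {
    public static void main(String[] args) {
        // Maps.newConcurrentMap() returns an empty, thread-safe map
        // (a ConcurrentHashMap in current Guava versions).
        ConcurrentMap<String, Integer> counts = Maps.newConcurrentMap();

        // ConcurrentMap's atomic operations avoid check-then-act races:
        counts.putIfAbsent("requests", 0);         // insert only if absent
        counts.merge("requests", 1, Integer::sum); // atomic increment
        System.out.println(counts);                // {requests=1}
    }
}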
From source file:com.eucalyptus.cluster.callback.reporting.CloudWatchHelper.java
public List<AbsoluteMetricQueueItem> collectMetricData(final Collection<String> expectedInstanceIds,
        final DescribeSensorsResponse msg) throws Exception {
    ArrayList<AbsoluteMetricQueueItem> absoluteMetricQueueItems = new ArrayList<>();
    // cloudwatch metric caches
    final ConcurrentMap<String, DiskReadWriteMetricTypeCache> metricCacheMap = Maps.newConcurrentMap();
    final EC2DiskMetricCache ec2DiskMetricCache = new EC2DiskMetricCache();
    for (final SensorsResourceType sensorData : msg.getSensorsResources()) {
        if (!RESOURCE_TYPE_INSTANCE.equals(sensorData.getResourceType())
                || !expectedInstanceIds.contains(sensorData.getResourceName()))
            continue;
        for (final MetricsResourceType metricType : sensorData.getMetrics()) {
            for (final MetricCounterType counterType : metricType.getCounters()) {
                for (final MetricDimensionsType dimensionType : counterType.getDimensions()) {
                    // find and fire most recent value for metric/dimension
                    final List<MetricDimensionsValuesType> values = Lists
                            .newArrayList(stripMilliseconds(dimensionType.getValues()));
                    // CloudWatch use case of metric data: best to enter older data first...
                    Collections.sort(values, Ordering.natural().onResultOf(GetTimestamp.INSTANCE));
                    for (final MetricDimensionsValuesType value : values) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("ResourceUUID: " + sensorData.getResourceUuid());
                            LOG.trace("ResourceName: " + sensorData.getResourceName());
                            LOG.trace("Metric: " + metricType.getMetricName());
                            LOG.trace("Dimension: " + dimensionType.getDimensionName());
                            LOG.trace("Timestamp: " + value.getTimestamp());
                            LOG.trace("Value: " + value.getValue());
                        }
                        final Long currentTimeStamp = value.getTimestamp().getTime();
                        final Double currentValue = value.getValue();
                        if (currentValue == null) {
                            LOG.debug("Event received with null 'value', skipping for cloudwatch");
                            continue;
                        }
                        boolean hasEc2DiskMetricName = EC2_DISK_METRICS
                                .contains(metricType.getMetricName().replace("Volume", "Disk"));
                        // Let's try only creating "zero" points for timestamps from disks
                        if (hasEc2DiskMetricName) {
                            // Put a placeholder in, in case we don't have any non-EBS volumes
                            ec2DiskMetricCache.initializeMetrics(sensorData.getResourceUuid(),
                                    sensorData.getResourceName(), currentTimeStamp);
                        }
                        boolean isEbsMetric = dimensionType.getDimensionName().startsWith("vol-");
                        boolean isEc2DiskMetric = !isEbsMetric && hasEc2DiskMetricName;
                        if (isEbsMetric || !isEc2DiskMetric) {
                            addToQueueItems(absoluteMetricQueueItems, new Supplier<InstanceUsageEvent>() {
                                @Override
                                public InstanceUsageEvent get() {
                                    return new InstanceUsageEvent(sensorData.getResourceUuid(),
                                            sensorData.getResourceName(), metricType.getMetricName(),
                                            dimensionType.getSequenceNum(), dimensionType.getDimensionName(),
                                            currentValue, currentTimeStamp);
                                }
                            });
                            if (isEbsMetric) {
                                // Special case to calculate VolumeConsumedReadWriteOps.
                                // As it is (VolumeThroughputPercentage / 100) * (VolumeReadOps + VolumeWriteOps),
                                // and we are hard coding VolumeThroughputPercentage as 100%, we will just use
                                // VolumeReadOps + VolumeWriteOps. And just in case VolumeReadOps is called
                                // DiskReadOps we do both cases...
                                addToQueueItems(absoluteMetricQueueItems,
                                        combineReadWriteDiskMetric("DiskReadOps", "DiskWriteOps", metricCacheMap,
                                                "DiskConsumedReadWriteOps", metricType, sensorData,
                                                dimensionType, value));
                                addToQueueItems(absoluteMetricQueueItems,
                                        combineReadWriteDiskMetric("VolumeReadOps", "VolumeWriteOps",
                                                metricCacheMap, "VolumeConsumedReadWriteOps", metricType,
                                                sensorData, dimensionType, value));
                                // Also need VolumeTotalReadWriteTime to compute VolumeIdleTime
                                addToQueueItems(absoluteMetricQueueItems,
                                        combineReadWriteDiskMetric("VolumeTotalReadTime", "VolumeTotalWriteTime",
                                                metricCacheMap, "VolumeTotalReadWriteTime", metricType,
                                                sensorData, dimensionType, value));
                            }
                        } else {
                            // see if it is a volume metric
                            String metricName = metricType.getMetricName().replace("Volume", "Disk");
                            ec2DiskMetricCache.addToMetric(sensorData.getResourceUuid(),
                                    sensorData.getResourceName(), metricName, currentValue, currentTimeStamp);
                        }
                    }
                }
            }
        }
        if (Iterables.tryFind(absoluteMetricQueueItems,
                withMetric("AWS/EC2", null, "InstanceId", sensorData.getResourceName())).isPresent()
                && !Iterables.tryFind(absoluteMetricQueueItems,
                        withMetric("AWS/EC2", Count.StatusCheckFailed.name(), "InstanceId",
                                sensorData.getResourceName()))
                        .isPresent()) {
            absoluteMetricQueueItems.addAll(buildInstanceStatusPut(sensorData.getResourceName()));
        }
    }
    Collection<Supplier<InstanceUsageEvent>> ec2DiskMetrics = ec2DiskMetricCache.getMetrics();
    List<Supplier<InstanceUsageEvent>> ec2DiskMetricsSorted = Lists.newArrayList(ec2DiskMetrics);
    Collections.sort(ec2DiskMetricsSorted,
            Ordering.natural().onResultOf(new Function<Supplier<InstanceUsageEvent>, Long>() {
                @Override
                @Nullable
                public Long apply(@Nullable Supplier<InstanceUsageEvent> supplier) {
                    return supplier.get().getValueTimestamp();
                }
            }));
    for (Supplier<InstanceUsageEvent> ec2DiskMetric : ec2DiskMetricsSorted) {
        try {
            addToQueueItems(absoluteMetricQueueItems, ec2DiskMetric);
        } catch (Exception ex) {
            LOG.debug("Unable to add system metric " + ec2DiskMetric, ex);
        }
    }
    return absoluteMetricQueueItems;
}
From source file:com.baidu.rigel.biplatform.ma.report.utils.QueryUtils.java
/**
 * Transforms a cube into a new deep copy with name-keyed measures and dimensions.
 * @param cube the source cube
 * @return new Cube
 */
public static Cube transformCube(Cube cube) {
    MiniCube newCube = (MiniCube) DeepcopyUtils.deepCopy(cube);
    // re-key measures by name in a concurrent map
    final Map<String, Measure> measures = Maps.newConcurrentMap();
    cube.getMeasures().values().forEach(m -> {
        measures.put(m.getName(), m);
    });
    newCube.setMeasures(measures);
    // deep-copy dimensions, re-keying dimensions and levels by name
    final Map<String, Dimension> dimensions = Maps.newLinkedHashMap();
    cube.getDimensions().values().forEach(dim -> {
        MiniCubeDimension tmp = (MiniCubeDimension) DeepcopyUtils.deepCopy(dim);
        LinkedHashMap<String, Level> tmpLevel = Maps.newLinkedHashMap();
        dim.getLevels().values().forEach(level -> {
            level.setDimension(dim);
            tmpLevel.put(level.getName(), level);
        });
        tmp.setLevels(tmpLevel);
        dimensions.put(tmp.getName(), tmp);
    });
    newCube.setDimensions(dimensions);
    return newCube;
}
From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java
@Override
public void prepareSuperstep() {
    rwLock.writeLock().lock();
    super.prepareSuperstep();
    // promote the incoming buffers to "current" and install fresh ones
    pendingCurrentMessages = pendingIncomingMessages;
    currentMessagesOnDisk = incomingMessagesOnDisk;
    pendingIncomingMessages = Maps.newConcurrentMap();
    incomingMessagesOnDisk = Maps.newConcurrentMap();
    rwLock.writeLock().unlock();
}
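The Giraph method above swaps the "incoming" buffers into "current" and installs fresh concurrent maps under a write lock, so writers for the next superstep never race with readers of the previous one. A minimal stand-alone sketch of that swap-and-reset idiom; the class and field names below are hypothetical, not Giraph's:

import com.google.common.collect.Maps;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class DoubleBufferedStore<K, V> {
    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    private ConcurrentMap<K, V> current = Maps.newConcurrentMap();
    private ConcurrentMap<K, V> incoming = Maps.newConcurrentMap();

    // Promote the incoming buffer and install a fresh one; the write lock
    // excludes readers and writers only for the cheap reference swap.
    void swapBuffers() {
        rwLock.writeLock().lock();
        try {
            current = incoming;
            incoming = Maps.newConcurrentMap();
        } finally {
            rwLock.writeLock().unlock();
        }
    }

    ConcurrentMap<K, V> currentView() {
        rwLock.readLock().lock();
        try {
            return current;
        } finally {
            rwLock.readLock().unlock();
        }
    }
}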
From source file:org.opendaylight.ovsdb.plugin.impl.ConfigurationServiceImpl.java
/**
 * Inserts a tree of rows in multiple tables that have parent-child relationships referenced
 * through the OVSDB schema's refTable construct.
 *
 * @param node OVSDB node
 * @param databaseName database name that represents the schema supported by the node
 * @param tableName table on which the row is inserted
 * @param parentTable name of the parent table to which this operation will result in attaching/mutating
 * @param parentUuid UUID of a row in the parent table to which this operation will result in attaching/mutating
 * @param parentColumn name of the column in the parent table to be mutated with the UUID that results from the insert operation
 * @param row row tree with parent-child relationships via columns of type refTable
 * @throws OvsdbPluginException any failure during the insert transaction results in a specific exception
 * @return the row tree with the UUID of every inserted row populated in the _uuid column of every row in the tree
 */
@Override
public Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, String parentTable,
        UUID parentUuid, String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException {
    Connection connection = connectionService.getConnection(node);
    OvsdbClient client = connection.getClient();

    if (databaseName == null || tableName == null) {
        throw new OvsdbPluginException("databaseName, tableName and parentUuid are Mandatory Parameters");
    }

    if (parentTable == null && parentUuid != null) {
        parentTable = this.getTableNameForRowUuid(node, databaseName, parentUuid);
    }

    if (parentColumn == null && parentTable != null) {
        DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
        TableSchema<GenericTableSchema> parentTableSchema = dbSchema.table(parentTable,
                GenericTableSchema.class);
        parentColumn = this.getReferencingColumn(parentTableSchema, tableName);
    }

    LOGGER.debug(
            "insertTree Connection : {} Table : {} ParentTable : {} Parent Column: {} Parent UUID : {} Row : {}",
            client.getConnectionInfo(), tableName, parentTable, parentColumn, parentUuid, row);

    Map<UUID, Map.Entry<String, Row<GenericTableSchema>>> referencedRows = Maps.newConcurrentMap();
    extractReferencedRows(node, databaseName, row, referencedRows, 0);
    DatabaseSchema dbSchema = client.getDatabaseSchema(OvsVswitchdSchemaConstants.DATABASE_NAME);
    TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);

    String namedUuid = "Transaction_" + tableName;
    this.processInsertTransaction(client, databaseName, tableName, parentTable, parentUuid, parentColumn,
            namedUuid, row, transactionBuilder);

    int referencedRowsInsertIndex = transactionBuilder.getOperations().size();
    // Insert referenced rows
    if (referencedRows != null) {
        for (UUID refUuid : referencedRows.keySet()) {
            Map.Entry<String, Row<GenericTableSchema>> referencedRow = referencedRows.get(refUuid);
            TableSchema<GenericTableSchema> refTableSchema = dbSchema.table(referencedRow.getKey(),
                    GenericTableSchema.class);
            transactionBuilder
                    .add(op.insert(refTableSchema, referencedRow.getValue()).withId(refUuid.toString()));
        }
    }

    ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
    List<OperationResult> operationResults;
    try {
        operationResults = results.get();
        if (operationResults.isEmpty()
                || (transactionBuilder.getOperations().size() != operationResults.size())) {
            throw new OvsdbPluginException("Insert Operation Failed");
        }
        for (OperationResult result : operationResults) {
            if (result.getError() != null) {
                throw new OvsdbPluginException(
                        "Insert Operation Failed with Error : " + result.getError().toString());
            }
        }
        return getNormalizedRow(dbSchema, tableName, row, referencedRows, operationResults,
                referencedRowsInsertIndex);
    } catch (InterruptedException | ExecutionException e) {
        throw new OvsdbPluginException("Exception : " + e.getLocalizedMessage());
    }
}
From source file:com.baidu.rigel.biplatform.ma.resource.QueryDataResource.java
private Map<String, Object> updateLocalContextAndReturn(ReportRuntimeModel runTimeModel, String areaId,
        Map<String, String[]> contextParams) {
    // Reset the area's local context, then apply the incoming request parameters
    QueryContext localContext = runTimeModel.getLocalContextByAreaId(areaId);
    localContext.reset();
    for (String key : contextParams.keySet()) {
        // take the first value of each request parameter
        String[] value = contextParams.get(key);
        if (value != null && value.length > 0) {
            localContext.put(key, value[0]);
        }
    }
    // Build the effective query parameters
    final Map<String, Object> queryParams = Maps.newHashMap();
    Map<String, Object> localParams = localContext.getParams();
    // If the local context overrides, local params win; global params only fill the gaps
    if ("true".equals(localParams.get("isOverride"))) {
        queryParams.putAll(localParams);
        runTimeModel.getContext().getParams().forEach((key, value) -> {
            if (!queryParams.containsKey(key)) {
                queryParams.put(key, value);
            }
        });
        return queryParams;
    }
    // Otherwise global params win (disabled variants of a start/end time-range filter follow):
    // Iterator<String> it = localParams.keySet().iterator();
    // while (it.hasNext()) {
    //     String key = it.next();
    //     String value = localParams.get(key).toString();
    //     if (value.contains("start") && value.contains("end")) {
    //         it.remove();
    //     }
    // }
    // for (String key : localParams.keySet()) {
    //     String value = localParams.get(key).toString();
    //     if (value.contains("start") && value.contains("end")) {
    //         localParams.remove(key);
    //     }
    // }
    queryParams.putAll(localParams);
    if (runTimeModel.getContext() != null) {
        queryParams.putAll(runTimeModel.getContext().getParams());
    } else {
        throw new RuntimeException("?");
    }
    // Drop null/empty values; newConcurrentMap rejects null values anyway
    Map<String, Object> tmp = Maps.newConcurrentMap();
    queryParams.forEach((k, v) -> {
        if (v != null && !StringUtils.isEmpty(v.toString())) {
            tmp.put(k, v);
        }
    });
    return tmp;
}
From source file:com.baidu.rigel.biplatform.ma.report.utils.QueryUtils.java
/**
 * Decorates a chart with extend-area settings.
 * @param chart the chart to decorate
 * @param area the extend area
 * @param index index of the default measure
 */
public static void decorateChart(DIReportChart chart, ExtendArea area, Schema schema, int index) {
    if (area.getType() == ExtendAreaType.CHART) {
        assert area.getLogicModel() != null : "logic model must not be null";
        // topN settings
        if (area.getLogicModel().getTopSetting() != null) {
            MeasureTopSetting topSetting = area.getLogicModel().getTopSetting();
            chart.setRecordSize(topSetting.getRecordSize());
            chart.setTopedMeasureId(topSetting.getMeasureId());
            chart.setTopType(topSetting.getTopType().name());
            chart.setAreaId(area.getId());
        }
        FormatModel formatModel = area.getFormatModel();
        if (formatModel != null && formatModel.getDataFormat() != null) {
            addDataFormatInfo(chart, formatModel.getDataFormat());
            Map<String, String> colorFormat = formatModel.getColorFormat();
            if (colorFormat != null && !colorFormat.isEmpty() && chart.getSeriesData() != null) {
                for (SeriesDataUnit data : chart.getSeriesData()) {
                    if (data == null) {
                        continue;
                    }
                    data.setColorDefine(colorFormat.get(data.getyAxisName()));
                }
            }
            Map<String, String> positions = formatModel.getPositions();
            // note: the original checked colorFormat != null here, which would NPE on a
            // null positions map; checking positions directly is the intended guard
            if (positions != null && !positions.isEmpty() && chart.getSeriesData() != null) {
                for (SeriesDataUnit data : chart.getSeriesData()) {
                    if (data == null) {
                        continue;
                    }
                    data.setPosition(positions.get(data.getyAxisName()));
                }
            }
        }
        final Map<String, String> dimMap = Maps.newConcurrentMap();
        String[] allDims = area.getLogicModel().getSelectionDims().values().stream().map(item -> {
            OlapElement tmp = getOlapElement(area, schema, item);
            if (tmp != null) {
                dimMap.put(tmp.getId(), tmp.getName());
                return tmp.getCaption();
            } else {
                return null;
            }
        }).filter(x -> x != null).toArray(String[]::new);
        chart.setDimMap(dimMap);
        chart.setAllDims(allDims);
        String[] allMeasures = area.getLogicModel().getSelectionMeasures().values().stream().map(item -> {
            OlapElement tmp = getOlapElement(area, schema, item);
            if (tmp != null) {
                chart.getMeasureMap().put(tmp.getId(), tmp.getCaption());
                return tmp.getCaption();
            } else {
                return null;
            }
        }).filter(x -> x != null).toArray(String[]::new);
        chart.setAllMeasures(allMeasures);
        final Item[] columns = area.getLogicModel().getColumns();
        List<String> tmp = getOlapElementNames(columns, area.getCubeId(), schema);
        if (tmp.size() > 0) {
            chart.setDefaultMeasures(tmp.toArray(new String[0]));
        }
        for (int i = 0; i < columns.length; ++i) {
            chart.getMeasureMap().put(columns[i].getOlapElementId(), tmp.get(i));
        }
        // List<String> defaultDims = getOlapElementNames(
        //         area.getLogicModel().getRows(), area.getCubeId(), schema);
        if (index >= 0 && index < chart.getAllMeasures().length) {
            chart.setDefaultMeasures(new String[] { chart.getAllMeasures()[index] });
        }
        // else {
        //     if (defaultDims.size() > 0) {
        //         chart.setDefaultDims(defaultDims.toArray(new String[0]));
        //     }
        // }
    }
}
From source file:com.google.devtools.build.lib.skyframe.ActionExecutionFunction.java
/**
 * Should be called once execution is over, and the intra-build cache of in-progress computations
 * should be discarded. If the cache is non-empty (due to an interrupted/failed build), failure to
 * call complete() can both cause a memory leak and incorrect results on the subsequent build.
 */
@Override
public void complete() {
    // Discard all remaining state (there should be none after a successful execution).
    stateMap = Maps.newConcurrentMap();
}
From source file:org.opendaylight.ovsdb.plugin.impl.ConfigurationServiceImpl.java
@Override
public ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node, String databaseName, String tableName)
        throws OvsdbPluginException {
    ConcurrentMap<String, Row> ovsTable = ovsdbInventoryService.getTableCache(node, databaseName, tableName);
    if (ovsTable == null) {
        return null;
    }
    // copy the cached table, converting String keys to UUID keys
    ConcurrentMap<UUID, Row<GenericTableSchema>> tableDB = Maps.newConcurrentMap();
    for (String uuidStr : ovsTable.keySet()) {
        tableDB.put(new UUID(uuidStr), ovsTable.get(uuidStr));
    }
    return tableDB;
}
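A usage note on the conversion loop above: iterating keySet() and then calling get() performs two lookups per entry, while iterating entries does one. A sketch of the same key-converting copy as a generic helper; the helper below is hypothetical, not part of the OVSDB plugin:

import com.google.common.collect.Maps;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

public final class MapCopy {
    // Copy a map while transforming its keys, touching each entry once.
    public static <K1, K2, V> ConcurrentMap<K2, V> copyWithKeys(Map<K1, V> source,
            Function<? super K1, ? extends K2> keyFn) {
        ConcurrentMap<K2, V> result = Maps.newConcurrentMap();
        source.forEach((k, v) -> result.put(keyFn.apply(k), v));
        return result;
    }
}

With the plugin's UUID wrapper (whose constructor takes a String, as the example shows), the loop would collapse to copyWithKeys(ovsTable, UUID::new), modulo the raw Row generics in the cache.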
From source file:com.baidu.rigel.biplatform.ma.resource.QueryDataResource.java
/**
 * Queries the members of a dimension under the given parent uniqueName for a query component area.
 */
@RequestMapping(value = "/{reportId}/members/{areaId}", method = { RequestMethod.POST })
public ResponseResult getMemberWithParent(@PathVariable("reportId") String reportId,
        @PathVariable("areaId") String areaId, HttpServletRequest request) {
    long begin = System.currentTimeMillis();
    logger.info("[INFO]--- ---begin init params with report id {}", reportId);
    String currentUniqueName = request.getParameter("uniqueName");
    int level = MetaNameUtil.parseUnique2NameArray(currentUniqueName).length - 1;
    final ReportDesignModel model = getDesignModelFromRuntimeModel(reportId);
    final ReportRuntimeModel runtimeModel = reportModelCacheManager.getRuntimeModel(reportId);
    Map<String, Map<String, List<Map<String, String>>>> datas = Maps.newConcurrentMap();
    Map<String, String> params = Maps.newHashMap();
    runtimeModel.getContext().getParams().forEach((k, v) -> {
        params.put(k, v == null ? "" : v.toString());
    });
    ExtendArea area = model.getExtendById(areaId);
    if (area != null && isQueryComp(area.getType()) && !area.listAllItems().isEmpty()) {
        Item item = area.listAllItems().values().toArray(new Item[0])[0];
        Cube cube = model.getSchema().getCubes().get(area.getCubeId());
        Cube tmpCube = QueryUtils.transformCube(cube);
        String dimId = item.getOlapElementId();
        Dimension dim = cube.getDimensions().get(dimId);
        if (dim != null) {
            List<Map<String, String>> values;
            try {
                values = Lists.newArrayList();
                params.remove(dim.getId());
                List<Member> members = reportModelQueryService.getMembers(tmpCube, currentUniqueName, params,
                        securityKey);
                members.forEach(m -> {
                    Map<String, String> tmp = Maps.newHashMap();
                    tmp.put("value", m.getUniqueName());
                    tmp.put("text", m.getCaption());
                    tmp.put("isLeaf", Boolean.toString(level < dim.getLevels().size()));
                    values.add(tmp);
                });
                Map<String, List<Map<String, String>>> datasource = Maps.newHashMap();
                datasource.put("datasource", values);
                datas.put(areaId, datasource);
            } catch (Exception e) {
                logger.info(e.getMessage(), e);
            } // end catch
        } // end if dim != null
    } // end if area != null
    ResponseResult rs = new ResponseResult();
    rs.setStatus(0);
    rs.setData(datas);
    rs.setStatusInfo("OK");
    logger.info("[INFO]--- --- successfully query member, cost {} ms", (System.currentTimeMillis() - begin));
    return rs;
}
From source file:io.terminus.snz.user.service.CompanyServiceImpl.java
@Override
public Response<Map<Long, String>> companyHasVcode(Integer pageNp, Integer size) {
    Response<Map<Long, String>> result = new Response<Map<Long, String>>();
    PageInfo page = new PageInfo(pageNp, size);
    try {
        List<Company> companyList = companyDao.findCompanyHasVCode(page.toMap());
        // map company id -> supplier code
        Map<Long, String> vcodes = Maps.newConcurrentMap();
        for (Company c : companyList) {
            vcodes.put(c.getId(), c.getSupplierCode());
        }
        result.setResult(vcodes);
    } catch (Exception e) {
        log.error("`companyHasVcode` invoke fail. with page no:{}, page size:{}, e:{}", pageNp, size, e);
        result.setError("company.find.fail");
        return result;
    }
    return result;
}
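As a usage note, the same list-to-map build can be written with the JDK's stream collector. This is only a drop-in under two assumptions the loop above does not make: company ids are unique (the collector throws on duplicate keys) and supplier codes are non-null (concurrent maps reject null values).

import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;

// Equivalent one-pass build, inside the try block above:
ConcurrentMap<Long, String> vcodes = companyList.stream()
        .collect(Collectors.toConcurrentMap(Company::getId, Company::getSupplierCode));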