List of usage examples for com.google.common.collect.Maps.newHashMapWithExpectedSize
public static <K, V> HashMap<K, V> newHashMapWithExpectedSize(int expectedSize)
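The method returns a HashMap sized to hold expectedSize entries without an intermediate resize; this differs from new HashMap<>(n), where n is the initial table capacity rather than the expected number of entries. Before the project examples below, here is a minimal self-contained sketch of the call (class and variable names are illustrative, not taken from any of the projects):

import com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;

public class NewHashMapWithExpectedSizeDemo {
    public static void main(String[] args) {
        List<String> words = List.of("alpha", "beta", "gamma");
        // Sized for words.size() entries, so populating it triggers no rehashing.
        Map<String, Integer> lengths = Maps.newHashMapWithExpectedSize(words.size());
        for (String word : words) {
            lengths.put(word, word.length());
        }
        System.out.println(lengths); // e.g. {alpha=5, beta=4, gamma=5}
    }
}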
From source file:com.opengamma.engine.fudgemsg.CompiledViewCalculationConfigurationFudgeBuilder.java
@SuppressWarnings("unchecked")
protected Map<ValueSpecification, Set<ValueRequirement>> decodeTerminalOutputSpecifications(
        final FudgeDeserializer deserializer, final FudgeMsg msg) {
    final FudgeMsg submsg = msg.getMessage(TERMINAL_OUTPUT_SPECIFICATIONS_FIELD);
    if (submsg == null) {
        return Collections.emptyMap();
    }
    final Map<ValueSpecification, Set<ValueRequirement>> result =
            Maps.newHashMapWithExpectedSize(submsg.getNumFields() / 2);
    // Fields arrive as interleaved key/value entries; 'overflow' buffers whichever
    // side runs ahead until its partner arrives.
    LinkedList<Object> overflow = null;
    ValueSpecification key = null;
    Set<ValueRequirement> value = null;
    for (final FudgeField field : submsg) {
        if (MAP_KEY.equals(field.getOrdinal())) {
            final ValueSpecification fieldValue = deserializer.fieldValueToObject(ValueSpecification.class, field);
            if (key == null) {
                if (value == null) {
                    key = fieldValue;
                } else {
                    result.put(fieldValue, value);
                    if (overflow != null) {
                        value = overflow.isEmpty() ? null : (Set<ValueRequirement>) overflow.removeFirst();
                    } else {
                        value = null;
                    }
                }
            } else {
                if (overflow == null) {
                    overflow = new LinkedList<Object>();
                }
                overflow.add(fieldValue);
            }
        } else if (MAP_VALUE.equals(field.getOrdinal())) {
            final FudgeMsg submsg2 = (FudgeMsg) field.getValue();
            final Set<ValueRequirement> fieldValue = Sets.newHashSetWithExpectedSize(submsg2.getNumFields());
            for (final FudgeField field2 : submsg2) {
                fieldValue.add(deserializer.fieldValueToObject(ValueRequirement.class, field2));
            }
            if (value == null) {
                if (key == null) {
                    value = fieldValue;
                } else {
                    result.put(key, fieldValue);
                    if (overflow != null) {
                        key = overflow.isEmpty() ? null : (ValueSpecification) overflow.removeFirst();
                    } else {
                        key = null;
                    }
                }
            } else {
                if (overflow == null) {
                    overflow = new LinkedList<Object>();
                }
                overflow.add(fieldValue);
            }
        }
    }
    return result;
}
From source file:com.opengamma.web.analytics.blotter.BeanBuildingVisitor.java
private Map<?, ?> buildMap(MetaProperty<?> property, BeanTraverser traverser) {
    Map<?, ?> sourceData = _data.getMapValues(property.name());
    Class<? extends Bean> beanType = property.metaBean().beanType();
    Class<?> keyType = JodaBeanUtils.mapKeyType(property, beanType);
    Class<?> valueType = JodaBeanUtils.mapValueType(property, beanType);
    Map<Object, Object> map = Maps.newHashMapWithExpectedSize(sourceData.size());
    for (Map.Entry<?, ?> entry : sourceData.entrySet()) {
        Object key = convert(entry.getKey(), property, keyType, traverser);
        Object value = convert(entry.getValue(), property, valueType, traverser);
        map.put(key, value);
    }
    return map;
}
From source file:com.android.tools.idea.templates.ParameterValueResolver.java
/**
 * Returns a map of parameters to their resolved values.
 */
@NotNull
public Map<Parameter, Object> resolve() throws CircularParameterDependencyException {
    Map<String, Object> staticValues = getStaticParameterValues(myUserValues, myAdditionalValues);
    Map<String, Object> computedValues = computeParameterValues(staticValues);
    HashMap<Parameter, Object> allValues =
            Maps.newHashMapWithExpectedSize(computedValues.size() + staticValues.size());
    for (Parameter parameter : Iterables.concat(myStaticParameters, myComputedParameters)) {
        allValues.put(parameter, computedValues.get(parameter.id));
    }
    return allValues;
}
From source file:org.apache.kylin.storage.hbase.HBaseKeyRange.java
private void init(Collection<ColumnValueRange> andDimensionRanges) {
    int size = andDimensionRanges.size();
    Map<TblColRef, String> startValues = Maps.newHashMapWithExpectedSize(size);
    Map<TblColRef, String> stopValues = Maps.newHashMapWithExpectedSize(size);
    Map<TblColRef, Set<String>> fuzzyValues = Maps.newHashMapWithExpectedSize(size);
    for (ColumnValueRange dimRange : andDimensionRanges) {
        TblColRef column = dimRange.getColumn();
        startValues.put(column, dimRange.getBeginValue());
        stopValues.put(column, dimRange.getEndValue());
        fuzzyValues.put(column, dimRange.getEqualValues());
        TblColRef partitionDateColumnRef = cubeSeg.getCubeDesc().getModel().getPartitionDesc()
                .getPartitionDateColumnRef();
        if (column.equals(partitionDateColumnRef)) {
            initPartitionRange(dimRange);
        }
    }
    AbstractRowKeyEncoder encoder = AbstractRowKeyEncoder.createInstance(cubeSeg, cuboid);
    encoder.setBlankByte(RowConstants.ROWKEY_LOWER_BYTE);
    this.startKey = encoder.encode(startValues);
    encoder.setBlankByte(RowConstants.ROWKEY_UPPER_BYTE);
    // In order to make stopRow inclusive, add a trailing 0 byte. See Scan.setStopRow(byte[] stopRow).
    this.stopKey = Bytes.add(encoder.encode(stopValues), ZERO_TAIL_BYTES);
    // Restore encoder defaults for later reuse (note AbstractRowKeyEncoder.createInstance() caches instances).
    encoder.setBlankByte(AbstractRowKeyEncoder.DEFAULT_BLANK_BYTE);
    // Always fuzzy match the cuboid ID to lock on the selected cuboid.
    this.fuzzyKeys = buildFuzzyKeys(fuzzyValues);
}
From source file:org.apache.phoenix.compile.DeleteCompiler.java
private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef,
        TableRef indexTableRef, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef)
        throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    Map<ImmutableBytesPtr, RowMutationState> indexMutations = null;
    // If indexTableRef is set, we're deleting the rows from both the index table and
    // the data table through a single query to save executing an additional one.
    if (indexTableRef != null) {
        indexMutations = Maps.newHashMapWithExpectedSize(batchSize);
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            ImmutableBytesPtr ptr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
            // Use the tuple directly, as the projector would not have all the PK columns from
            // our index table inside of our projection. Since the tables are equal,
            // there's no translation required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // The ResultSet.getBytes() call will have inverted it - we need to invert it back.
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table has a
            // row timestamp column, then the row key will already have its value.
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER,
                    statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO));
            if (indexTableRef != null) {
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.put(indexPtr, new RowMutationState(PRow.DELETE_MARKER,
                        statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size()
                        + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size.
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, connection);
                connection.getMutationState().join(state);
                if (indexTableRef != null) {
                    MutationState indexState = new MutationState(indexTableRef, indexMutations, 0, maxSize, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.commit();
                mutations.clear();
                if (indexMutations != null) {
                    indexMutations.clear();
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return.
        int nCommittedRows = rowCount / batchSize * batchSize;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, connection);
        if (indexTableRef != null) {
            // To prevent the counting of these index rows, we have a negative for remainingRows.
            MutationState indexState = new MutationState(indexTableRef, indexMutations, 0, maxSize, connection);
            state.join(indexState);
        }
        return state;
    }
}
From source file:com.opengamma.core.AbstractEHCachingSource.java
@Override
public Map<UniqueId, V> get(final Collection<UniqueId> uids) {
    final Map<UniqueId, V> results = Maps.newHashMapWithExpectedSize(uids.size());
    final Collection<UniqueId> misses = new ArrayList<UniqueId>(uids.size());
    for (UniqueId uid : uids) {
        if (uid.isLatest()) {
            misses.add(uid);
        } else {
            V result = _frontCacheByUID.get(uid);
            if (result != null) {
                results.put(uid, result);
            } else {
                final Element e = _uidCache.get(uid);
                if (e != null) {
                    @SuppressWarnings("unchecked")
                    V objectValue = (V) e.getObjectValue();
                    result = objectValue;
                    s_logger.debug("retrieved object: {} from uid-cache", result);
                    V existing = _frontCacheByUID.putIfAbsent(uid, result);
                    if (existing != null) {
                        result = existing;
                    }
                    results.put(uid, result);
                } else {
                    misses.add(uid);
                }
            }
        }
    }
    if (!misses.isEmpty()) {
        final Map<UniqueId, V> underlying = getUnderlying().get(misses);
        for (UniqueId uid : misses) {
            V result = underlying.get(uid);
            if (result != null) {
                result = cacheItem(result);
                results.put(uid, result);
            }
        }
    }
    return results;
}
From source file:gobblin.source.extractor.extract.kafka.KafkaExtractor.java
public KafkaExtractor(WorkUnitState state) {
    super(state);
    this.workUnitState = state;
    this.topicName = KafkaUtils.getTopicName(state);
    this.partitions = KafkaUtils.getPartitions(state);
    this.lowWatermark = state.getWorkunit().getLowWatermark(MultiLongWatermark.class);
    this.highWatermark = state.getWorkunit().getExpectedHighWatermark(MultiLongWatermark.class);
    this.nextWatermark = new MultiLongWatermark(this.lowWatermark);
    this.kafkaConsumerClientResolver = new ClassAliasResolver<>(GobblinKafkaConsumerClientFactory.class);
    try {
        this.kafkaConsumerClient = this.closer.register(this.kafkaConsumerClientResolver
                .resolveClass(state.getProp(KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
                        KafkaSource.DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS))
                .newInstance().create(ConfigUtils.propertiesToConfig(state.getProperties())));
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    }
    this.stopwatch = Stopwatch.createUnstarted();
    this.decodingErrorCount = Maps.newHashMap();
    this.avgMillisPerRecord = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.avgRecordSizes = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.errorPartitions = Sets.newHashSet();
    // The actual high watermark starts with the low watermark.
    this.workUnitState.setActualHighWatermark(this.lowWatermark);
}
From source file:org.graylog2.rest.resources.alarmcallbacks.AlarmCallbacksResource.java
@GET @Path("/types") @Timed//ww w. j av a 2 s .co m @ApiOperation(value = "Get a list of all alarm callbacks types") public AvailableAlarmCallbacksResponse available() { final Map<String, AvailableAlarmCallbackSummaryResponse> types = Maps .newHashMapWithExpectedSize(availableAlarmCallbacks.size()); for (AlarmCallback availableAlarmCallback : availableAlarmCallbacks) { final AvailableAlarmCallbackSummaryResponse type = new AvailableAlarmCallbackSummaryResponse(); type.name = availableAlarmCallback.getName(); type.requested_configuration = getConfigurationRequest(availableAlarmCallback).asList(); types.put(availableAlarmCallback.getClass().getCanonicalName(), type); } final AvailableAlarmCallbacksResponse response = new AvailableAlarmCallbacksResponse(); response.types = types; return response; }
From source file:org.graylog2.inputs.InputImpl.java
@Override
public Map<String, String> getStaticFields() {
    if (fields.get(EMBEDDED_STATIC_FIELDS) == null) {
        return Collections.emptyMap();
    }
    final BasicDBList list = (BasicDBList) fields.get(EMBEDDED_STATIC_FIELDS);
    final Map<String, String> staticFields = Maps.newHashMapWithExpectedSize(list.size());
    for (final Object element : list) {
        try {
            final DBObject field = (DBObject) element;
            staticFields.put((String) field.get(FIELD_STATIC_FIELD_KEY),
                    (String) field.get(FIELD_STATIC_FIELD_VALUE));
        } catch (Exception e) {
            LOG.error("Cannot build static field from persisted data. Skipping.", e);
        }
    }
    return staticFields;
}
From source file:com.ourlife.dev.common.web.BaseController.java
/**
 * Builds a response map for JSON output.
 *
 * @param code response code
 * @param msg  response message
 * @return a map containing the code and message
 */
protected Map<String, Object> withMap(Integer code, String msg) {
    Map<String, Object> map = Maps.newHashMapWithExpectedSize(2);
    map.put("code", code);
    map.put("msg", msg);
    return map;
}