List of usage examples for com.google.common.collect.Sets.newHashSetWithExpectedSize
public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize)
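Before the project-specific examples below, here is a minimal self-contained sketch of the typical pattern (the class name and sample data are illustrative, not taken from any of the listed source files): the expected size is usually the size of the input about to be copied or filtered, so the returned HashSet is pre-sized and does not need to grow while it is populated.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

import com.google.common.collect.Sets;

public class NewHashSetWithExpectedSizeExample {
    public static void main(String[] args) {
        List<String> inputs = Arrays.asList("alpha", "beta", "gamma", "alpha");

        // Size the set for the number of elements we are about to add so the
        // backing table never has to resize and rehash while being filled.
        HashSet<String> distinct = Sets.newHashSetWithExpectedSize(inputs.size());
        distinct.addAll(inputs);

        System.out.println(distinct); // prints the three distinct values
    }
}

Unlike new HashSet<>(expectedSize), which treats the argument as an initial table capacity, the Guava factory accounts for the load factor, so a set created this way can hold expectedSize elements without resizing.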
From source file:com.opengamma.financial.analytics.model.equity.option.EquityVanillaBarrierOptionVegaMatrixFunction.java
@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context, final ComputationTarget target,
        final Map<ValueSpecification, ValueRequirement> inputs) {
    final Set<ValueSpecification> results = super.getResults(context, target, inputs);
    final SecuritySource securitySource = OpenGammaCompilationContext.getSecuritySource(context);
    final FinancialSecurity security = (FinancialSecurity) target.getSecurity();
    final ExternalId underlyingId = FinancialSecurityUtils.getUnderlyingId(security);
    //final String bbgTicker = getBloombergTicker(securitySource, underlyingId);
    final Set<ValueSpecification> resultsWithExtraProperties = Sets.newHashSetWithExpectedSize(results.size());
    for (final ValueSpecification spec : results) {
        final String name = spec.getValueName();
        final ComputationTargetSpecification targetSpec = spec.getTargetSpecification();
        final ValueProperties properties = spec.getProperties().copy()
                .with(InstrumentTypeProperties.PROPERTY_SURFACE_INSTRUMENT_TYPE, InstrumentTypeProperties.EQUITY_OPTION)
                // .with(ValuePropertyNames.UNDERLYING_TICKER, bbgTicker)
                .get();
        resultsWithExtraProperties.add(new ValueSpecification(name, targetSpec, properties));
    }
    return resultsWithExtraProperties;
}
From source file:com.android.ide.eclipse.ddms.systrace.SystraceOptionsDialogV2.java
private static Set<String> getEnabledTags(Table table, List<SystraceTag> tags) {
    Set<String> enabledTags = Sets.newHashSetWithExpectedSize(tags.size());
    for (int i = 0; i < table.getItemCount(); i++) {
        TableItem it = table.getItem(i);
        if (it.getChecked()) {
            enabledTags.add(tags.get(i).tag);
        }
    }
    return enabledTags;
}
From source file:org.apache.giraph.worker.WorkerAggregatorHandler.java
/**
 * Get set of all worker task ids except the current one
 *
 * @return Set of all other worker task ids
 */
public Set<Integer> getOtherWorkerIdsSet() {
    Set<Integer> otherWorkers = Sets.newHashSetWithExpectedSize(serviceWorker.getWorkerInfoList().size());
    for (WorkerInfo workerInfo : serviceWorker.getWorkerInfoList()) {
        if (workerInfo.getTaskId() != serviceWorker.getWorkerInfo().getTaskId()) {
            otherWorkers.add(workerInfo.getTaskId());
        }
    }
    return otherWorkers;
}
From source file:org.apache.phoenix.index.PhoenixTransactionalIndexer.java
private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env,
        PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute)
        throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for "
                + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(
            mutations.size() * 2 * indexMaintainers.size());
    try {
        // Track if we have row keys with Delete mutations (or Puts that are
        // Tephra's Delete marker). If there are none, we don't need to do the scan for
        // prior versions, if there are, we do. Since rollbacks always have delete mutations,
        // this logic will work there too.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA,
                    Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE,
                    null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns,
                    indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns,
                    indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null) txTable.close();
    }
    return indexUpdates;
}
From source file:org.gradle.internal.component.model.MultipleCandidateMatcher.java
private Set<Object> getCandidateValues(int a) {
    // It's often the case that all the candidate values are the same. In this case, we avoid
    // the creation of a set, and just iterate until we find a different value. Then, only in
    // this case, we lazily initialize a set and collect all the candidate values.
    Set<Object> candidateValues = null;
    Object compatibleValue = null;
    boolean first = true;
    for (int c = compatible.nextSetBit(0); c >= 0; c = compatible.nextSetBit(c + 1)) {
        Object candidateValue = getCandidateValue(c, a);
        if (candidateValue == null) {
            continue;
        }
        if (first) {
            // first match, just record the value. We can't use "null" as the candidate value may be null
            compatibleValue = candidateValue;
            first = false;
        } else if (compatibleValue != candidateValue || candidateValues != null) {
            // we see a different value, or the set already exists, in which case we initialize
            // the set if it wasn't done already, and collect all values.
            if (candidateValues == null) {
                candidateValues = Sets.newHashSetWithExpectedSize(compatible.cardinality());
                candidateValues.add(compatibleValue);
            }
            candidateValues.add(candidateValue);
        }
    }
    if (candidateValues == null) {
        if (compatibleValue == null) {
            return Collections.emptySet();
        }
        return Collections.singleton(compatibleValue);
    }
    return candidateValues;
}
From source file:com.android.tools.lint.checks.MissingClassDetector.java
@Override
public void visitElement(@NonNull XmlContext context, @NonNull Element element) {
    String pkg = null;
    Node classNameNode;
    String className;
    String tag = element.getTagName();
    ResourceFolderType folderType = context.getResourceFolderType();
    if (folderType == VALUES) {
        if (!tag.equals(TAG_STRING)) {
            return;
        }
        Attr attr = element.getAttributeNode(ATTR_NAME);
        if (attr == null) {
            return;
        }
        className = attr.getValue();
        classNameNode = attr;
    } else if (folderType == LAYOUT) {
        if (tag.indexOf('.') > 0) {
            className = tag;
            classNameNode = element;
        } else if (tag.equals(VIEW_FRAGMENT) || tag.equals(VIEW_TAG)) {
            Attr attr = element.getAttributeNodeNS(ANDROID_URI, ATTR_NAME);
            if (attr == null) {
                attr = element.getAttributeNode(ATTR_CLASS);
            }
            if (attr == null) {
                return;
            }
            className = attr.getValue();
            classNameNode = attr;
        } else {
            return;
        }
    } else if (folderType == XML) {
        if (!tag.equals(TAG_HEADER)) {
            return;
        }
        Attr attr = element.getAttributeNodeNS(ANDROID_URI, ATTR_FRAGMENT);
        if (attr == null) {
            return;
        }
        className = attr.getValue();
        classNameNode = attr;
    } else {
        // Manifest file
        if (TAG_APPLICATION.equals(tag) || TAG_ACTIVITY.equals(tag) || TAG_SERVICE.equals(tag)
                || TAG_RECEIVER.equals(tag) || TAG_PROVIDER.equals(tag)) {
            Attr attr = element.getAttributeNodeNS(ANDROID_URI, ATTR_NAME);
            if (attr == null) {
                return;
            }
            className = attr.getValue();
            classNameNode = attr;
            pkg = context.getMainProject().getPackage();
        } else {
            return;
        }
    }
    if (className.isEmpty()) {
        return;
    }
    String fqcn;
    int dotIndex = className.indexOf('.');
    if (dotIndex <= 0) {
        if (pkg == null) {
            return; // value file
        }
        if (dotIndex == 0) {
            fqcn = pkg + className;
        } else {
            // According to the <activity> manifest element documentation, this is not
            // valid ( http://developer.android.com/guide/topics/manifest/activity-element.html )
            // but it appears in manifest files and appears to be supported by the runtime
            // so handle this in code as well:
            fqcn = pkg + '.' + className;
        }
    } else {
        // else: the class name is already a fully qualified class name
        fqcn = className;
        // Only look for fully qualified tracker names in analytics files
        if (folderType == VALUES && !SdkUtils.endsWith(context.file.getPath(), "analytics.xml")) { //$NON-NLS-1$
            return;
        }
    }
    String signature = ClassContext.getInternalName(fqcn);
    if (signature.isEmpty() || signature.startsWith(ANDROID_PKG_PREFIX)) {
        return;
    }
    if (!context.getProject().getReportIssues()) {
        // If this is a library project not being analyzed, ignore it
        return;
    }
    Handle handle = null;
    if (!context.getDriver().isSuppressed(context, MISSING, element)) {
        if (mReferencedClasses == null) {
            mReferencedClasses = Maps.newHashMapWithExpectedSize(16);
            mCustomViews = Sets.newHashSetWithExpectedSize(8);
        }
        handle = context.createLocationHandle(element);
        mReferencedClasses.put(signature, handle);
        if (folderType == LAYOUT && !tag.equals(VIEW_FRAGMENT)) {
            mCustomViews.add(ClassContext.getInternalName(className));
        }
    }
    if (signature.indexOf('$') != -1) {
        checkInnerClass(context, element, pkg, classNameNode, className);
        // The internal name contains a $ which means it's an inner class.
        // The conversion from fqcn to internal name is a bit ambiguous:
        // "a.b.C.D" usually means "inner class D in class C in package a.b".
        // However, it can (see issue 31592) also mean class D in package "a.b.C".
        // To make sure we don't falsely complain that foo/Bar$Baz doesn't exist,
        // in case the user has actually created a package named foo/Bar and a proper
        // class named Baz, we register *both* into the reference map.
        // When generating errors we'll look for these and rip them back out if
        // it looks like one of the two variations have been seen.
        if (handle != null) {
            // Assume that each successive $ is really a capitalized package name
            // instead. In other words, for A$B$C$D (assumed to be class A with
            // inner classes A.B, A.B.C and A.B.C.D) generate the following possible
            // referenced classes A/B$C$D (class B in package A with inner classes C and C.D),
            // A/B/C$D and A/B/C/D
            while (true) {
                int index = signature.indexOf('$');
                if (index == -1) {
                    break;
                }
                signature = signature.substring(0, index) + '/' + signature.substring(index + 1);
                mReferencedClasses.put(signature, handle);
                if (folderType == LAYOUT && !tag.equals(VIEW_FRAGMENT)) {
                    mCustomViews.add(signature);
                }
            }
        }
    }
}
From source file:com.samsung.px.pig.storage.DynamoDBStorage.java
/** HELPERS
 * @throws IOException
 **/
private void checkPigSchemaForDynamo(ResourceSchema schema) throws IOException {
    // extract field names
    Set<String> fieldNames = Sets.newHashSetWithExpectedSize(schema.getFields().length);
    for (ResourceFieldSchema field : schema.getFields()) {
        String fieldName = field.getName();
        if (fieldNames.contains(fieldName)) {
            throw new IOException(
                    "Schema cannot contain duplicated field name. Found duplicated: " + fieldName);
        }
        if (field.getType() == DataType.MAP || //
                field.getType() == DataType.TUPLE || field.getType() == DataType.BAG) {
            throw new IOException(
                    "DynamoDBStorage can not store map, or bag types. Found one in field name: " + fieldName);
        }
        fieldNames.add(fieldName);
    }
    // ensure that Dynamo table primary keys are found in field names
    DescribeTableResult describe = describeDynamoTable();
    KeySchema dynamoKeySchema = describe.getTable().getKeySchema();
    if (dynamoKeySchema.getHashKeyElement() != null) {
        String expectedFieldName = dynamoKeySchema.getHashKeyElement().getAttributeName();
        if (!fieldNames.contains(expectedFieldName)) {
            throw new IOException("Dynamo table " + this.tableName + " hash primary key [" + expectedFieldName
                    + "] not found in " + " pig schema fields: " + fieldNames);
        }
    }
    if (dynamoKeySchema.getRangeKeyElement() != null) {
        String expectedFieldName = dynamoKeySchema.getRangeKeyElement().getAttributeName();
        if (!fieldNames.contains(expectedFieldName)) {
            throw new IOException("Dynamo table " + this.tableName + " range secondary key [" + expectedFieldName
                    + "] not found in " + " pig schema fields: " + fieldNames);
        }
    }
}
From source file:com.google.gerrit.server.git.GroupCollector.java
private Set<String> resolveGroups(ObjectId forCommit, Collection<String> candidates) throws OrmException {
    Set<String> actual = Sets.newTreeSet();
    Set<String> done = Sets.newHashSetWithExpectedSize(candidates.size());
    Set<String> seen = Sets.newHashSetWithExpectedSize(candidates.size());
    Deque<String> todo = new ArrayDeque<>(candidates);
    // BFS through all aliases to find groups that are not aliased to anything
    // else.
    while (!todo.isEmpty()) {
        String g = todo.removeFirst();
        if (!seen.add(g)) {
            continue;
        }
        Set<String> aliases = groupAliases.get(g);
        if (aliases.isEmpty()) {
            if (!done.contains(g)) {
                Iterables.addAll(actual, resolveGroup(forCommit, g));
                done.add(g);
            }
        } else {
            todo.addAll(aliases);
        }
    }
    return actual;
}
From source file:com.google.walkaround.util.server.appengine.MemcacheTable.java
/**
 * @return the set of keys for which new entries were created (some may not have been created
 *         because of the policy).
 */
public Set<K> putAll(Map<K, V> mappings, @Nullable Expiration expires, SetPolicy policy) {
    Map<TaggedKey<K>, V> rawMappings = Maps.newHashMapWithExpectedSize(mappings.size());
    for (Map.Entry<K, V> entry : mappings.entrySet()) {
        rawMappings.put(tagKey(entry.getKey()), entry.getValue());
    }
    Set<TaggedKey<K>> rawResult = service.putAll(rawMappings, expires, policy);
    Set<K> result = Sets.newHashSetWithExpectedSize(rawResult.size());
    for (TaggedKey<K> key : rawResult) {
        result.add(key.getKey());
    }
    return result;
}
From source file:org.apache.impala.analysis.InlineViewRef.java
/**
 * Create and register a non-materialized tuple descriptor for this inline view.
 * This method is called from the analyzer when registering this inline view.
 * Create a non-materialized tuple descriptor for this inline view.
 */
@Override
public TupleDescriptor createTupleDescriptor(Analyzer analyzer) throws AnalysisException {
    int numColLabels = getColLabels().size();
    Preconditions.checkState(numColLabels > 0);
    HashSet<String> uniqueColAliases = Sets.newHashSetWithExpectedSize(numColLabels);
    ArrayList<StructField> fields = Lists.newArrayListWithCapacity(numColLabels);
    for (int i = 0; i < numColLabels; ++i) {
        // inline view select statement has been analyzed. Col label should be filled.
        Expr selectItemExpr = queryStmt_.getResultExprs().get(i);
        String colAlias = getColLabels().get(i).toLowerCase();
        // inline view col cannot have duplicate name
        if (!uniqueColAliases.add(colAlias)) {
            throw new AnalysisException("duplicated inline view column alias: '" + colAlias + "'"
                    + " in inline view " + "'" + getUniqueAlias() + "'");
        }
        fields.add(new StructField(colAlias, selectItemExpr.getType(), null));
    }
    // Create the non-materialized tuple and set its type.
    TupleDescriptor result = analyzer.getDescTbl()
            .createTupleDescriptor(getClass().getSimpleName() + " " + getUniqueAlias());
    result.setIsMaterialized(false);
    result.setType(new StructType(fields));
    return result;
}