List of usage examples for com.google.common.collect Sets newLinkedHashSetWithExpectedSize
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize)
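newLinkedHashSetWithExpectedSize creates a LinkedHashSet with a high enough initial capacity that it should hold expectedSize elements without having to grow, while still preserving insertion order. A minimal sketch of typical usage (the input data here is illustrative, not from any of the sources below):

import com.google.common.collect.Sets;
import java.util.LinkedHashSet;
import java.util.List;

public class NewLinkedHashSetWithExpectedSizeExample {
    public static void main(String[] args) {
        List<String> input = List.of("a", "b", "a", "c"); // illustrative data
        // Sized up front so the set should not need to rehash while copying the input.
        LinkedHashSet<String> unique = Sets.newLinkedHashSetWithExpectedSize(input.size());
        unique.addAll(input);
        System.out.println(unique); // prints [a, b, c], preserving first-insertion order
    }
}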
From source file:com.google.gerrit.server.git.GarbageCollectionQueue.java
public synchronized Set<Project.NameKey> addAll(Collection<Project.NameKey> projects) {
    Set<Project.NameKey> added = Sets.newLinkedHashSetWithExpectedSize(projects.size());
    for (Project.NameKey p : projects) {
        if (projectsScheduledForGc.add(p)) {
            added.add(p);
        }
    }
    return added;
}
From source file:com.google.gerrit.server.change.MergeabilityCheckQueue.java
synchronized Set<Change> addAll(Collection<Change> changes, boolean force) {
    Set<Change> r = Sets.newLinkedHashSetWithExpectedSize(changes.size());
    for (Change c : changes) {
        if (force ? forcePending.add(c.getId()) : pending.add(c.getId())) {
            r.add(c);
        }
    }
    return r;
}
From source file:org.javersion.object.types.SetType.java
@Override
public Object instantiate(PropertyTree propertyTree, Object value, ReadContext context) throws Exception {
    prepareElements(propertyTree, context);
    return Sets.newLinkedHashSetWithExpectedSize(propertyTree.getChildren().size());
}
From source file:org.gradle.plugins.ide.eclipse.internal.LinkedResourcesCreator.java
public Set<Link> links(final Project project) {
    SourceSetContainer sourceSets = project.getConvention().getPlugin(JavaPluginConvention.class)
            .getSourceSets();
    List<SourceFolder> sourceFolders = new SourceFoldersCreator().getExternalSourceFolders(sourceSets,
            new Function<File, String>() {
                @Override
                public String apply(File dir) {
                    return project.relativePath(dir);
                }
            });
    Set<Link> links = Sets.newLinkedHashSetWithExpectedSize(sourceFolders.size());
    for (SourceFolder sourceFolder : sourceFolders) {
        links.add(new Link(sourceFolder.getName(), "2", sourceFolder.getAbsolutePath(), null));
    }
    return links;
}
From source file:com.google.devtools.build.lib.skyframe.serialization.LinkedHashSetCodec.java
@Override
public LinkedHashSet<E> deserialize(DeserializationContext context, CodedInputStream codedIn)
        throws SerializationException, IOException {
    int size = codedIn.readInt32();
    LinkedHashSet<E> set = Sets.newLinkedHashSetWithExpectedSize(size);
    for (int i = 0; i < size; i++) {
        set.add(context.<E>deserialize(codedIn));
    }
    return set;
}
From source file:de.uniulm.omi.cloudiator.sword.remote.overthere.OverthereDecidingConnectionFactory.java
public OverthereDecidingConnectionFactory() {
    overthereSSHConnectionFactory = new OverthereSSHConnectionFactory();
    overthereWinRMConnectionFactory = new OverthereWinRMConnectionFactory();
    final LinkedHashSet<RemoteConnectionFactory> remoteConnectionFactories =
            Sets.newLinkedHashSetWithExpectedSize(2);
    remoteConnectionFactories.add(overthereSSHConnectionFactory);
    remoteConnectionFactories.add(overthereWinRMConnectionFactory);
    compositeConnectionFactory = new CompositeConnectionFactory(remoteConnectionFactories);
}
From source file:org.terasology.utilities.collection.TypeSetMultimap.java
private <U extends T> Set<U> convertSet(Class<U> type, Set<T> values) {
    Set<U> results = Sets.newLinkedHashSetWithExpectedSize(values.size());
    for (T value : values) {
        results.add(type.cast(value));
    }
    return results;
}
From source file:eu.numberfour.n4js.resource.OrderedResourceDescriptionsData.java
@SuppressWarnings("unchecked") @Override//from w w w . j a v a 2s. co m protected void registerDescription(final IResourceDescription description, final Map<QualifiedName, Object> target) { for (final IEObjectDescription object : description.getExportedObjects()) { final QualifiedName lowerCase = object.getName().toLowerCase(); final Object existing = target.put(lowerCase, description); if (existing != null && existing != description) { Set<IResourceDescription> set = null; if (existing instanceof IResourceDescription) { // The linked hash set is the difference comparing to the super class. set = Sets.newLinkedHashSetWithExpectedSize(2); set.add((IResourceDescription) existing); } else { set = (Set<IResourceDescription>) existing; } set.add(description); target.put(lowerCase, set); } } }
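The snippet above stores a single description per name and, only on a collision, upgrades the map entry to a small insertion-ordered set of descriptions. A minimal, hypothetical sketch of that single-value-or-set pattern using String keys and values (names and types here are illustrative, not taken from the N4JS source):

import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class SingleValueOrSetMapSketch {
    private final Map<String, Object> target = new HashMap<>();

    // Register a value under a key; on collision, upgrade the entry to a small LinkedHashSet.
    @SuppressWarnings("unchecked")
    public void register(String key, String value) {
        Object existing = target.put(key, value);
        if (existing != null && !existing.equals(value)) {
            Set<String> set;
            if (existing instanceof String) {
                // Most keys hold one value, so expected size 2 avoids over-allocating for the rare collision.
                set = Sets.newLinkedHashSetWithExpectedSize(2);
                set.add((String) existing);
            } else {
                set = (Set<String>) existing;
            }
            set.add(value);
            target.put(key, set);
        }
    }
}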
From source file:com.attribyte.essem.BasicAuth.java
/**
 * Create from properties of the form username:password=index0,index1,...
 * @param props The properties.
 */
public BasicAuth(final Properties props) {
    Map<HashCode, Set<String>> authMap = Maps.newHashMapWithExpectedSize(16);
    Set<String> indexSet = Sets.newLinkedHashSetWithExpectedSize(16);
    Splitter indexSplitter = Splitter.on(CharMatcher.anyOf(", \t")).trimResults().omitEmptyStrings();
    Enumeration names = props.propertyNames();
    while (names.hasMoreElements()) {
        String key = ((String) names.nextElement()).trim();
        String up = key.replace('|', ':');
        String expectedValue = "Basic " + EncodingUtil.encodeBase64(up.getBytes(Charsets.UTF_8));
        HashCode expectedValueCode = hashFunction.hashString(expectedValue, Charsets.UTF_8);
        Set<String> allowSet = authMap.get(expectedValueCode);
        if (allowSet == null) {
            allowSet = Sets.newLinkedHashSetWithExpectedSize(16);
            authMap.put(expectedValueCode, allowSet);
        }
        String indexStr = props.getProperty(key).trim();
        for (String index : indexSplitter.split(indexStr)) {
            if (!index.equals("*")) {
                indexSet.add(index);
            }
            allowSet.add(index);
        }
    }
    this.authorizedIndexes = ImmutableSet.copyOf(indexSet);
    ImmutableMap.Builder<HashCode, ImmutableSet<String>> builder = ImmutableMap.builder();
    for (Map.Entry<HashCode, Set<String>> entry : authMap.entrySet()) {
        builder.put(entry.getKey(), ImmutableSet.copyOf(entry.getValue()));
    }
    this.authMap = builder.build();
}
From source file:org.apache.phoenix.compile.OrderByCompiler.java
/**
 * Gets a list of columns in the ORDER BY clause
 * @param context the query context for tracking various states
 * associated with the given select statement
 * @param statement TODO
 * @param groupBy the list of columns in the GROUP BY clause
 * @param limit the row limit or null if no limit
 * @return the compiled ORDER BY clause
 * @throws SQLException
 */
public static OrderBy compile(StatementContext context, SelectStatement statement, GroupBy groupBy,
        Integer limit, RowProjector rowProjector, TupleProjector tupleProjector, boolean isInRowKeyOrder)
        throws SQLException {
    List<OrderByNode> orderByNodes = statement.getOrderBy();
    if (orderByNodes.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    ExpressionCompiler compiler = new ExpressionCompiler(context, groupBy);
    // accumulate columns in ORDER BY
    OrderPreservingTracker tracker = new OrderPreservingTracker(context, groupBy, Ordering.ORDERED,
            orderByNodes.size(), tupleProjector);
    LinkedHashSet<OrderByExpression> orderByExpressions =
            Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
    for (OrderByNode node : orderByNodes) {
        ParseNode parseNode = node.getNode();
        Expression expression = null;
        if (parseNode instanceof LiteralParseNode
                && ((LiteralParseNode) parseNode).getType() == PInteger.INSTANCE) {
            Integer index = (Integer) ((LiteralParseNode) parseNode).getValue();
            int size = rowProjector.getColumnProjectors().size();
            if (index > size || index <= 0) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND).build()
                        .buildException();
            }
            expression = rowProjector.getColumnProjector(index - 1).getExpression();
        } else {
            expression = node.getNode().accept(compiler);
            // Detect mix of aggregate and non aggregates (i.e. ORDER BY txns, SUM(txns))
            if (!expression.isStateless() && !compiler.isAggregate()) {
                if (statement.isAggregate() || statement.isDistinct()) {
                    // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                    if (statement.isDistinct()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT)
                                .setMessage(expression.toString()).build().buildException();
                    }
                    ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                }
            }
        }
        if (!expression.isStateless()) {
            boolean isAscending = node.isAscending();
            boolean isNullsLast = node.isNullsLast();
            tracker.track(expression, isAscending ? SortOrder.ASC : SortOrder.DESC, isNullsLast);
            // If we have a schema where column A is DESC, reverse the sort order and nulls last
            // since this is the order they actually are in.
            if (expression.getSortOrder() == SortOrder.DESC) {
                isAscending = !isAscending;
                isNullsLast = !isNullsLast;
            }
            OrderByExpression orderByExpression = new OrderByExpression(expression, isNullsLast, isAscending);
            orderByExpressions.add(orderByExpression);
        }
        compiler.reset();
    }
    if (orderByExpressions.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // If we're ordering by the order returned by the scan, we don't need an order by
    if (isInRowKeyOrder && tracker.isOrderPreserving()) {
        if (tracker.isReverse()) {
            // Don't use reverse scan if we're using a skip scan, as our skip scan doesn't support this yet.
            // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table,
            // so don't return it for such table types.
            if (context.getConnection().getQueryServices().getProps().getBoolean(
                    QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN)
                    && !context.getScanRanges().useSkipScanFilter()
                    && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED
                    && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
                return OrderBy.REV_ROW_KEY_ORDER_BY;
            }
        } else {
            return OrderBy.FWD_ROW_KEY_ORDER_BY;
        }
    }
    return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
}