Usage examples for com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize)
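Before the project examples, a minimal standalone sketch of the method itself (class name and values are illustrative): the expectedSize hint pre-sizes the backing table so that adding roughly that many elements avoids rehashing, while the LinkedHashSet preserves insertion order.

import com.google.common.collect.Sets;

import java.util.LinkedHashSet;

public class NewLinkedHashSetExample {
    public static void main(String[] args) {
        // Pre-size for ~3 elements so no resize occurs while adding them.
        LinkedHashSet<String> colors = Sets.newLinkedHashSetWithExpectedSize(3);
        colors.add("red");
        colors.add("green");
        colors.add("red"); // duplicate; the set keeps only the first insertion
        System.out.println(colors); // prints [red, green] in insertion order
    }
}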
From source file:com.textocat.textokit.postagger.opennlp.FeatureExtractorsBasedContextGenerator.java
@Override
public String[] getContext(int index, Token[] sequence, String[] priorDecisions,
        Object[] additionalContext) {
    if (additionalContext == null || additionalContext.length < 1) {
        throw sentenceExpected();
    }
    if (!(additionalContext[0] instanceof Annotation)) {
        throw sentenceExpected();
    }
    Annotation sent = (Annotation) additionalContext[0];
    // TODO cache features that do not depend on prev tags
    Token curToken = sequence[index];
    List<Feature> features = Lists.newLinkedList();
    try {
        JCas jCas = curToken.getCAS().getJCas();
        for (FeatureExtractor1 fe : featureExtractors) {
            if (fe instanceof CleartkExtractor) {
                features.addAll(((CleartkExtractor) fe).extractBetween(jCas, curToken, sent));
            } else {
                features.addAll(fe.extract(jCas, curToken));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    // encode
    Set<String> contexts = Sets.newLinkedHashSetWithExpectedSize(features.size() + prevTagsInHistory);
    // TODO move to utils
    for (Feature f : features) {
        try {
            contexts.addAll(featureEncoders.encode(f));
        } catch (CleartkEncoderException e) {
            throw new RuntimeException(e);
        }
    }
    ContextGeneratorUtils.addPreviousTags(index, priorDecisions, prevTagsInHistory, contexts);
    if (dictContextGen != null) {
        String prevTag = ContextGeneratorUtils.getPreviousTag(index, priorDecisions);
        contexts.addAll(dictContextGen.extract(curToken, prevTag));
    }
    return contexts.toArray(new String[contexts.size()]);
}
From source file:defrac.intellij.projectView.DefracViewProjectNode.java
@NotNull
private static Set<Module> getModules(@NotNull final Project project) {
    final List<VirtualFile> topLevelContentRoots =
            ProjectViewDirectoryHelper.getInstance(project).getTopLevelRoots();
    final Set<Module> modules = Sets.newLinkedHashSetWithExpectedSize(topLevelContentRoots.size());
    for (VirtualFile root : topLevelContentRoots) {
        final Module module = ModuleUtil.findModuleForFile(root, project);
        if (module == null) {
            continue;
        }
        modules.add(module);
    }
    return modules;
}
From source file:com.cinchapi.concourse.util.Convert.java
/**
 * Return a Set that represents the Thrift representation of each of the
 * {@code objects} in the input Set.
 *
 * @param objects a Set of java objects
 * @return a Set of TObjects
 */
public static Set<TObject> javaSetToThrift(Set<Object> objects) {
    Set<TObject> thrift = Sets.newLinkedHashSetWithExpectedSize(objects.size());
    javaCollectionToThrift(objects, thrift);
    return thrift;
}
From source file:de.uniulm.omi.cloudiator.sword.drivers.openstack.config.OpenstackComputeModule.java
@Provides
@Singleton
FloatingIpPoolStrategy provideFloatingIpPoolStrategy(Injector injector) {
    Set<FloatingIpPoolStrategy> availableStrategies = Sets.newLinkedHashSetWithExpectedSize(2);
    availableStrategies.add(injector.getInstance(ConfigurationFloatingIpPoolStrategy.class));
    availableStrategies.add(injector.getInstance(OneFloatingIpPoolStrategy.class));
    return new CompositeFloatingIpPoolStrategy(availableStrategies);
}
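A LinkedHashSet is a natural fit here if CompositeFloatingIpPoolStrategy consults its delegates in insertion order, so the configuration-based strategy would be tried before the one-pool fallback. That composite is not shown in this snippet; the sketch below is a hypothetical ordered composite illustrating that assumed pattern, not the library's actual code.

import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;

// Hypothetical: delegates are tried in the set's iteration order and the
// first non-empty result wins; insertion order therefore encodes priority.
class OrderedComposite<T> {

    private final Set<Supplier<Optional<T>>> delegates;

    OrderedComposite(Set<Supplier<Optional<T>>> delegates) {
        this.delegates = delegates;
    }

    Optional<T> apply() {
        for (Supplier<Optional<T>> delegate : delegates) {
            Optional<T> result = delegate.get();
            if (result.isPresent()) {
                return result;
            }
        }
        return Optional.empty();
    }
}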
From source file:org.terasology.world.propagation.BatchPropagator.java
private void processReduction() {
    int depth = 0;
    while (depth < rules.getMaxValue()) {
        byte oldValue = (byte) (rules.getMaxValue() - depth);
        Set<Vector3i> toProcess = reduceQueues[depth];
        reduceQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());
        for (Vector3i pos : toProcess) {
            purge(pos, oldValue);
        }
        if (reduceQueues[depth].isEmpty()) {
            depth++;
        }
    }
}
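The swap-before-iterate idiom above lets purge() enqueue new positions at the same depth without a ConcurrentModificationException: the live queue is replaced with a fresh, similarly sized set before the old batch is drained. A self-contained sketch of the same pattern (class and method names are illustrative):

import com.google.common.collect.Sets;

import java.util.Set;

class DrainLoop {

    private Set<String> queue = Sets.newLinkedHashSetWithExpectedSize(8);

    void drain() {
        while (!queue.isEmpty()) {
            Set<String> toProcess = queue;
            // Swap in a fresh set sized like the batch being drained, so
            // process() can schedule follow-up work without mutating the
            // set that is currently being iterated.
            queue = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());
            for (String item : toProcess) {
                process(item);
            }
        }
    }

    private void process(String item) {
        // may call queue.add(...) to enqueue more work for the next pass
    }
}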
From source file:com.cinchapi.concourse.util.Transformers.java
/**
 * Populate a {@link Set} with the items in {@code original} after applying
 * {@code function}.
 *
 * <p>
 * <strong>WARNING:</strong> There is the potential for data loss in the
 * event that {@code function} returns duplicate transformed results for
 * items in {@code original}.
 * </p>
 *
 * @param original the {@link Set} to transform
 * @param function the transformation {@link Function}
 * @return the transformed Set
 */
public static <F, V> Set<V> transformSet(Set<F> original, Function<? super F, ? extends V> function) {
    Set<V> transformed = Sets.newLinkedHashSetWithExpectedSize(original.size());
    for (F item : original) {
        transformed.add(function.apply(item));
    }
    return transformed;
}
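A small demonstration of the data-loss warning (inputs hypothetical, and assuming the Function parameter accepts a method reference): two distinct inputs map to the same transformed value, so the result holds fewer elements than the original.

import com.google.common.collect.ImmutableSet;

import java.util.Set;

class TransformSetDemo {
    public static void main(String[] args) {
        Set<String> original = ImmutableSet.of("ant", "bee", "wasp");
        Set<Integer> lengths = Transformers.transformSet(original, String::length);
        // "ant" and "bee" both map to 3 and collapse into a single element,
        // so lengths prints [3, 4] although original had three items.
        System.out.println(lengths);
    }
}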
From source file:org.terasology.world.propagation.StandardBatchPropagator.java
private void processReduction() {
    int depth = 0;
    while (depth < rules.getMaxValue()) {
        byte oldValue = (byte) (rules.getMaxValue() - depth);
        Set<Vector3i> toProcess = reduceQueues[depth];
        if (!toProcess.isEmpty()) {
            reduceQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());
            // purge() may enqueue new positions at this depth
            for (Vector3i pos : toProcess) {
                purge(pos, oldValue);
            }
            // only advance once no new work was queued at this depth
            if (reduceQueues[depth].isEmpty()) {
                depth++;
            }
        } else {
            depth++;
        }
    }
}
From source file:org.ambraproject.wombat.freemarker.asset.RenderAssetsDirective.java
/**
 * Sort assets by their dependencies and return their paths in order.
 * <p/>
 * The result is a topological sort that preserves the input order as much as possible. The algorithm repeatedly
 * pulls the first node from the sequence that does not depend on any nodes not yet pulled.
 * <p/>
 * This method clobbers the nodes' {@code dependencies} fields. Specifically, a successful run will empty all the
 * dependency sets, with the assumption that the node objects will be discarded immediately after this method
 * returns.
 *
 * @param assetNodes an ordered collection of assets
 * @return a list of asset paths, sorted by dependency
 */
@VisibleForTesting
static List<String> sortNodes(Collection<AssetNode> assetNodes) {
    List<String> simplePaths = extractPathsIfSimple(assetNodes);
    if (simplePaths != null) return simplePaths;

    // Topological sort by Kahn's algorithm
    Set<String> assetPaths = Sets.newLinkedHashSetWithExpectedSize(assetNodes.size());
    Deque<AssetNode> queue = new LinkedList<>(assetNodes);
    while (!queue.isEmpty()) {
        boolean foundAvailableNode = false;
        for (Iterator<AssetNode> queueIterator = queue.iterator(); queueIterator.hasNext(); ) {
            AssetNode candidate = queueIterator.next();

            // Check whether the candidate has any dependencies not yet in assetPaths
            Collection<String> dependencies = candidate.getDependencies();
            for (Iterator<String> dependencyIterator = dependencies.iterator(); dependencyIterator.hasNext(); ) {
                String dependency = dependencyIterator.next();
                if (assetPaths.contains(dependency)) {
                    dependencyIterator.remove();
                } else {
                    break;
                }
            }

            if (dependencies.isEmpty()) {
                assetPaths.add(candidate.getPath());
                queueIterator.remove();
                foundAvailableNode = true;
                break;
            }
        }
        if (!foundAvailableNode) {
            String message = "Can't resolve asset dependencies. "
                    + "(There is either a cycle or a reference to a nonexistent asset.) " + queue;
            throw new RuntimeException(message);
        }
    }
    return new ArrayList<>(assetPaths);
}
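The LinkedHashSet serves both needs of Kahn's algorithm here: contains() gives an O(1) membership test during the dependency scan, and insertion order records the resolved load order directly, so the final list needs no extra sorting. An illustrative trace (asset names hypothetical):

// Suppose "app.js" depends on "jquery.js" while "styles.css" depends on
// nothing. A node is pulled only once all its dependencies are in the set,
// so the set's insertion order is the final load order.
Set<String> assetPaths = Sets.newLinkedHashSetWithExpectedSize(3);
assetPaths.add("jquery.js");  // no unmet dependencies, pulled first
assetPaths.add("app.js");     // its dependency is now in assetPaths
assetPaths.add("styles.css");
// new ArrayList<>(assetPaths) -> [jquery.js, app.js, styles.css]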
From source file:org.terasology.world.propagation.BatchPropagator.java
private void processIncrease() {
    int depth = 0;
    while (depth < rules.getMaxValue() - 1) {
        byte value = (byte) (rules.getMaxValue() - depth);
        Set<Vector3i> toProcess = increaseQueues[depth];
        increaseQueues[depth] = Sets.newLinkedHashSetWithExpectedSize(toProcess.size());
        for (Vector3i pos : toProcess) {
            push(pos, value);
        }
        if (increaseQueues[depth].isEmpty()) {
            depth++;
        }
    }
}
From source file:com.google.gerrit.server.query.change.ChangeData.java
public void limitToPatchSets(Collection<PatchSet.Id> ids) {
    limitedIds = Sets.newLinkedHashSetWithExpectedSize(ids.size());
    for (PatchSet.Id id : ids) {
        if (!id.getParentKey().equals(legacyId)) {
            throw new IllegalArgumentException(
                    String.format("invalid patch set %s for change %s", id, legacyId));
        }
        limitedIds.add(id);
    }
}