List of usage examples for com.google.common.collect.Iterables.get
public static <T> T get(Iterable<T> iterable, int position)
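Iterables.get returns the element at the given position of any Iterable, throwing IndexOutOfBoundsException when the position is out of range; a three-argument overload returns a default value instead. A minimal standalone sketch before the project examples below (the class name and sample values are illustrative only, not taken from any of the listed sources):

import com.google.common.collect.Iterables;

import java.util.LinkedHashSet;
import java.util.Set;

public class IterablesGetExample {
    public static void main(String[] args) {
        Set<String> names = new LinkedHashSet<>();
        names.add("alpha");
        names.add("beta");

        // Positional access on a collection that has no index-based accessor of its own.
        String second = Iterables.get(names, 1);              // "beta"

        // Overload with a default value avoids IndexOutOfBoundsException for invalid positions.
        String missing = Iterables.get(names, 5, "fallback"); // "fallback"

        System.out.println(second + " " + missing);
    }
}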
From source file:com.github.naios.wide.framework.internal.storage.server.builder.SQLScope.java
private void buildInserts(final StringBuilder builder,
        final Entry<ServerStorage<?>, Collection<ServerStorageStructure>> structures) {
    // Build delete before insert queries
    buildDeletes(builder, structures);

    final SQLMaker sqlMaker = new SQLMaker(sqlBuilder, sqlBuilder.getInsertConfig());

    if (!structures.getValue().isEmpty())
        builder.setLength(builder.length() - "\n".length());

    final ServerStorageStructure anyStructure = Iterables.get(structures.getValue(), 0);
    final String valuePart = sqlMaker.createInsertValuePart(structures.getValue());

    builder.append(sqlMaker.createInsertQuery(anyStructure.getOwner().getTableName(),
            anyStructure.getValues(), valuePart)).append(SQLMaker.NEWLINE);
}
From source file:org.jiemamy.eclipse.core.ui.editor.diagram.connection.CreateConnectionCommand.java
/**
 * Returns the {@link JmLocalKeyConstraint} to use for the given table: the primary key if one
 * exists, otherwise the first local key constraint found.
 *
 * @param table the table to inspect
 * @return the key constraint, or {@code null} if the table has none
 */
private JmLocalKeyConstraint getKey(JmTable table) {
    JmLocalKeyConstraint key = table.getPrimaryKey();
    if (key == null) {
        Collection<JmLocalKeyConstraint> localKeys = table.getConstraints(JmLocalKeyConstraint.class);
        if (localKeys.size() > 0) {
            key = Iterables.get(localKeys, 0);
        }
    }
    return key;
}
From source file:com.b2international.snowowl.snomed.reasoner.server.classification.ReasonerTaxonomyWalker.java
private long getConceptId(final OWLClass owlClass) {
    final String strippedShortForm = pm.getShortForm(owlClass.getIRI())
            .substring(SnomedOntologyUtils.PREFIX_SNOMED.length());
    return Long.parseLong(Iterables.get(Splitter.on('_').split(strippedShortForm), 1));
}
From source file:org.apache.mahout.knn.cluster.StreamingKMeans.java
private UpdatableSearcher clusterInternal(Iterable<Centroid> datapoints, boolean collapseClusters) {
    int oldNumProcessedDataPoints = numProcessedDatapoints;
    // We clear the centroids we have in case of cluster collapse, the old clusters are the
    // datapoints but we need to re-cluster them.
    if (collapseClusters) {
        centroids.clear();
        numProcessedDatapoints = 0;
    }

    int numCentroidsToSkip = 0;
    if (centroids.size() == 0) {
        // Assign the first datapoint to the first cluster.
        // Adding a vector to a searcher would normally just reference the copy,
        // but we could potentially mutate it and so we need to make a clone.
        centroids.add(Iterables.get(datapoints, 0).clone());
        numCentroidsToSkip = 1;
        ++numProcessedDatapoints;
    }

    Random rand = RandomUtils.getRandom();

    // To cluster, we scan the data and either add each point to the nearest group or create a new group.
    // When we get too many groups, we need to increase the threshold and rescan our current groups.
    for (WeightedVector row : Iterables.skip(datapoints, numCentroidsToSkip)) {
        // Get the closest vector and its weight as a WeightedThing<Vector>.
        // The weight of the WeightedThing is the distance to the query and the value is a
        // reference to one of the vectors we added to the searcher previously.
        WeightedThing<Vector> closestPair = centroids.search(row, 1).get(0);

        // We get a uniformly distributed random number between 0 and 1 and compare it with the
        // distance to the closest cluster divided by the distanceCutoff.
        // This is so that if the closest cluster is further than distanceCutoff,
        // closestPair.getWeight() / distanceCutoff > 1 which will trigger the creation of a new
        // cluster anyway.
        // However, if the ratio is less than 1, we want to create a new cluster with probability
        // proportional to the distance to the closest cluster.
        if (rand.nextDouble() < closestPair.getWeight() / distanceCutoff) {
            // Add new centroid, note that the vector is copied because we may mutate it later.
            centroids.add(row.clone());
        } else {
            // Merge the new point with the existing centroid. This will update the centroid's actual
            // position.
            // We know that all the points we inserted in the centroids searcher are (or extend)
            // WeightedVector, so the cast will always succeed.
            Centroid centroid = (Centroid) closestPair.getValue();
            // We will update the centroid by removing it from the searcher and reinserting it to
            // ensure consistency.
            if (!centroids.remove(centroid, 1e-7)) {
                throw new RuntimeException("Unable to remove centroid");
            }
            centroid.update(row);
            centroids.add(centroid);
        }

        progressLogger.debug("numProcessedDataPoints: {}, estimatedNumClusters: {}, "
                        + "distanceCutoff: {}, numCentroids: {}",
                numProcessedDatapoints, estimatedNumClusters, distanceCutoff, centroids.size());

        if (!collapseClusters && centroids.size() > estimatedNumClusters) {
            estimatedNumClusters = (int) Math.max(estimatedNumClusters,
                    clusterLogFactor * Math.log(numProcessedDatapoints));

            // TODO does shuffling help?
            List<Centroid> shuffled = Lists.newArrayList();
            for (Vector v : centroids) {
                shuffled.add((Centroid) v);
            }
            Collections.shuffle(shuffled);
            // Re-cluster using the shuffled centroids as data points. The centroids member variable
            // is modified directly.
            clusterInternal(shuffled, true);

            // In the original algorithm, with distributions with sharp scale effects, the
            // distanceCutoff can grow to excessive size leading sub-clustering to collapse
            // the centroids set too much. This test prevents increase in distanceCutoff if
            // the current value is doing well at collapsing the clusters.
            if (centroids.size() > clusterOvershoot * estimatedNumClusters) {
                distanceCutoff *= beta;
            }
        }
        ++numProcessedDatapoints;
    }

    if (collapseClusters) {
        numProcessedDatapoints = oldNumProcessedDataPoints;
    }

    // Normally, iterating through the searcher produces Vectors,
    // but since we always used Centroids, we adapt the return type.
    return centroids;
}
From source file:org.aksw.mex.log4mex.MyMEX.java
public ExperimentConfigurationVO Configuration(String value) throws Exception {
    ExperimentConfigurationVO ret = null;
    try {
        Collection<ExperimentConfigurationVO> t = Collections2.filter(this.experimentConfigurationList,
                experimentConfigurationVO -> experimentConfigurationVO.getId().equals(value));
        if (t != null && t.size() > 0) {
            ret = Iterables.get(t, 0);
        } else {
            throw new Exception("Configuration ID has not been found: " + value);
        }
    } catch (Exception e) {
        LOGGER.error(e.toString());
    }
    return ret;
}
From source file:org.netbeans.modules.android.project.configs.AndroidConfigProvider.java
private void fixConfigurations(String activeConfig) {
    if (configs.isEmpty()) {
        configs.put(DEFAULT_CONFIG.getDisplayName(), DEFAULT_CONFIG);
    }
    if (activeConfig != null && configs.containsKey(activeConfig)) {
        activeConfigName = activeConfig;
    } else {
        activeConfigName = Iterables.get(configs.keySet(), 0);
    }
}
From source file:org.aksw.mex.log4mex.ExperimentConfigurationVO.java
/**
 * Gets a specific algorithm of a configuration based on the algorithm class.
 *
 * @param algo the algorithm class to look up
 * @return the matching AlgorithmVO, or null if none is found
 */
public AlgorithmVO Algorithm(MEXEnum.EnumAlgorithmsClasses algo) {
    if (this._algorithms == null) {
        this._algorithms = new ArrayList<>();
    }
    AlgorithmVO ret = null;
    try {
        Collection<AlgorithmVO> t = Collections2.filter(this._algorithms,
                p -> p.getClassName().equals(algo.name()));
        if (t != null && t.size() > 0) {
            ret = Iterables.get(t, 0);
        }
    } catch (Exception e) {
        System.out.println(e.toString());
    }
    return ret;
}
From source file:net.lldp.checksims.algorithm.smithwaterman.SmithWatermanAlgorithm.java
/**
 * Compute a Smith-Waterman alignment.
 *
 * TODO tests for this
 *
 * @return Pair of Token Lists representing optimal detected alignments
 * @throws InternalAlgorithmError Thrown if internal error causes violation of preconditions
 */
public Pair<TokenList, TokenList> computeSmithWatermanAlignment() throws InternalAlgorithmError {
    // Make sure our candidates list is initially empty
    candidates.clear();

    // Start by computing the entire array, and adding the results to candidates
    mergeIntoCandidates(computeArraySubset(wholeArray));

    // Go through all candidates
    while (!candidates.isEmpty()) {
        // Need to identify the largest key (largest value in the S-W array)
        int largestKey = Ordering.natural().max(candidates.keySet());

        // Get coordinate(s) with largest value in S-W array
        Set<Coordinate> largestCoords = candidates.get(largestKey);

        if (largestCoords == null || largestCoords.isEmpty()) {
            throw new InternalAlgorithmError("Null or empty mapping from largest coordinates!");
        }

        // Arbitrarily break ties, if they exist
        Coordinate currMax = Iterables.get(largestCoords, 0);

        // Check to verify that this match is over the threshold
        // This should never happen, so log if it does
        // TODO investigate why this is happening
        if (s[currMax.getX()][currMax.getY()] < threshold) {
            logs.trace("Potential algorithm error: identified candidate pointing to 0 at " + currMax);

            largestCoords.remove(currMax);
            if (largestCoords.isEmpty()) {
                candidates.remove(largestKey);
            } else {
                candidates.put(largestKey, largestCoords);
            }

            continue;
        }

        // Get match coordinates
        Set<Coordinate> coords = getMatchCoordinates(currMax);

        // Get match origin
        Coordinate currOrigin = getFirstMatchCoordinate(coords);

        if (currMax.equals(currOrigin)) {
            throw new InternalAlgorithmError("Maximum and Origin point to same point - " + currMax
                    + " and " + currOrigin + ". Size of match coordinates set is " + coords.size());
        }

        // Filter postdominated results
        candidates = filterPostdominated(currOrigin, currMax);

        // Set match invalid
        setMatchesInvalid(coords);

        // Zero the match
        zeroMatch(currOrigin, currMax);

        // Generate array subsets we need to recompute
        Set<ArraySubset> subsetsToCompute = generateSubsets(currOrigin, currMax);

        // Recompute given array subsets
        for (ArraySubset subset : subsetsToCompute) {
            mergeIntoCandidates(computeArraySubset(subset));
        }
    }

    // IntelliJ has an aversion to passing anything with a 'y' in it as the right side of a pair
    // This alleviates the warning
    //noinspection SuspiciousNameCombination
    return Pair.of(xList, yList);
}
From source file:org.ldp4j.http.AlternativeEvaluation.java
private static <T extends Negotiable> AttributeQuality<T> computeAttributeQuality(T attribute,
        List<Weighted<T>> acceptable, Matcher<T> matcher, Comparator<T> comparator) {
    if (attribute == null) {
        return AttributeQuality.create(1.0D, getPreferredValue(acceptable, comparator));
    }
    if (acceptable.isEmpty()) {
        return AttributeQuality.create(1.0D);
    }
    final SortedSet<Weighted<T>> compatible = Sets.newTreeSet(WeightedComparator.create(comparator));
    for (Weighted<T> accept : acceptable) {
        if (matcher.includes(accept.entity(), attribute)) {
            compatible.add(accept);
        }
    }
    if (compatible.isEmpty()) {
        return AttributeQuality.create(0.0D);
    }
    return AttributeQuality.create(Iterables.get(compatible, 0));
}
From source file:com.nuodb.migrator.cli.parse.option.ArgumentImpl.java
@Override
public void help(StringBuilder buffer, Collection<HelpHint> hints, Comparator<Option> comparator) {
    int minimum = getMinimum();
    int maximum = getMaximum();
    boolean optional = hints.contains(HelpHint.OPTIONAL);
    boolean numbered = (maximum > 1) && hints.contains(HelpHint.ARGUMENT_NUMBERED);
    boolean bracketed = hints.contains(HelpHint.ARGUMENT_BRACKETED);

    // if infinite args are allowed then crop the list
    int count;
    Collection<String> helpValues = getHelpValues();
    boolean hasHelpValues;
    if (helpValues != null && helpValues.size() > 0) {
        count = helpValues.size();
        hasHelpValues = true;
    } else {
        count = (maximum == Integer.MAX_VALUE) ? 2 : maximum;
        hasHelpValues = false;
    }

    int i = 0;
    // for each argument
    while (i < count) {
        // if we're past the first append a space
        if (i > 0) {
            buffer.append(' ');
        }
        // if the next arg is optional
        if ((i >= minimum) && (optional || (i > 0))) {
            buffer.append('[');
        }
        if (bracketed) {
            buffer.append('<');
        }
        // append name
        buffer.append(hasHelpValues ? Iterables.get(helpValues, i) : getName());
        ++i;
        // if numbering
        if (numbered) {
            buffer.append(i);
        }
        if (bracketed) {
            buffer.append('>');
        }
    }

    // if infinite args are allowed
    if (!hasHelpValues && maximum == Integer.MAX_VALUE) {
        buffer.append(" ...");
    }

    // for each argument
    while (i > 0) {
        --i;
        // if the next arg is optional
        if ((i >= minimum) && (optional || (i > 0))) {
            buffer.append(']');
        }
    }
}