Usage examples for com.google.common.collect Multimap#size()
int size();
From source file: com.vecna.dbDiff.builder.RelationalDatabaseBuilderImpl.java
/** * Retrieve index information for a table. * @param table the table./*from ww w . java 2s.c o m*/ * @return list of indices. * @throws SQLException if thrown by the jdbc driver. */ private List<RelationalIndex> getIndices(RelationalTable table) throws SQLException { List<RelationalIndex> indices = new ArrayList<>(); // maps index name to column names Multimap<String, String> idxColumns = ArrayListMultimap.create(); // one row per index-column pair ResultSet rs = m_metadataFactory.getMetadata().getIndexInfo(table.getCatalogSchema().getCatalog(), table.getCatalogSchema().getSchema(), table.getName(), false, false); while (rs.next()) { String idxName = rs.getString(6); Collection<String> columns = idxColumns.get(idxName); if (columns.isEmpty()) { // build a new index RelationalIndex index = new RelationalIndex(table.getCatalogSchema(), rs.getString(6)); indices.add(index); } columns.add(rs.getString(9)); } for (RelationalIndex index : indices) { List<Column> columns = new ArrayList<>(idxColumns.size()); for (String idxColumnName : idxColumns.get(index.getName())) { // Some db preserved names are double-quoted String columnName = idxColumnName.replaceAll("^\"|\"$", ""); Column column = table.getColumnByName(columnName); if (column == null) { throw new InconsistentSchemaException("cannot find column " + columnName + " referenced by index " + index.getName() + " in table " + table.getName()); } columns.add(column); } index.setColumns(columns); } return indices; }
From source file: org.joda.beans.ser.GuavaSerIteratorFactory.java
/** * Gets an iterator wrapper for {@code Multimap}. * /* ww w .j a v a 2 s . c o m*/ * @param map the collection, not null * @param declaredType the declared type, not null * @param keyType the key type, not null * @param valueType the value type, not null * @param valueTypeTypes the generic parameters of the value type * @return the iterator, not null */ @SuppressWarnings("rawtypes") public static final SerIterator multimap(final Multimap<?, ?> map, final Class<?> declaredType, final Class<?> keyType, final Class<?> valueType, final List<Class<?>> valueTypeTypes) { return new SerIterator() { private final Iterator it = map.entries().iterator(); private Map.Entry current; @Override public String metaTypeName() { if (map instanceof SetMultimap) { return "SetMultimap"; } if (map instanceof ListMultimap) { return "ListMultimap"; } return "Multimap"; } @Override public boolean metaTypeRequired() { if (map instanceof SetMultimap) { return SetMultimap.class.isAssignableFrom(declaredType) == false; } if (map instanceof ListMultimap) { return ListMultimap.class.isAssignableFrom(declaredType) == false; } return Multimap.class.isAssignableFrom(declaredType) == false; } @Override public SerCategory category() { return SerCategory.MAP; } @Override public int size() { return map.size(); } @Override public boolean hasNext() { return it.hasNext(); } @Override public void next() { current = (Map.Entry) it.next(); } @Override public Class<?> keyType() { return keyType; } @Override public Object key() { return current.getKey(); } @Override public Class<?> valueType() { return valueType; } @Override public List<Class<?>> valueTypeTypes() { return valueTypeTypes; } @Override public Object value() { return current.getValue(); } }; }
From source file: edu.uci.ics.sourcerer.tools.java.extractor.missing.MissingTypeResolver.java
/**
 * Matches a set of cluster ids to a set of library-version ids via a greedy, set-cover-style
 * heuristic: first take every library version whose clusters are all wanted ("core"), then
 * greedily add further library versions to cover the remaining clusters.
 *
 * @param clusters the cluster ids that must be covered.
 * @return the ids of the selected library versions.
 */
private Set<Integer> matchClustersToLibraryVersions(Set<Integer> clusters) {
    TaskProgressLogger task = TaskProgressLogger.get();
    task.start("Matching clusters to library versions");
    // Ideally we'd want to minimize the number of extra clusters included and the overlap of
    // clusters between libraries
    // TODO augment this with some measure of how well the cluster matches the missing types:
    // weight the clusters by how many types they contain
    final Multimap<Integer, Integer> lv2c = HashMultimap.create(); // library version -> its clusters
    final Multimap<Integer, Integer> c2lv = HashMultimap.create(); // cluster -> library versions containing it
    final Map<Integer, Integer> clusterSizes = new HashMap<>(); // cluster -> type count (lazily filled)
    // Build the maps
    task.start("Building the maps");
    for (Integer cluster : clusters) {
        for (Integer libraryVersion : findLibraryVersionByCluster.select(cluster)) {
            c2lv.put(cluster, libraryVersion);
            // note: lv2c also picks up clusters NOT in the input set ("extras")
            for (Integer clus : findClusterByLibraryVersion.select(libraryVersion)) {
                lv2c.put(libraryVersion, clus);
            }
        }
    }
    task.report("Library versions by cluster: " + c2lv.keySet().size() + " keys, " + c2lv.size() + " entries");
    task.report("Cluster by library version: " + lv2c.keySet().size() + " keys, " + lv2c.size() + " entries");
    // cache the type count of every cluster reachable from any candidate library version
    for (Integer clusterID : lv2c.values()) {
        if (!clusterSizes.containsKey(clusterID)) {
            clusterSizes.put(clusterID, findTypeCountByCluster.select(clusterID));
        }
    }
    task.finish();
    Set<Integer> coreLibraryVersions = new HashSet<>();
    Set<Integer> coveredClusters = new HashSet<>();
    // Start by picking all the library versions that don't contain extra clusters
    task.start("Checking for core library versions");
    for (Integer libraryVersionID : lv2c.keySet()) {
        boolean noExtra = true;
        for (Integer clusterID : lv2c.get(libraryVersionID)) {
            if (!clusters.contains(clusterID)) {
                noExtra = false;
            }
        }
        if (noExtra) {
            coreLibraryVersions.add(libraryVersionID);
            coveredClusters.addAll(lv2c.get(libraryVersionID));
        }
    }
    // sanity check: core library versions should only cover wanted clusters
    if (coveredClusters.retainAll(clusters)) {
        task.report("Retaining should have done nothing");
    }
    task.report(coveredClusters.size() + " of " + clusters.size() + " covered");
    task.finish();
    Set<Integer> finalLibraryVersions = new HashSet<>();
    Set<Integer> clustersToBeCovered = new HashSet<>(clusters);
    // If we covered all the clusters, skip this step
    if (coveredClusters.size() < clusters.size()) {
        task.start("Checking for additional library versions");
        final Set<Integer> missingClusters = new HashSet<>();
        for (Integer clusterID : clusters) {
            if (!coveredClusters.contains(clusterID)) {
                missingClusters.add(clusterID);
            }
        }
        task.report(missingClusters.size() + " missing clusters");
        Set<Integer> additionalLibraryVersions = new HashSet<>();
        // Find each library that can provide missing clusters,
        // and measure their "cost per cluster" (number of extra clusters - number of clusters provided)
        // let's try measuring cost instead by number of extra types - number of types provided
        for (Integer clusterID : missingClusters) {
            Integer bestLibraryVersionID = null;
            int bestCost = Integer.MAX_VALUE;
            for (Integer libraryVersionID : c2lv.get(clusterID)) {
                Collection<Integer> clus = lv2c.get(libraryVersionID);
                int provided = 0; // total types in wanted clusters of this library version
                int extra = 0; // total types in unwanted clusters it would drag in
                for (Integer cluster : clus) {
                    if (clusters.contains(cluster)) {
                        provided += clusterSizes.get(cluster);
                    } else {
                        extra += clusterSizes.get(cluster);
                    }
                }
                int cost = extra - provided;
                if (cost < bestCost) {
                    bestLibraryVersionID = libraryVersionID;
                    bestCost = cost;
                }
            }
            // NOTE(review): if c2lv has no entry for this cluster, this adds null — presumably
            // every missing cluster has at least one candidate library version; verify upstream
            additionalLibraryVersions.add(bestLibraryVersionID);
        }
        task.report(additionalLibraryVersions.size() + " additional library versions identified");
        task.start("Sorting additional library versions");
        // Sort the additional library versions by the number of additional types they contain
        Integer[] arr = additionalLibraryVersions.toArray(new Integer[additionalLibraryVersions.size()]);
        Arrays.sort(arr, new Comparator<Integer>() {
            @Override
            public int compare(Integer o1, Integer o2) {
                // negated => descending by how many missing clusters each library version covers
                return -Integer.compare(CollectionUtils.intersectionSize(lv2c.get(o1), missingClusters),
                        CollectionUtils.intersectionSize(lv2c.get(o2), missingClusters));
            }
        });
        task.finish();
        task.start("Picking additional library versions");
        // Pick the libraries to actually add
        for (Integer libraryVersionID : arr) {
            Collection<Integer> clus = lv2c.get(libraryVersionID);
            if (CollectionUtils.containsAny(missingClusters, clus)) {
                finalLibraryVersions.add(libraryVersionID);
                missingClusters.removeAll(clus);
                clustersToBeCovered.removeAll(clus);
            }
        }
        task.report("Added " + finalLibraryVersions.size() + " library versions");
        task.finish();
        task.finish();
    }
    task.start("Sorting core library versions");
    // Now order the core libraries by the number of clusters they contain
    Integer[] arr = coreLibraryVersions.toArray(new Integer[coreLibraryVersions.size()]);
    Arrays.sort(arr, new Comparator<Integer>() {
        @Override
        public int compare(Integer o1, Integer o2) {
            // negated => descending by cluster count
            return -Integer.compare(lv2c.get(o1).size(), lv2c.get(o2).size());
        }
    });
    task.finish();
    task.start("Picking core library versions");
    // Pick the core libraries to actually add
    for (Integer libraryVersionID : arr) {
        Collection<Integer> clus = lv2c.get(libraryVersionID);
        if (CollectionUtils.containsAny(clustersToBeCovered, clus)) {
            finalLibraryVersions.add(libraryVersionID);
            clustersToBeCovered.removeAll(clus);
        }
    }
    task.finish();
    task.report(finalLibraryVersions.size() + " library versions matched.");
    task.finish();
    return finalLibraryVersions;
}
From source file: org.apache.hadoop.hbase.index.Indexer.java
@Override public void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c) { Multimap<HTableInterfaceReference, Mutation> updates = failedIndexEdits .getEdits(c.getEnvironment().getRegion()); if (this.disabled) { super.postOpen(c); return;/*from w w w. ja va 2 s .c o m*/ } LOG.info("Found some outstanding index updates that didn't succeed during" + " WAL replay - attempting to replay now."); //if we have no pending edits to complete, then we are done if (updates == null || updates.size() == 0) { return; } // do the usual writer stuff, killing the server again, if we can't manage to make the index // writes succeed again try { writer.writeAndKillYourselfOnFailure(updates); } catch (IOException e) { LOG.error("Exception thrown instead of killing server during index writing", e); } }
From source file: org.summer.dsl.model.types.util.TypeArgumentContextProvider.java
/**
 * Produces a normalized map of type parameter to a single resolved type reference from a
 * multimap that may bind each parameter to several candidate resolutions.
 * For multiply-bound parameters, EXACT bindings win outright; otherwise upper and lower
 * bounds are reconciled (common super type for uppers, best-conforming lower, wildcards
 * where the binding originated from a wildcard).
 *
 * @param map type parameter to candidate resolve infos, never null.
 * @return an immutable-or-tweaked map with exactly one reference per resolved parameter.
 */
protected Map<JvmTypeParameter, JvmTypeReference> normalizedCopy(Multimap<JvmTypeParameter, ResolveInfo> map) {
    if (map.isEmpty())
        return Collections.emptyMap();
    if (map.size() == 1) {
        // fast path: a single binding needs no reconciliation
        Map.Entry<JvmTypeParameter, ResolveInfo> singleElement = Iterables.getOnlyElement(map.entries());
        ResolveInfo singleResolveInfo = singleElement.getValue();
        JvmTypeReference reference = wildcardAwareGetReference(singleResolveInfo);
        return Collections.singletonMap(singleElement.getKey(), reference);
    }
    Map<JvmTypeParameter, JvmTypeReference> result = createMapWithTweakedToString();
    for (JvmTypeParameter boundParameter : map.keySet()) {
        Collection<ResolveInfo> boundTo = map.get(boundParameter);
        if (boundTo.size() == 1) {
            ResolveInfo singleResolveInfo = Iterables.getOnlyElement(boundTo);
            JvmTypeReference reference = wildcardAwareGetReference(singleResolveInfo);
            result.put(boundParameter, reference);
        } else {
            // multiple candidate bindings: partition them into upper and lower bounds,
            // remembering the first index of each kind (order matters below)
            List<ResolveInfo> boundToList = Lists.newArrayList(boundTo);
            List<JvmTypeReference> uppers = Lists.newArrayListWithCapacity(boundToList.size());
            List<ResolveInfo> lowers = Lists.newArrayListWithCapacity(boundToList.size());
            boolean done = false;
            int lowerIndex = Integer.MAX_VALUE;
            int upperIndex = Integer.MAX_VALUE;
            for (int i = 0; i < boundToList.size(); i++) {
                ResolveInfo info = boundToList.get(i);
                if (info.kind == ResolveInfoKind.EXACT) {
                    // an exact binding settles the parameter immediately
                    result.put(boundParameter, info.reference);
                    done = true;
                    break;
                } else if (info.kind == ResolveInfoKind.UPPER || info.kind == ResolveInfoKind.WC_UPPER) {
                    if (upperIndex == Integer.MAX_VALUE)
                        upperIndex = i;
                    if (!lowers.isEmpty() && upperIndex < lowerIndex) {
                        // only keep an upper that conforms to every lower collected so far
                        boolean conformant = true;
                        for (ResolveInfo lower : lowers) {
                            if (!getConformanceComputer().isConformant(info.reference, lower.reference)) {
                                conformant = false;
                                break;
                            }
                        }
                        if (conformant) {
                            uppers.add(info.reference);
                        }
                    } else {
                        uppers.add(info.reference);
                    }
                } else if (info.kind == ResolveInfoKind.LOWER || info.kind == ResolveInfoKind.WC_LOWER) {
                    if (lowerIndex == Integer.MAX_VALUE)
                        lowerIndex = i;
                    lowers.add(info);
                }
            }
            if (!done) {
                JvmTypeReference reference = null;
                if (!uppers.isEmpty() && upperIndex < lowerIndex) {
                    // uppers dominate: use their common super type
                    reference = conformanceComputer.getCommonSuperType(uppers);
                    if (uppers.size() == 1 && boundToList.get(upperIndex).kind == ResolveInfoKind.WC_UPPER) {
                        // a single wildcard-derived upper may be re-wrapped as "? extends T"
                        boolean useWildcard = true;
                        for (ResolveInfo lowerResolve : lowers) {
                            if (!conformanceComputer.isConformant(lowerResolve.reference, reference)) {
                                useWildcard = false;
                                break;
                            }
                        }
                        if (useWildcard) {
                            if (reference.eContainer() != null) {
                                // already contained in the EMF model: wrap in a delegate
                                // rather than re-parenting the reference
                                JvmDelegateTypeReference delegate = typesFactory
                                        .createJvmDelegateTypeReference();
                                delegate.setDelegate(reference);
                                reference = delegate;
                            }
                            JvmWildcardTypeReference wildCard = typeReferences.wildCard();
                            JvmUpperBound upperBound = typesFactory.createJvmUpperBound();
                            wildCard.getConstraints().add(upperBound);
                            upperBound.setTypeReference(reference);
                            reference = wildCard;
                        }
                    }
                } else if (!lowers.isEmpty()) {
                    // lowers dominate: pick the most specific (subtype-most) lower bound
                    boolean lowerWithWildcard = false;
                    ResolveInfo bestResolvedLower = null;
                    for (ResolveInfo resolvedLower : lowers) {
                        lowerWithWildcard |= resolvedLower.kind == ResolveInfoKind.WC_LOWER;
                        if (bestResolvedLower == null) {
                            bestResolvedLower = resolvedLower;
                        } else {
                            TypeConformanceResult conformanceResult = conformanceComputer.isConformant(
                                    bestResolvedLower.reference, resolvedLower.reference,
                                    new TypeConformanceComputationArgument(false, false, true));
                            if (conformanceResult.isConformant() && conformanceResult.getKinds()
                                    .contains(TypeConformanceResult.Kind.SUBTYPE))
                                bestResolvedLower = resolvedLower;
                        }
                    }
                    if (bestResolvedLower != null) {
                        if (lowers.size() == 1 || lowerWithWildcard) {
                            if (bestResolvedLower.kind != ResolveInfoKind.WC_LOWER) {
                                if (!uppers.isEmpty()) {
                                    // prefer the uppers' common super type when the lower conforms to it
                                    JvmTypeReference upper = conformanceComputer.getCommonSuperType(uppers);
                                    if (conformanceComputer.isConformant(bestResolvedLower.reference, upper))
                                        reference = upper;
                                    else
                                        reference = wildcardAwareGetReference(bestResolvedLower);
                                } else {
                                    reference = wildcardAwareGetReference(bestResolvedLower);
                                }
                            } else {
                                reference = wildcardAwareGetReference(bestResolvedLower);
                            }
                        } else {
                            reference = bestResolvedLower.reference;
                            if (!uppers.isEmpty()) {
                                JvmTypeReference upper = conformanceComputer.getCommonSuperType(uppers);
                                if (conformanceComputer.isConformant(reference, upper))
                                    reference = upper;
                            }
                        }
                    }
                }
                if (reference != null)
                    result.put(boundParameter, reference);
            }
        }
    }
    // recurse via the Map-based overload to finish normalization
    Map<JvmTypeParameter, JvmTypeReference> normalizedCopy = normalizedCopy(result);
    return normalizedCopy;
}
From source file: org.artifactory.build.BuildServiceImpl.java
@Override public Set<ArtifactoryBuildArtifact> getBuildArtifactsFileInfos(Build build, boolean useFallBack, String sourceRepo) {/*from w ww .j a va 2s . c o m*/ AqlBase.AndClause and = and(); log.debug("Executing Artifacts search for build {}:{}", build.getName(), build.getNumber()); if (StringUtils.isNotBlank(sourceRepo)) { log.debug("Search limited to repo: {}", sourceRepo); and.append(AqlApiItem.repo().equal(sourceRepo)); } and.append(AqlApiItem.property().property("build.name", AqlComparatorEnum.equals, build.getName())); and.append(AqlApiItem.property().property("build.number", AqlComparatorEnum.equals, build.getNumber())); AqlBase buildArtifactsQuery = AqlApiItem.create().filter(and); AqlEagerResult<AqlBaseFullRowImpl> aqlResult = aqlService.executeQueryEager(buildArtifactsQuery); log.debug("Search returned {} artifacts", aqlResult.getSize()); Multimap<String, Artifact> buildArtifacts = BuildServiceUtils.getBuildArtifacts(build); log.debug("This build contains {} artifacts (taken from build info)", buildArtifacts.size()); List<String> virtualRepoKeys = getVirtualRepoKeys(); Set<ArtifactoryBuildArtifact> matchedArtifacts = matchArtifactsToFileInfos(aqlResult.getResults(), buildArtifacts, virtualRepoKeys); log.debug("Matched {} build artifacts to actual paths returned by search", matchedArtifacts.size()); //buildArtifacts contains all remaining artifacts that weren't matched - match them with the weak search //only if indicated and if such remaining unmatched artifacts still exist in the map. 
if (useFallBack && !buildArtifacts.isEmpty()) { log.debug("Unmatched artifacts exist and 'use weak match fallback' flag is lit - executing weak match"); Set<ArtifactoryBuildArtifact> weaklyMatchedArtifacts = matchUnmatchedArtifactsNonStrict(build, sourceRepo, buildArtifacts, virtualRepoKeys); log.debug("Weak match has matched {} additional artifacts", weaklyMatchedArtifacts); matchedArtifacts.addAll(weaklyMatchedArtifacts); } //Lastly, populate matchedArtifacts with all remaining unmatched artifacts with null values to help users of //this function know if all build artifacts were found. log.debug("{} artifacts were not matched to actual paths", buildArtifacts.size()); for (Artifact artifact : buildArtifacts.values()) { matchedArtifacts.add(new ArtifactoryBuildArtifact(artifact, null)); } return matchedArtifacts; }
From source file: com.palantir.atlasdb.keyvalue.jdbc.JdbcKeyValueService.java
/**
 * Deletes the given cell/timestamp pairs from a table.
 *
 * @param tableName the table to delete from.
 * @param keys cell-to-timestamp pairs identifying the exact versions to remove.
 */
@Override
public void delete(final String tableName, final Multimap<Cell, Long> keys) {
    // nothing to delete
    if (keys.isEmpty()) {
        return;
    }
    run(new Function<DSLContext, Void>() {
        @Override
        public Void apply(DSLContext ctx) {
            // translate each (cell, timestamp) pair into a (row, column, timestamp) tuple
            Collection<Row3<byte[], byte[], Long>> tuples = Lists.newArrayListWithCapacity(keys.size());
            for (Entry<Cell, Long> keyEntry : keys.entries()) {
                Cell cell = keyEntry.getKey();
                tuples.add(row(cell.getRowName(), cell.getColumnName(), keyEntry.getValue()));
            }
            // issue one bulk DELETE matching all tuples at once
            ctx.deleteFrom(atlasTable(tableName).as(ATLAS_TABLE))
                    .where(row(A_ROW_NAME, A_COL_NAME, A_TIMESTAMP).in(tuples))
                    .execute();
            return null;
        }
    });
}
From source file: org.eclipse.xtext.xbase.typesystem.override.ResolvedFeatures.java
protected List<IResolvedOperation> computeAllOperations() { JvmType rawType = getRawType();//ww w . j a v a2s . c o m if (!(rawType instanceof JvmDeclaredType)) { return Collections.emptyList(); } Multimap<String, AbstractResolvedOperation> processedOperations = LinkedHashMultimap.create(); for (IResolvedOperation resolvedOperation : getDeclaredOperations()) { processedOperations.put(resolvedOperation.getDeclaration().getSimpleName(), (AbstractResolvedOperation) resolvedOperation); } if (targetVersion.isAtLeast(JavaVersion.JAVA8)) { computeAllOperationsFromSortedSuperTypes((JvmDeclaredType) rawType, processedOperations); } else { Set<JvmType> processedTypes = Sets.newHashSet(rawType); computeAllOperationsFromSuperTypes((JvmDeclaredType) rawType, processedOperations, processedTypes); } // make sure the declared operations are the first in the list List<IResolvedOperation> result = new ArrayList<IResolvedOperation>(processedOperations.size()); result.addAll(getDeclaredOperations()); for (AbstractResolvedOperation operation : processedOperations.values()) { if (operation.getDeclaration().getDeclaringType() != rawType) { result.add(operation); } } return Collections.unmodifiableList(result); }
From source file: org.artifactory.build.BuildServiceImpl.java
@Override public Map<Dependency, FileInfo> getBuildDependenciesFileInfos(Build build) { AqlBase.AndClause<AqlApiBuild> and = AqlApiBuild.and(AqlApiBuild.name().equal(build.getName()), AqlApiBuild.number().equal(build.getNumber())); log.debug("Executing dependencies search for build {}:{}", build.getName(), build.getNumber()); AqlBase buildDependenciesQuery = AqlApiBuild.create().filter(and); buildDependenciesQuery.include(AqlApiBuild.module().dependecy().name(), AqlApiBuild.module().dependecy().item().sha1Actual(), AqlApiBuild.module().dependecy().item().md5Actual(), AqlApiBuild.module().dependecy().item().sha1Orginal(), AqlApiBuild.module().dependecy().item().md5Orginal(), AqlApiBuild.module().dependecy().item().created(), AqlApiBuild.module().dependecy().item().modifiedBy(), AqlApiBuild.module().dependecy().item().createdBy(), AqlApiBuild.module().dependecy().item().updated(), AqlApiBuild.module().dependecy().item().repo(), AqlApiBuild.module().dependecy().item().path(), AqlApiBuild.module().dependecy().item().name(), AqlApiBuild.module().dependecy().item().size() //Ordering by the last updated field, in case of duplicates with the same checksum. 
).addSortElement(AqlApiBuild.module().dependecy().item().updated()).asc(); AqlEagerResult<AqlBaseFullRowImpl> results = aqlService.executeQueryEager(buildDependenciesQuery); log.debug("Search returned {} dependencies", results.getSize()); Multimap<String, Dependency> buildDependencies = BuildServiceUtils.getBuildDependencies(build); log.debug("This build contains {} dependencies (taken from build info)", buildDependencies.size()); Map<Dependency, FileInfo> matchedDependencies = matchDependenciesToFileInfos(results.getResults(), buildDependencies);//from w w w.ja va2 s.c o m log.debug("Matched {} build dependencies to actual paths returned by search", matchedDependencies.size()); //Lastly, populate matchedDependencies with all remaining unmatched dependencies with null values to help users //of this function know if all build artifacts were found. log.debug("{} dependencies were not matched to actual paths", buildDependencies.size()); for (Dependency dependency : buildDependencies.values()) { if (!matchedDependencies.containsKey(dependency)) { matchedDependencies.put(dependency, null); } } return matchedDependencies; }
From source file: accumulo.balancer.GroupBalancer.java
private boolean balanceExtraMultiple(Map<TServerInstance, TserverGroupInfo> tservers, int maxExtraGroups, Moves moves) {//from www . jav a2 s. c om Multimap<String, TserverGroupInfo> extraMultiple = HashMultimap.create(); for (TserverGroupInfo tgi : tservers.values()) { Map<String, Integer> extras = tgi.getExtras(); for (Entry<String, Integer> entry : extras.entrySet()) { if (entry.getValue() > 1) { extraMultiple.put(entry.getKey(), tgi); } } } balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, false); if (moves.size() < getMaxMigrations() && extraMultiple.size() > 0) { // no place to move so must exceed maxExtra temporarily... subsequent balancer calls will smooth things out balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, true); return false; } else { return true; } }