List of usage examples for com.google.common.collect Multimap size
int size();
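Multimap.size() returns the number of key-value pairs in the multimap, not the number of distinct keys; keySet().size() gives the latter, and entries() iterates exactly size() entries. A minimal sketch of the distinction (class name and sample values are illustrative only):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeExample {
    public static void main(String[] args) {
        Multimap<String, String> revisions = ArrayListMultimap.create();
        revisions.put("doc1", "1-a");
        revisions.put("doc1", "2-b");
        revisions.put("doc2", "1-c");

        // size() counts every key-value pair
        System.out.println(revisions.size());           // 3
        // keySet().size() counts distinct keys
        System.out.println(revisions.keySet().size());  // 2
        // entries() iterates exactly size() entries
        System.out.println(revisions.entries().size()); // 3
    }
}

The examples below use size() in exactly this sense: as a count of entries, often combined with keySet().size() when both keys and values matter.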
From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java
private void deleteIndexEntries(Connector connector, AccumuloTable table, long start) {
    LOG.info(format("Scanning index table %s to delete index entries", table.getIndexTableName()));
    BatchScanner scanner = null;
    BatchWriter indexWriter = null;
    try {
        // Create index writer and metrics writer, but we are never going to flush the metrics writer
        indexWriter = connector.createBatchWriter(table.getIndexTableName(), bwc);
        scanner = connector.createBatchScanner(table.getIndexTableName(), auths, 10);
        LOG.info(format("Created batch scanner against %s with auths %s", table.getIndexTableName(), auths));

        IteratorSetting timestampFilter = new IteratorSetting(21, "timestamp", TimestampFilter.class);
        TimestampFilter.setRange(timestampFilter, 0L, start);
        scanner.addScanIterator(timestampFilter);

        scanner.setRanges(connector.tableOperations().splitRangeByTablets(table.getIndexTableName(),
                new Range(), Integer.MAX_VALUE));

        // Scan the index table, gathering row IDs into batches
        long numTotalMutations = 0L;
        Map<ByteBuffer, RowStatus> rowIdStatuses = new HashMap<>();
        Multimap<ByteBuffer, Mutation> queryIndexEntries = MultimapBuilder.hashKeys().hashSetValues().build();
        Text text = new Text();
        for (Entry<Key, Value> entry : scanner) {
            ++numTotalMutations;

            ByteBuffer rowID = ByteBuffer.wrap(entry.getKey().getColumnQualifier(text).copyBytes());
            Mutation mutation = new Mutation(entry.getKey().getRow(text).copyBytes());
            mutation.putDelete(entry.getKey().getColumnFamily(text).copyBytes(),
                    entry.getKey().getColumnQualifier(text).copyBytes(),
                    entry.getKey().getColumnVisibilityParsed(), start);

            // Get status of this row ID
            switch (rowIdStatuses.getOrDefault(rowID, RowStatus.UNKNOWN)) {
                case ABSENT:
                case UNKNOWN:
                    // Absent or unknown? Add it to the collection to check the status and/or delete
                    queryIndexEntries.put(rowID, mutation);
                    break;
                case PRESENT:
                    // Present? No op
                    break;
            }

            if (queryIndexEntries.size() == 100000) {
                flushDeleteEntries(connector, table, start, indexWriter,
                        ImmutableMultimap.copyOf(queryIndexEntries), rowIdStatuses);
                queryIndexEntries.clear();
            }
        }

        flushDeleteEntries(connector, table, start, indexWriter,
                ImmutableMultimap.copyOf(queryIndexEntries), rowIdStatuses);
        queryIndexEntries.clear();

        LOG.info(format(
                "Finished scanning index entries. There are %s distinct row IDs containing %s entries. %s rows present in the data table and %s absent",
                rowIdStatuses.size(), numTotalMutations,
                rowIdStatuses.entrySet().stream().filter(entry -> entry.getValue().equals(RowStatus.PRESENT)).count(),
                rowIdStatuses.entrySet().stream().filter(entry -> entry.getValue().equals(RowStatus.ABSENT)).count()));

        if (dryRun) {
            LOG.info(format("Would have deleted %s index entries", numDeletedIndexEntries));
        }
        else {
            LOG.info(format("Deleted %s index entries", numDeletedIndexEntries));
        }
    }
    catch (AccumuloException | AccumuloSecurityException e) {
        LOG.error("Accumulo exception", e);
    }
    catch (TableNotFoundException e) {
        LOG.error("Table not found, must have been deleted during process", e);
    }
    finally {
        if (indexWriter != null) {
            try {
                indexWriter.close();
            }
            catch (MutationsRejectedException e) {
                LOG.error("Server rejected mutations", e);
            }
        }
        if (scanner != null) {
            scanner.close();
        }
    }
}
From source file:com.proofpoint.reporting.PrometheusCollector.java
Multimap<String, TaggedValue> collectData() {
    Multimap<String, TaggedValue> valuesByMetric = MultimapBuilder.treeKeys().treeSetValues().build();
    for (RegistrationInfo registrationInfo : reportedBeanRegistry.getReportedBeans()) {
        StringBuilder nameBuilder = new StringBuilder();
        if (registrationInfo.isApplicationPrefix()) {
            nameBuilder.append(applicationPrefix);
        }
        nameBuilder.append(sanitizeMetricName(registrationInfo.getNamePrefix()));

        for (PrometheusBeanAttribute attribute : registrationInfo.getReportedBean().getPrometheusAttributes()) {
            String metricName = sanitizeMetricName(attribute.getName());
            String name;
            if ("".equals(metricName)) {
                name = nameBuilder.toString();
            } else {
                name = nameBuilder + "_" + metricName;
            }
            if (INITIAL_DIGIT_PATTERN.matcher(name).lookingAt()) {
                name = "_" + name;
            }

            ValueAndTimestamp valueAndTimestamp = null;
            try {
                valueAndTimestamp = attribute.getValue(null);
            } catch (MBeanException | ReflectionException ignored) {
            }
            if (valueAndTimestamp != null) {
                valuesByMetric.put(name, taggedValue(registrationInfo.getTags(), valueAndTimestamp));
            }
        }
    }

    valuesByMetric.put("ReportCollector_NumMetrics",
            taggedValue(versionTags, valueAndTimestamp(simplePrometheusValue(valuesByMetric.size()), null)));

    if (bucketIdProvider.get().getTimestamp() < startupTimestamp + TimeUnit.MINUTES.toMillis(10)) {
        valuesByMetric.put("ReportCollector_ServerStart",
                taggedValue(versionTags, valueAndTimestamp(simplePrometheusValue(1), startupTimestamp)));
    }
    return valuesByMetric;
}
From source file:net.minecraftforge.registries.GameData.java
@SuppressWarnings({ "unchecked", "rawtypes" }) public static Multimap<ResourceLocation, ResourceLocation> injectSnapshot( Map<ResourceLocation, ForgeRegistry.Snapshot> snapshot, boolean injectFrozenData, boolean isLocalWorld) { FMLLog.log.info("Injecting existing registry data into this {} instance", FMLCommonHandler.instance().getEffectiveSide().isServer() ? "server" : "client"); RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.validateContent(name)); RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.dump(name)); RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.resetDelegates()); List<ResourceLocation> missingRegs = snapshot.keySet().stream() .filter(name -> !RegistryManager.ACTIVE.registries.containsKey(name)).collect(Collectors.toList()); if (missingRegs.size() > 0) { String text = "Forge Mod Loader detected missing/unknown registrie(s).\n\n" + "There are " + missingRegs.size() + " missing registries in this save.\n" + "If you continue the missing registries will get removed.\n" + "This may cause issues, it is advised that you create a world backup before continuing.\n\n" + "Missing Registries:\n"; for (ResourceLocation s : missingRegs) text += s.toString() + "\n"; if (!StartupQuery.confirm(text)) StartupQuery.abort();/*from w w w .j a v a 2s. c o m*/ } RegistryManager STAGING = new RegistryManager("STAGING"); final Map<ResourceLocation, Map<ResourceLocation, Integer[]>> remaps = Maps.newHashMap(); final LinkedHashMap<ResourceLocation, Map<ResourceLocation, Integer>> missing = Maps.newLinkedHashMap(); // Load the snapshot into the "STAGING" registry snapshot.forEach((key, value) -> { final Class<? extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(key); remaps.put(key, Maps.newLinkedHashMap()); missing.put(key, Maps.newHashMap()); loadPersistentDataToStagingRegistry(RegistryManager.ACTIVE, STAGING, remaps.get(key), missing.get(key), key, value, clazz); }); snapshot.forEach((key, value) -> { value.dummied.forEach(dummy -> { Map<ResourceLocation, Integer> m = missing.get(key); ForgeRegistry<?> reg = STAGING.getRegistry(key); // Currently missing locally, we just inject and carry on if (m.containsKey(dummy)) { if (reg.markDummy(dummy, m.get(dummy))) m.remove(dummy); } else if (isLocalWorld) { if (ForgeRegistry.DEBUG) FMLLog.log.debug("Registry {}: Resuscitating dummy entry {}", key, dummy); } else { // The server believes this is a dummy block identity, but we seem to have one locally. 
This is likely a conflict // in mod setup - Mark this entry as a dummy int id = reg.getID(dummy); FMLLog.log.warn( "Registry {}: The ID {} is currently locally mapped - it will be replaced with a dummy for this session", key, id); reg.markDummy(dummy, id); } }); }); int count = missing.values().stream().mapToInt(Map::size).sum(); if (count > 0) { FMLLog.log.debug("There are {} mappings missing - attempting a mod remap", count); Multimap<ResourceLocation, ResourceLocation> defaulted = ArrayListMultimap.create(); Multimap<ResourceLocation, ResourceLocation> failed = ArrayListMultimap.create(); missing.entrySet().stream().filter(e -> e.getValue().size() > 0).forEach(m -> { ResourceLocation name = m.getKey(); ForgeRegistry<?> reg = STAGING.getRegistry(name); RegistryEvent.MissingMappings<?> event = reg.getMissingEvent(name, m.getValue()); MinecraftForge.EVENT_BUS.post(event); List<MissingMappings.Mapping<?>> lst = event.getAllMappings().stream() .filter(e -> e.getAction() == MissingMappings.Action.DEFAULT).collect(Collectors.toList()); if (!lst.isEmpty()) { FMLLog.log.error("Unidentified mapping from registry {}", name); lst.forEach(map -> { FMLLog.log.error(" {}: {}", map.key, map.id); if (!isLocalWorld) defaulted.put(name, map.key); }); } event.getAllMappings().stream().filter(e -> e.getAction() == MissingMappings.Action.FAIL) .forEach(fail -> failed.put(name, fail.key)); final Class<? extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(name); processMissing(clazz, name, STAGING, event, m.getValue(), remaps.get(name), defaulted.get(name), failed.get(name)); }); if (!defaulted.isEmpty() && !isLocalWorld) return defaulted; if (!defaulted.isEmpty()) { StringBuilder buf = new StringBuilder(); buf.append("Forge Mod Loader detected missing registry entries.\n\n").append("There are ") .append(defaulted.size()).append(" missing entries in this save.\n") .append("If you continue the missing entries will get removed.\n") .append("A world backup will be automatically created in your saves directory.\n\n"); defaulted.asMap().forEach((name, entries) -> { buf.append("Missing ").append(name).append(":\n"); entries.forEach(rl -> buf.append(" ").append(rl).append("\n")); }); boolean confirmed = StartupQuery.confirm(buf.toString()); if (!confirmed) StartupQuery.abort(); try { String skip = System.getProperty("fml.doNotBackup"); if (skip == null || !"true".equals(skip)) { ZipperUtil.backupWorld(); } else { for (int x = 0; x < 10; x++) FMLLog.log.error("!!!!!!!!!! UPDATING WORLD WITHOUT DOING BACKUP !!!!!!!!!!!!!!!!"); } } catch (IOException e) { StartupQuery.notify("The world backup couldn't be created.\n\n" + e); StartupQuery.abort(); } } if (!defaulted.isEmpty()) { if (isLocalWorld) FMLLog.log.error( "There are unidentified mappings in this world - we are going to attempt to process anyway"); } } if (injectFrozenData) { // If we're loading from disk, we can actually substitute air in the block map for anything that is otherwise "missing". This keeps the reference in the map, in case // the block comes back later missing.forEach((name, m) -> { ForgeRegistry<?> reg = STAGING.getRegistry(name); m.forEach((rl, id) -> reg.markDummy(rl, id)); }); // If we're loading up the world from disk, we want to add in the new data that might have been provisioned by mods // So we load it from the frozen persistent registry RegistryManager.ACTIVE.registries.forEach((name, reg) -> { final Class<? 
extends IForgeRegistryEntry> clazz = RegistryManager.ACTIVE.getSuperType(name); loadFrozenDataToStagingRegistry(STAGING, name, remaps.get(name), clazz); }); } // Validate that all the STAGING data is good STAGING.registries.forEach((name, reg) -> reg.validateContent(name)); // Load the STAGING registry into the ACTIVE registry for (Map.Entry<ResourceLocation, ForgeRegistry<? extends IForgeRegistryEntry<?>>> r : RegistryManager.ACTIVE.registries .entrySet()) { final Class<? extends IForgeRegistryEntry> registrySuperType = RegistryManager.ACTIVE .getSuperType(r.getKey()); loadRegistry(r.getKey(), STAGING, RegistryManager.ACTIVE, registrySuperType, true); } // Dump the active registry RegistryManager.ACTIVE.registries.forEach((name, reg) -> reg.dump(name)); // Tell mods that the ids have changed Loader.instance().fireRemapEvent(remaps, false); // The id map changed, ensure we apply object holders ObjectHolderRegistry.INSTANCE.applyObjectHolders(); // Return an empty list, because we're good return ArrayListMultimap.create(); }
From source file:com.cloudant.sync.datastore.BasicDatastore.java
List<Multimap<String, String>> multiMapPartitions(Multimap<String, String> revisions, int size) {
    List<Multimap<String, String>> partitions = new ArrayList<Multimap<String, String>>();
    Multimap<String, String> current = HashMultimap.create();
    for (Map.Entry<String, String> e : revisions.entries()) {
        current.put(e.getKey(), e.getValue());
        // the query used below (see revsDiffBatch()) needs
        // `multimap.size() + multimap.keySet().size()` placeholders,
        // and SQLite has a limit on the number of placeholders in a single query.
        if (current.size() + current.keySet().size() >= size) {
            partitions.add(current);
            current = HashMultimap.create();
        }
    }
    if (current.size() > 0) {
        partitions.add(current);
    }
    return partitions;
}
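The partition bound above works because each partition is later bound into a single SQL statement that needs one placeholder per distinct document id plus one per revision value, i.e. keySet().size() + size() parameters (see revsDiffBatch() further down this page). A minimal sketch of that arithmetic; the 999 limit is quoted here only as SQLite's common default, and the class and sample data are illustrative:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class PlaceholderBudgetExample {
    // SQLite's default host-parameter limit is commonly 999; illustrative value only.
    private static final int MAX_PLACEHOLDERS = 999;

    public static void main(String[] args) {
        Multimap<String, String> batch = HashMultimap.create();
        batch.put("doc1", "1-a");
        batch.put("doc1", "2-b");
        batch.put("doc2", "1-c");

        // One placeholder per distinct doc id, plus one per revision value
        int placeholdersNeeded = batch.keySet().size() + batch.size(); // 2 + 3 = 5
        System.out.println(placeholdersNeeded <= MAX_PLACEHOLDERS);    // true, fits in one query
    }
}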
From source file:com.yahoo.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java
private Multimap<Long, ResourceUnit> getFinalCandidatesWithPolicy(NamespaceName namespace,
        Multimap<Long, ResourceUnit> primaries, Multimap<Long, ResourceUnit> shared) {
    Multimap<Long, ResourceUnit> finalCandidates = TreeMultimap.create();
    // if not enough primary then it should be union of primaries and secondaries
    finalCandidates.putAll(primaries);
    if (policies.shouldFailoverToSecondaries(namespace, primaries.size())) {
        log.debug(
                "Not enough of primaries [{}] available for namespace - [{}], "
                        + "adding shared [{}] as possible candidate owners",
                primaries.size(), namespace.toString(), shared.size());
        finalCandidates.putAll(shared);
    }
    return finalCandidates;
}
From source file:com.cloudant.sync.datastore.BasicDatastore.java
/**
 * Removes revisions present in the datastore from the input map.
 *
 * @param revisions a multimap from document id to set of revisions. The
 *                  map is modified in place for performance reasons.
 */
void revsDiffBatch(Multimap<String, String> revisions) {
    final String sql = String.format(
            "SELECT docs.docid, revs.revid FROM docs, revs "
                    + "WHERE docs.doc_id = revs.doc_id AND docs.docid IN (%s) AND revs.revid IN (%s) "
                    + "ORDER BY docs.docid",
            SQLDatabaseUtils.makePlaceholders(revisions.keySet().size()),
            SQLDatabaseUtils.makePlaceholders(revisions.size()));

    String[] args = new String[revisions.keySet().size() + revisions.size()];
    String[] keys = revisions.keySet().toArray(new String[revisions.keySet().size()]);
    String[] values = revisions.values().toArray(new String[revisions.size()]);
    System.arraycopy(keys, 0, args, 0, revisions.keySet().size());
    System.arraycopy(values, 0, args, revisions.keySet().size(), revisions.size());

    Cursor cursor = null;
    try {
        cursor = this.sqlDb.rawQuery(sql, args);
        while (cursor.moveToNext()) {
            String docId = cursor.getString(0);
            String revId = cursor.getString(1);
            revisions.remove(docId, revId);
        }
    } catch (SQLException e) {
        e.printStackTrace();
    } finally {
        DatabaseUtils.closeCursorQuietly(cursor);
    }
}
From source file:com.flexive.core.search.genericSQL.GenericSQLDataFilter.java
/**
 * Builds an 'AND' condition.
 *
 * @param sb the StringBuilder to use
 * @param br the brace
 * @throws FxSqlSearchException if the build failed
 */
private void buildAnd(StringBuilder sb, Brace br) throws FxSqlSearchException {
    // Start AND
    if (br.size() > 1) {
        final Multimap<String, ConditionTableInfo> tables = getUsedContentTables(br, true);

        // for "AND" we can only optimize when ALL flatstorage conditions are not multi-lang and on the same level,
        // i.e. that table must have exactly one flat-storage entry, and we cannot optimize if an IS NULL is present
        if (tables.size() == 1 && tables.values().iterator().next().isFlatStorage()
                && !tables.values().iterator().next().isMultiLang() && !containsIsNullCondition(br)) {
            sb.append(getOptimizedFlatStorageSubquery(br, tables.keySet().iterator().next(), true));
            return;
        }

        if (tables.size() == 1 && tables.keys().iterator().next().equals(DatabaseConst.TBL_CONTENT)) {
            // combine main table selects into a single one
            sb.append("(SELECT id,ver," + getEmptyLanguage() + " as lang FROM " + DatabaseConst.TBL_CONTENT + " cd"
                    + " WHERE " + getOptimizedMainTableConditions(br, "cd") + ")");
            return;
        }

        // check if there are two or more flat storage queries in the same level that can be grouped
        try {
            final Brace grouped = br.groupConditions(new Brace.GroupFunction() {
                @Override
                public Object apply(Condition cond) {
                    try {
                        return getPropertyInfo(cond);
                    } catch (FxSqlSearchException e) {
                        throw e.asRuntimeException();
                    }
                }
            });
            if (grouped != br) {
                // reorg happened - process new version
                if (LOG.isTraceEnabled()) {
                    LOG.trace("AND statement reorganized, new statement: " + grouped);
                }
                buildAnd(sb, grouped);
                return;
            }
        } catch (SqlParserException e) {
            throw new FxSqlSearchException(e);
        }
    }

    int pos = 0;
    final StringBuilder combinedConditions = new StringBuilder();
    int firstId = -1;
    for (BraceElement be : br.getElements()) {
        if (pos == 0) {
            firstId = be.getId();
            // TODO: do we need .lang here?
            sb.append(("(SELECT tbl" + firstId + ".id,tbl" + firstId + ".ver,tbl" + firstId + ".lang FROM\n"));
        } else {
            sb.append(",");
            combinedConditions.append((pos > 1) ? " AND " : " ").append("tbl").append(firstId).append(".id=tbl")
                    .append(be.getId()).append(".id AND ").append("tbl").append(firstId).append(".ver=tbl")
                    .append(be.getId()).append(".ver AND ").append("(tbl").append(firstId)
                    .append(".lang=0 or tbl").append(firstId).append(".lang IS NULL OR ").append("tbl")
                    .append(be.getId()).append(".lang=0 OR tbl").append(be.getId()).append(".lang IS NULL OR ")
                    .append("tbl").append(firstId).append(".lang=tbl").append(be.getId()).append(".lang)");
        }
        if (be instanceof Condition) {
            sb.append(getConditionSubQuery(br.getStatement(), (Condition) be));
        } else if (be instanceof Brace) {
            build(sb, (Brace) be);
        } else {
            throw new FxSqlSearchException(LOG, "ex.sqlSearch.filter.invalidBrace", be);
        }
        sb.append(" tbl").append(be.getId()).append("\n");
        pos++;
    }
    // Where links the tables together
    sb.append(" WHERE ");
    sb.append(combinedConditions);
    // Close AND
    sb.append(")");
}
From source file:com.facebook.presto.accumulo.index.IndexLookup.java
private boolean getRangesWithMetrics(ConnectorSession session, String schema, String table,
        Multimap<AccumuloColumnConstraint, Range> constraintRanges, Collection<Range> rowIdRanges,
        List<TabletSplitMetadata> tabletSplits, Authorizations auths) throws Exception {
    String metricsTable = getMetricsTableName(schema, table);
    long numRows = getNumRowsInTable(metricsTable, auths);

    // Get the cardinalities from the metrics table
    Multimap<Long, AccumuloColumnConstraint> cardinalities;
    if (isIndexShortCircuitEnabled(session)) {
        cardinalities = cardinalityCache.getCardinalities(schema, table, auths, constraintRanges,
                (long) (numRows * getIndexSmallCardThreshold(session)),
                getIndexCardinalityCachePollingDuration(session));
    } else {
        // disable short circuit using 0
        cardinalities = cardinalityCache.getCardinalities(schema, table, auths, constraintRanges, 0,
                new Duration(0, TimeUnit.MILLISECONDS));
    }

    Optional<Entry<Long, AccumuloColumnConstraint>> entry = cardinalities.entries().stream().findFirst();
    if (!entry.isPresent()) {
        return false;
    }

    Entry<Long, AccumuloColumnConstraint> lowestCardinality = entry.get();
    String indexTable = getIndexTableName(schema, table);
    double threshold = getIndexThreshold(session);
    List<Range> indexRanges;

    // If the smallest cardinality in our list is above the lowest cardinality threshold,
    // we should look at intersecting the row ID ranges to try and get under the threshold.
    if (smallestCardAboveThreshold(session, numRows, lowestCardinality.getKey())) {
        // If we only have one column, we can skip the intersection process and just check the index threshold
        if (cardinalities.size() == 1) {
            long numEntries = lowestCardinality.getKey();
            double ratio = ((double) numEntries / (double) numRows);
            LOG.debug("Use of index would scan %d of %d rows, ratio %s. Threshold %2f, Using for table? %b",
                    numEntries, numRows, ratio, threshold, ratio < threshold);
            if (ratio >= threshold) {
                return false;
            }
        }

        // Else, get the intersection of all row IDs for all column constraints
        LOG.debug("%d indexed columns, intersecting ranges", constraintRanges.size());
        indexRanges = getIndexRanges(indexTable, constraintRanges, rowIdRanges, auths);
        LOG.debug("Intersection results in %d ranges from secondary index", indexRanges.size());
    } else {
        // Else, we don't need to intersect the columns and we can just use the column with the lowest cardinality,
        // so get all those row IDs in a set of ranges.
        LOG.debug("Not intersecting columns, using column with lowest cardinality ");
        ImmutableMultimap.Builder<AccumuloColumnConstraint, Range> lcBldr = ImmutableMultimap.builder();
        lcBldr.putAll(lowestCardinality.getValue(), constraintRanges.get(lowestCardinality.getValue()));
        indexRanges = getIndexRanges(indexTable, lcBldr.build(), rowIdRanges, auths);
    }

    if (indexRanges.isEmpty()) {
        LOG.debug("Query would return no results, returning empty list of splits");
        return true;
    }

    // Okay, we now check how many rows we would scan by using the index vs. the overall number
    // of rows
    long numEntries = indexRanges.size();
    double ratio = (double) numEntries / (double) numRows;
    LOG.debug("Use of index would scan %d of %d rows, ratio %s. Threshold %2f, Using for table? %b",
            numEntries, numRows, ratio, threshold, ratio < threshold, table);

    // If the percentage of scanned rows, the ratio, is less than the configured threshold
    if (ratio < threshold) {
        // Bin the ranges into TabletMetadataSplits and return true to use the tablet splits
        binRanges(getNumIndexRowsPerSplit(session), indexRanges, tabletSplits);
        LOG.debug("Number of splits for %s.%s is %d with %d ranges", schema, table, tabletSplits.size(),
                indexRanges.size());
        return true;
    } else {
        // We are going to do too much work to use the secondary index, so return false
        return false;
    }
}
From source file:io.prestosql.plugin.accumulo.index.IndexLookup.java
private boolean getRangesWithMetrics(ConnectorSession session, String schema, String table,
        Multimap<AccumuloColumnConstraint, Range> constraintRanges, Collection<Range> rowIdRanges,
        List<TabletSplitMetadata> tabletSplits, Authorizations auths) throws Exception {
    String metricsTable = getMetricsTableName(schema, table);
    long numRows = getNumRowsInTable(metricsTable, auths);

    // Get the cardinalities from the metrics table
    Multimap<Long, AccumuloColumnConstraint> cardinalities;
    if (isIndexShortCircuitEnabled(session)) {
        cardinalities = cardinalityCache.getCardinalities(schema, table, auths, constraintRanges,
                (long) (numRows * getIndexSmallCardThreshold(session)),
                getIndexCardinalityCachePollingDuration(session));
    } else {
        // disable short circuit using 0
        cardinalities = cardinalityCache.getCardinalities(schema, table, auths, constraintRanges, 0,
                new Duration(0, TimeUnit.MILLISECONDS));
    }

    Optional<Entry<Long, AccumuloColumnConstraint>> entry = cardinalities.entries().stream().findFirst();
    if (!entry.isPresent()) {
        return false;
    }

    Entry<Long, AccumuloColumnConstraint> lowestCardinality = entry.get();
    String indexTable = getIndexTableName(schema, table);
    double threshold = getIndexThreshold(session);
    List<Range> indexRanges;

    // If the smallest cardinality in our list is above the lowest cardinality threshold,
    // we should look at intersecting the row ID ranges to try and get under the threshold.
    if (smallestCardAboveThreshold(session, numRows, lowestCardinality.getKey())) {
        // If we only have one column, we can skip the intersection process and just check the index threshold
        if (cardinalities.size() == 1) {
            long numEntries = lowestCardinality.getKey();
            double ratio = ((double) numEntries / (double) numRows);
            LOG.debug(
                    "Use of index would scan %s of %s rows, ratio %s. Threshold %2f, Using for index table? %s",
                    numEntries, numRows, ratio, threshold, ratio < threshold);
            if (ratio >= threshold) {
                return false;
            }
        }

        // Else, get the intersection of all row IDs for all column constraints
        LOG.debug("%d indexed columns, intersecting ranges", constraintRanges.size());
        indexRanges = getIndexRanges(indexTable, constraintRanges, rowIdRanges, auths);
        LOG.debug("Intersection results in %d ranges from secondary index", indexRanges.size());
    } else {
        // Else, we don't need to intersect the columns and we can just use the column with the lowest cardinality,
        // so get all those row IDs in a set of ranges.
        LOG.debug("Not intersecting columns, using column with lowest cardinality ");
        ImmutableMultimap.Builder<AccumuloColumnConstraint, Range> lcBldr = ImmutableMultimap.builder();
        lcBldr.putAll(lowestCardinality.getValue(), constraintRanges.get(lowestCardinality.getValue()));
        indexRanges = getIndexRanges(indexTable, lcBldr.build(), rowIdRanges, auths);
    }

    if (indexRanges.isEmpty()) {
        LOG.debug("Query would return no results, returning empty list of splits");
        return true;
    }

    // Okay, we now check how many rows we would scan by using the index vs. the overall number
    // of rows
    long numEntries = indexRanges.size();
    double ratio = (double) numEntries / (double) numRows;
    LOG.debug("Use of index would scan %d of %d rows, ratio %s. Threshold %2f, Using for table? %b",
            numEntries, numRows, ratio, threshold, ratio < threshold, table);

    // If the percentage of scanned rows, the ratio, is less than the configured threshold
    if (ratio < threshold) {
        // Bin the ranges into TabletMetadataSplits and return true to use the tablet splits
        binRanges(getNumIndexRowsPerSplit(session), indexRanges, tabletSplits);
        LOG.debug("Number of splits for %s.%s is %d with %d ranges", schema, table, tabletSplits.size(),
                indexRanges.size());
        return true;
    } else {
        // We are going to do too much work to use the secondary index, so return false
        return false;
    }
}
From source file:com.vecna.maven.jshint.mojo.JsHintMojo.java
/**
 * {@inheritDoc}
 */
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (skip) {
        getLog().info("skipping execution");
    } else {
        String[] sourceFiles = getSourceFiles();
        if (sourceFiles.length == 0) {
            getLog().info("no source files found");
        } else {
            final InputStream jsHintSrc = openClasspathResource(jsHintJS);
            JsEngine engine;
            try {
                engine = new JsEngine().browserEnv().eval(jsHintSrc);
            } catch (IOException e) {
                throw new MojoExecutionException("failed to bootstrap JSHint", e);
            }

            NativeObject combinedOpts = readOptionsFromFile(engine);
            NativeObject combinedGlobals = extractGlobals(combinedOpts);
            addOptions(combinedOpts);
            addGlobals(combinedGlobals);

            Function jsHint = (Function) engine.get("JSHINT");
            Multimap<String, JsHintError> errors = HashMultimap.create();

            for (String srcFile : sourceFiles) {
                List<String> source;
                try {
                    source = FileUtils.readLines(new File(srcDirectory, srcFile));
                } catch (IOException e) {
                    throw new MojoExecutionException("failed to read " + srcFile, e);
                }

                NativeArray array = new NativeArray(source.toArray());
                engine.call(jsHint, array, combinedOpts, combinedGlobals);
                NativeArray nativeErrors = (NativeArray) engine.get(jsHint, "errors");

                for (int i = 0; i < nativeErrors.size(); i++) {
                    NativeObject nativeError = (NativeObject) nativeErrors.get(i);
                    JsHintError error = new JsHintError(srcFile, nativeError);
                    // handling the built-in JsHint error limit
                    if (error.getReason().startsWith("Too many errors")) {
                        break;
                    }
                    getLog().error(error.toString());
                    errors.put(error.getSource(), error);
                }
            }

            try {
                writeReport(errors);
            } catch (IOException e) {
                throw new MojoExecutionException("failed to write the report", e);
            }

            if (errors.size() > maxErrorsAllowed) {
                throw new MojoFailureException(
                        "JSHint violations: " + errors.size() + ". Allowed violations: " + maxErrorsAllowed);
            }
        }
    }
}