Example usage for com.google.common.collect Multimap asMap

List of usage examples for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usages for com.google.common.collect Multimap asMap.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
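For orientation, here is a minimal, self-contained sketch of that view (the class name and sample data are invented for illustration). The entrySet() iteration it shows is the pattern most of the examples below rely on:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class AsMapDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 90);
        scores.put("alice", 85);
        scores.put("bob", 70);

        // asMap() exposes the multimap as a Map<K, Collection<V>>.
        // It is a live view backed by the multimap, not a copy.
        Map<String, Collection<Integer>> view = scores.asMap();
        for (Map.Entry<String, Collection<Integer>> entry : view.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        // Example output (key order is not guaranteed for a hash-based multimap):
        // alice -> [90, 85]
        // bob -> [70]
    }
}

Because the result is a view, removals made through it write through to the underlying multimap, but the view does not support put or putAll.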

Usage

From source file:de.hzi.helmholtz.Compare.PathwayComparisonWithModules.java

public Multimap<Double, String> SubsetIdentification(PathwayWithModules firstPathway,
        PathwayWithModules secondPathway, BiMap<Integer, Integer> newSourceGeneIdToPositionMap,
        BiMap<Integer, Integer> newTargetGeneIdToPositionMap, int Yes) {
    Multimap<Double, String> result = TreeMultimap.create(Ordering.natural().reverse(), Ordering.natural());

    Iterator<ModuleGene> sourceGeneIt = firstPathway.moduleGeneIterator();
    int currentQueryGene = 0;
    while (sourceGeneIt.hasNext()) {
        currentQueryGene++;
        ModuleGene queryGene = sourceGeneIt.next();
        Multimap<Integer, String> resultr = TreeMultimap.create(Ordering.natural(), Ordering.natural());
        int currentTargetGene = 0;
        Multiset<String> qfunction = LinkedHashMultiset.create();
        List<String> qfunctionList = new ArrayList<String>();
        List<String> qactivity = new ArrayList<String>();
        List<Set<String>> qsubstrate = new ArrayList<Set<String>>();
        for (Module m : queryGene.getModule()) {
            for (Domain d : m.getDomains()) {
                qfunction.add(d.getDomainFunctionString());
                qfunctionList.add(d.getDomainFunctionString());
                qactivity.add(d.getStatus().toString());
                qsubstrate.add(d.getSubstrates());
            }
        }
        List<String> TargenesSelected = new ArrayList<String>();
        Iterator<ModuleGene> targetGeneIt = secondPathway.moduleGeneIterator();
        while (targetGeneIt.hasNext()) {
            currentTargetGene++;
            ModuleGene targetGene = targetGeneIt.next();
            Multiset<String> tfunction = LinkedHashMultiset.create();
            List<String> tactivity = new ArrayList<String>();
            List<Set<String>> tsubstrate = new ArrayList<Set<String>>();
            List<String> tfunctionList = new ArrayList<String>();
            Iterator<Module> mIter = targetGene.moduleIterator();
            while (mIter.hasNext()) {
                Module m = mIter.next();
                Iterator<Domain> dIter = m.domainIterator();
                while (dIter.hasNext()) {
                    Domain d = dIter.next();
                    tfunction.add(d.getDomainFunctionString());
                    tfunctionList.add(d.getDomainFunctionString());
                    tactivity.add(d.getStatus().toString());
                    tsubstrate.add(d.getSubstrates());
                }
            }
            Multiset<String> DomainsCovered = Multisets.intersection(qfunction, tfunction);
            int Differences = Math.max(Math.abs(DomainsCovered.size() - tfunction.size()),
                    Math.abs(DomainsCovered.size() - qfunction.size()));
            if (DomainsCovered.size() == tfunction.size() && tfunction.size() > 4) {
                TargenesSelected.add(Integer.toString(currentTargetGene));
            } else {
                resultr.put(Differences, Integer.toString(currentTargetGene));
            }

        }
        int count = 0;
        if (resultr.size() > 0) {
            // guard on count so we never index past the last distinct difference value
            while (TargenesSelected.size() < 2 && count < resultr.keySet().size()) {
                Multiset<String> k = LinkedHashMultiset.create(resultr.values());
                Multiset<String> t = LinkedHashMultiset.create(TargenesSelected);
                Multiset<String> Covered = Multisets.intersection(k, t);
                if (Covered.size() == k.size()) {
                    break;
                }

                // pull in the target genes with the next-smallest difference count
                Integer nextKey = (Integer) resultr.keySet().toArray()[count];
                TargenesSelected.addAll(resultr.get(nextKey));
                count = count + 1;
            }
        }
        // //System.out.println(TargenesSelected);
        //  Permutation perm = new Permutation();
        //  List<String> perms = perm.run(TargenesSelected);
        CombinationGenerator c = new CombinationGenerator(10, 10);
        List<String> perms = c.GenerateAllPossibleCombinations(TargenesSelected);
        myFunction sim = new myFunction();
        double score = 0;
        String targetIdentified = "";
        List<ModuleGene> targetGenesList = secondPathway.getModulegenes();
        for (String permu : perms) {
            String[] values = permu.replace("[", "").replace("]", "").split(",");
            List<String> mergedTargetgenes = new ArrayList<String>();
            List<Integer> ToRemove = new ArrayList<Integer>();
            List<String> tactivity = new ArrayList<String>();
            List<Set<String>> tsubstrate = new ArrayList<Set<String>>();
            for (String j : values) {
                ToRemove.add(Integer.parseInt(j.trim()));
                for (Module m : targetGenesList.get(Integer.parseInt(j.trim()) - 1).getModule()) {
                    for (Domain i : m.getDomains()) {
                        mergedTargetgenes.add(i.getDomainFunctionString());
                        tactivity.add(i.getStatus().toString());
                        tsubstrate.add(i.getSubstrates());
                    }
                }
            }
            Multimap<Double, Multimap<String, Integer>> FunctionScores = sim.calculate(qfunctionList,
                    mergedTargetgenes);
            Multimap<Double, Multimap<String, Integer>> activityscores = myFunction.calculate(qactivity,
                    tactivity);
            Multimap<Double, Multimap<String, Integer>> substratescores = myFunction
                    .calculate(getSubstrateList(qsubstrate), getSubstrateList(tsubstrate));
            Object FunctionScore = FunctionScores.asMap().keySet().toArray()[0];
            Object activityScore = activityscores.asMap().keySet().toArray()[0];
            Object substrateScore = substratescores.asMap().keySet().toArray()[0];

            double finalScore = Math
                    .round((((2.9 * Double.parseDouble(FunctionScore.toString().trim()))
                            + (0.05 * Double.parseDouble(activityScore.toString().trim()))
                            + (0.05 * Double.parseDouble(substrateScore.toString().trim()))) / 3) * 100.0)
                    / 100.0;
            targetIdentified = permu.replace(",", "+");
            String ConvertedGeneIDs = "";
            if (Yes == 0) {
                ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentQueryGene),
                        newSourceGeneIdToPositionMap) + "->"
                        + reconstructWithGeneId(targetIdentified.replace("[", "").replace("]", ""),
                                newTargetGeneIdToPositionMap);
            } else {
                ConvertedGeneIDs = reconstructWithGeneId(targetIdentified.replace("[", "").replace("]", ""),
                        newTargetGeneIdToPositionMap) + "->"
                        + reconstructWithGeneId(Integer.toString(currentQueryGene),
                                newSourceGeneIdToPositionMap);
            }
            // String ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentQueryGene), newSourceGeneIdToPositionMap) + "->" + reconstructWithGeneId(targetIdentified.replace("[", "").replace("]", ""), newTargetGeneIdToPositionMap);

            result.put(finalScore, ConvertedGeneIDs);

            ScoreFunctionMatchMisMatch.putAll(ConvertedGeneIDs, FunctionScores.values());
            ScoreStatusMatchMisMatch.putAll(ConvertedGeneIDs, activityscores.values());
            ScoreSubstrateMatchMisMatch.putAll(ConvertedGeneIDs, substratescores.values());

        }

    }
    return result;
}

From source file:org.eclipse.epp.internal.logging.aeri.ui.model.ErrorAnalyser.java

public Optional<String> computeComment(final List<Bundle> presentBundles, Throwable throwable) {
    if (packageAdmin == null) {
        return absent();
    }

    List<String> problematicPackages = extractProblematicPackage(throwable);
    if (problematicPackages.isEmpty()) {
        return absent();
    }

    Set<String> presentBundlesSymbolicNames = Sets.newHashSet();
    for (Bundle presentBundle : presentBundles) {
        presentBundlesSymbolicNames.add(presentBundle.getName());
    }

    StringBuilder comment = new StringBuilder();
    for (String problematicPackage : problematicPackages) {
        comment.append("The problematic package '").append(problematicPackage)
                .append("' may originate in the following bundles:\n");
        ExportedPackage[] exportedPackages = packageAdmin.getExportedPackages(problematicPackage);
        if (ArrayUtils.isEmpty(exportedPackages)) {
            continue;
        }
        Multimap<org.osgi.framework.Bundle, org.osgi.framework.Bundle> exportersToImporters = HashMultimap
                .create();
        for (ExportedPackage exportedPackage : exportedPackages) {
            org.osgi.framework.Bundle exportingBundle = exportedPackage.getExportingBundle();
            if (!isPresent(exportingBundle)) {
                continue;
            }
            for (org.osgi.framework.Bundle importingBundle : exportedPackage.getImportingBundles()) {
                if (!isPresent(importingBundle)) {
                    continue;
                }
                if (presentBundlesSymbolicNames.contains(importingBundle.getSymbolicName())) {
                    exportersToImporters.put(exportingBundle, importingBundle);
                }
            }
        }
        if (exportersToImporters.isEmpty()) {
            continue;
        }

        for (Entry<org.osgi.framework.Bundle, Collection<org.osgi.framework.Bundle>> entry : exportersToImporters
                .asMap().entrySet()) {
            org.osgi.framework.Bundle exporter = entry.getKey();
            Collection<org.osgi.framework.Bundle> importers = entry.getValue();
            comment.append("  ").append(exporter.getSymbolicName()).append(' ').append(exporter.getVersion())
                    .append(", from which the following bundles present on the stack trace import it:\n");
            for (org.osgi.framework.Bundle importer : importers) {
                comment.append("    ").append(importer.getSymbolicName()).append(' ')
                        .append(importer.getVersion()).append('\n');
            }
        }
    }
    return Optional.of(comment.toString());
}

From source file:no.kantega.publishing.jobs.alerts.ExpireContentAlertJob.java

@Scheduled(cron = "${jobs.expirecontent.trigger}")
@DisableOnServertype(ServerType.SLAVE)
public void expireContentAlert() {
    try {
        log.info("Looking for content will expire in less than " + daysBeforeWarning + " days");

        List<Site> sites = siteCache.getSites();
        for (Site site : sites) {
            String alias = ".";
            if (site != null && !site.getAlias().equals("/")) {
                alias = site.getAlias();
                alias = alias.replace('/', '.');
            }

            ContentQuery query = new ContentQuery();

            Date fromDate = new Date();
            query.setExpireDateFrom(fromDate);

            Calendar calendar = new GregorianCalendar();
            calendar.add(Calendar.DATE, daysBeforeWarning);
            query.setExpireDateTo(calendar.getTime());
            query.setSiteId(site.getId());
            query.setSortOrder(new SortOrder(ContentProperty.TITLE, false));

            List<Content> contentList = contentAO.getContentList(query, false);

            String defaultUserEmail = config.getString("mail" + alias + "contentexpire.recipient");

            Multimap<String, Content> users = ArrayListMultimap.create();

            // Insert docs into the multimap, keyed by user
            for (Content content : contentList) {

                if (content.getExpireAction() == ExpireAction.REMIND) {
                    String userId;
                    if (defaultUserEmail != null && defaultUserEmail.contains("@")) {
                        userId = defaultUserEmail;
                    } else {
                        if (content.getOwnerPerson() != null && content.getOwnerPerson().length() > 0) {
                            userId = content.getOwnerPerson();
                        } else {
                            userId = content.getModifiedBy();
                        }
                    }
                    if (isNotBlank(userId)) {
                        users.put(userId, content);
                    }
                }
            }

            // Iterate through users
            for (Map.Entry<String, Collection<Content>> entry : users.asMap().entrySet()) {
                String userId = entry.getKey();
                User user = getUser(userId);

                // Send message using listeners
                List<Content> userContentList = new ArrayList<>(entry.getValue());
                if (user != null) {
                    log.info("Sending alert to user " + user.getId() + " - " + userContentList.size()
                            + " docs about to expire");
                    for (ContentAlertListener listener : listeners) {
                        listener.sendContentAlert(user, userContentList);
                    }
                } else {
                    log.info("Skipping alert, user unknown " + userId + " - " + userContentList.size()
                            + " docs about to expire");
                }
            }

        }

    } catch (SystemException e) {
        log.error("", e);
    }

}

From source file:org.apache.hadoop.hbase.index.write.ParallelWriterIndexCommitter.java

@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite)
        throws SingleIndexWriteFailureException {
    /*
     * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the
     * writes in parallel to each index table, so each table gets its own task and is submitted to
     * the pool. Where it gets tricky is that we want to block the calling thread until one of two
     * things happens: (1) all index tables get successfully updated, or (2) any one of the index
     * table writes fails; in either case, we should return as quickly as possible. It gets a little
     * more complicated in that if we do get a single failure, but any of the index writes hasn't
     * been started yet (it's been queued up, but not submitted to a thread), we want that task to
     * fail immediately, as we know that write is a waste and will need to be replayed anyway.
     */

    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Void> tasks = new TaskBatch<Void>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy over of all the index update for each table.
        final List<Mutation> mutations = (List<Mutation>) entry.getValue();
        final HTableInterfaceReference tableReference = entry.getKey();
        /*
         * Write a batch of index updates to an index table. This operation stops (is cancelable) via
         * two mechanisms: (1) setting aborted or stopped on the IndexWriter or (2) interrupting the
         * running thread. The former will only work if we are not in the midst of writing the current
         * batch to the table, though we do check these status variables before starting and before
         * writing the batch. The latter usage, interrupting the thread, will work in the previous
         * situations as well as at some points while writing the batch, depending on the underlying
         * writer implementation (HTableInterface#batch is blocking, but doesn't elaborate on when it
         * supports an interrupt).
         */
        tasks.add(new Task<Void>() {

            /**
             * Do the actual write to the primary table. We don't need to worry about closing the table
             * because that is handled by the {@link CachingHTableFactory}.
             */
            @Override
            public Void call() throws Exception {
                // this may have been queued, so another task in front of us may have failed, so we
                // should exit early if that's the case
                throwFailureIfDone();

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
                }
                try {
                    HTableInterface table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (SingleIndexWriteFailureException e) {
                    throw e;
                } catch (IOException e) {
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                }
                return null;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException(
                            "Pool closed, not attempting to write to the index!", null);
                }

            }
        });
    }

    // actually submit the tasks to the pool and wait for them to finish/fail
    try {
        pool.submitUninterruptible(tasks);
    } catch (EarlyExitFailure e) {
        propagateFailure(e);
    } catch (ExecutionException e) {
        LOG.error("Found a failed index update!");
        propagateFailure(e.getCause());
    }

}

From source file:com.foundationdb.server.store.OnlineHelper.java

private void buildTableIndexes(final Session session, QueryContext context, StoreAdapter adapter,
        final TransformCache transformCache, Multimap<Group, RowType> tableIndexes) {
    final WriteIndexRow buffer = new WriteIndexRow();
    for (Entry<Group, Collection<RowType>> entry : tableIndexes.asMap().entrySet()) {
        if (entry.getValue().isEmpty()) {
            continue;
        }
        Operator plan = API.filter_Default(API.groupScan_Default(entry.getKey()), entry.getValue());
        runPlan(session, contextIfNull(context, adapter), schemaManager, txnService, plan, new RowHandler() {
            @Override
            public void handleRow(final Row row) {
                TableTransform transform = transformCache.get(row.rowType().typeId());
                simpleCheckConstraints(session, transformCache, row);
                for (final TableIndex index : transform.tableIndexes) {
                    final Key hKey = store.createKey();
                    row.hKey().copyTo(hKey);
                    if (index.isSpatial()) {
                        final SpatialColumnHandler spatialColumnHandler = new SpatialColumnHandler(index);
                        spatialColumnHandler.processSpatialObject(row, new SpatialColumnHandler.Operation() {
                            @Override
                            public void handleZValue(long z) {
                                store.writeIndexRow(session, index, row, hKey, buffer, spatialColumnHandler, z,
                                        true);
                            }
                        });
                    } else {
                        store.writeIndexRow(session, index, row, hKey, buffer, null, -1L, true);
                    }
                }
            }
        });
    }
}

From source file:org.lanternpowered.server.game.registry.type.world.GeneratorTypeRegistryModule.java

/**
 * Post-initialize the {@link GeneratorType}s. All the default world generators
 * here will be selected by scanning for 'default-world-gen.json' files.
 */
@CustomCatalogRegistration
@DelayedRegistration(RegistrationPhase.POST_INIT)
public void postInit() {
    final Multimap<String, DefaultEntry> entries = HashMultimap.create();
    final Gson gson = new Gson();
    // Scan every plugin
    for (PluginContainer pluginContainer : Sponge.getPluginManager().getPlugins()) {
        final Optional<Asset> optAsset = pluginContainer.getAsset("default-world-gen.json");
        if (optAsset.isPresent()) {
            try {
                final InputStream is = optAsset.get().getUrl().openStream();
                try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) {
                    final JsonObject json = gson.fromJson(reader, JsonObject.class);
                    for (Map.Entry<String, JsonElement> entry : json.entrySet()) {
                        entries.put(entry.getKey(),
                                new DefaultEntry(pluginContainer, entry.getValue().getAsString()));
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    for (Map.Entry<String, Collection<DefaultEntry>> entry : entries.asMap().entrySet()) {
        final String id = entry.getKey();
        if (!getById(id).map(type -> type instanceof DelegateGeneratorType).orElse(false)) {
            Lantern.getLogger().warn("The plugin(s) ({}) attempted to map an unknown id: {}",
                    Arrays.toString(entry.getValue().stream().map(e -> e.pluginContainer.getId()).toArray()),
                    id);
            continue;
        }
        final List<DefaultEntry> possibleEntries = new ArrayList<>();
        for (DefaultEntry entry1 : entry.getValue()) {
            final Optional<GeneratorType> generatorType = getById(entry1.type);
            if (generatorType.isPresent()) {
                possibleEntries.add(entry1);
            } else {
                Lantern.getLogger().warn("The plugin {} attempted to map a missing generator type {} for {}",
                        entry1.pluginContainer.getId(), entry1.type, id);
            }
        }
        if (!possibleEntries.isEmpty()) {
            final DefaultEntry defaultEntry = possibleEntries.get(0);
            if (possibleEntries.size() > 1) {
                Lantern.getLogger().warn("Multiple plugins are mapping {}: {}", id,
                        Arrays.toString(entry.getValue().stream()
                                .map(e -> "\n" + e.pluginContainer.getId() + ": " + e.type).toArray()));
                Lantern.getLogger().warn("The first one will be used.");
            }
            ((DelegateGeneratorType) getById(id).get()).setGeneratorType(getById(defaultEntry.type).get());
            Lantern.getLogger().warn("Successfully registered a generator type mapping: {} from {} for {}",
                    defaultEntry.type, defaultEntry.pluginContainer.getId(), id);
        }
    }
}

From source file:org.glowroot.agent.model.TraceEntryComponent.java

public List<Trace.Entry> toProto(long captureTick,
        Multimap<TraceEntryImpl, TraceEntryImpl> asyncRootTraceEntries) {
    if (captureTick < startTick) {
        return ImmutableList.of();
    }
    boolean completed = this.completed;
    if (completed && endTick < captureTick) {
        completed = false;
    }
    ListMultimap<TraceEntryImpl, TraceEntryImpl> parentChildMap = ArrayListMultimap.create();
    TraceEntryImpl entry = rootEntry.getNextTraceEntry();
    // filter out entries that started after the capture tick
    // checking completed is short circuit optimization for the common case
    while (entry != null && (completed || Tickers.lessThanOrEqual(entry.getStartTick(), captureTick))) {
        // checkNotNull is safe because only the root entry has null parent
        TraceEntryImpl parentTraceEntry = checkNotNull(entry.getParentTraceEntry());
        parentChildMap.put(parentTraceEntry, entry);
        entry = entry.getNextTraceEntry();
    }
    // merge in async trace entry roots
    for (Entry<TraceEntryImpl, Collection<TraceEntryImpl>> entries : asyncRootTraceEntries.asMap().entrySet()) {
        TraceEntryImpl parentTraceEntry = entries.getKey();
        List<TraceEntryImpl> childTraceEntries = Lists.newArrayList(parentChildMap.get(parentTraceEntry));
        for (TraceEntryImpl asyncRootTraceEntry : entries.getValue()) {
            TraceEntryImpl loopEntry = asyncRootTraceEntry;
            while (loopEntry != null
                    && (completed || Tickers.lessThanOrEqual(loopEntry.getStartTick(), captureTick))) {
                TraceEntryImpl loopParentEntry = loopEntry.getParentTraceEntry();
                if (loopParentEntry == null) {
                    childTraceEntries.add(loopEntry);
                } else {
                    parentChildMap.put(loopParentEntry, loopEntry);
                }
                loopEntry = loopEntry.getNextTraceEntry();
            }
        }
        childTraceEntries = TraceEntryImpl.orderingByStartTick.sortedCopy(childTraceEntries);
        parentChildMap.replaceValues(parentTraceEntry, childTraceEntries);
    }
    return getProtobufChildEntries(rootEntry, parentChildMap, startTick, captureTick);
}

From source file:org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter.java

@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite)
        throws SingleIndexWriteFailureException {
    /*
     * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the writes in
     * parallel to each index table, so each table gets its own task and is submitted to the pool. Where it gets
     * tricky is that we want to block the calling thread until one of two things happens: (1) all index tables get
     * successfully updated, or (2) any one of the index table writes fails; in either case, we should return as
     * quickly as possible. It gets a little more complicated in that if we do get a single failure, but any of the
     * index writes hasn't been started yet (it's been queued up, but not submitted to a thread), we want that task
     * to fail immediately, as we know that write is a waste and will need to be replayed anyway.
     */

    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Void> tasks = new TaskBatch<Void>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy over of all the index update for each table.
        final List<Mutation> mutations = kvBuilder.cloneIfNecessary((List<Mutation>) entry.getValue());
        final HTableInterfaceReference tableReference = entry.getKey();
        final RegionCoprocessorEnvironment env = this.env;
        /*
         * Write a batch of index updates to an index table. This operation stops (is cancelable) via two
         * mechanisms: (1) setting aborted or stopped on the IndexWriter or (2) interrupting the running thread.
         * The former will only work if we are not in the midst of writing the current batch to the table, though we
         * do check these status variables before starting and before writing the batch. The latter usage,
         * interrupting the thread, will work in the previous situations as well as at some points while writing the
         * batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't
         * elaborate on when it supports an interrupt).
         */
        tasks.add(new Task<Void>() {

            /**
             * Do the actual write to the primary table. We don't need to worry about closing the table because that
             * is handled by the {@link CachingHTableFactory}.
             */
            @SuppressWarnings("deprecation")
            @Override
            public Void call() throws Exception {
                // this may have been queued, so another task in front of us may have failed, so we
                // should exit early if that's the case
                throwFailureIfDone();

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
                }
                try {
                    // TODO: Once HBASE-11766 is fixed, reexamine whether this is necessary.
                    // Also, checking the prefix of the table name to determine if this is a local
                    // index is pretty hacky. If we're going to keep this, we should revisit that
                    // as well.
                    try {
                        if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
                            Region indexRegion = IndexUtil.getIndexRegion(env);
                            if (indexRegion != null) {
                                throwFailureIfDone();
                                indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                return null;
                            }
                        }
                    } catch (IOException ignord) {
                        // when it fails, we fall back to the standard & slow way
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(
                                    "indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
                                            + ignord);
                        }
                    }
                    HTableInterface table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (SingleIndexWriteFailureException e) {
                    throw e;
                } catch (IOException e) {
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                }
                return null;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException(
                            "Pool closed, not attempting to write to the index!", null);
                }

            }
        });
    }

    // actually submit the tasks to the pool and wait for them to finish/fail
    try {
        pool.submitUninterruptible(tasks);
    } catch (EarlyExitFailure e) {
        propagateFailure(e);
    } catch (ExecutionException e) {
        LOG.error("Found a failed index update!");
        propagateFailure(e.getCause());
    }

}

From source file:com.facebook.buck.apple.ApplePackageDescription.java

/**
 * Get the correct package configuration based on the platform flavors of this build target.
 *
 * Validates that all named platforms yield the same package config.
 *
 * @return If found, a package config for this target.
 * @throws HumanReadableException if there are multiple possible package configs.
 */
private Optional<ApplePackageConfigAndPlatformInfo> getApplePackageConfig(BuildTarget target,
        Function<String, com.facebook.buck.rules.args.Arg> macroExpander) {
    Set<Flavor> platformFlavors = getPlatformFlavorsOrDefault(target);

    // Ensure that different platforms generate the same config.
    // The value of this map is just for error reporting.
    Multimap<Optional<ApplePackageConfigAndPlatformInfo>, Flavor> packageConfigs = MultimapBuilder.hashKeys()
            .arrayListValues().build();

    for (Flavor flavor : platformFlavors) {
        AppleCxxPlatform platform = appleCxxPlatformFlavorDomain.getValue(flavor);
        Optional<ApplePackageConfig> packageConfig = config
                .getPackageConfigForPlatform(platform.getAppleSdk().getApplePlatform());
        packageConfigs.put(packageConfig.isPresent()
                ? Optional
                        .of(ApplePackageConfigAndPlatformInfo.of(packageConfig.get(), macroExpander, platform))
                : Optional.empty(), flavor);
    }

    if (packageConfigs.isEmpty()) {
        return Optional.empty();
    } else if (packageConfigs.keySet().size() == 1) {
        return Iterables.getOnlyElement(packageConfigs.keySet());
    } else {
        throw new HumanReadableException(
                "In target %s: Multi-architecture package has different package configs for targets: %s",
                target.getFullyQualifiedName(), packageConfigs.asMap().values());
    }
}

From source file:com.squareup.wire.schema.Linker.java

/** Validate that the tags of {@code fields} are unique and in range. */
void validateFields(Iterable<Field> fields, ImmutableList<Reserved> reserveds) {
    Multimap<Integer, Field> tagToField = LinkedHashMultimap.create();
    Multimap<String, Field> nameToField = LinkedHashMultimap.create();
    for (Field field : fields) {
        int tag = field.tag();
        if (!Util.isValidTag(tag)) {
            withContext(field).addError("tag is out of range: %s", tag);
        }

        for (Reserved reserved : reserveds) {
            if (reserved.matchesTag(tag)) {
                withContext(field).addError("tag %s is reserved (%s)", tag, reserved.location());
            }
            if (reserved.matchesName(field.name())) {
                withContext(field).addError("name '%s' is reserved (%s)", field.name(), reserved.location());
            }
        }

        tagToField.put(tag, field);
        nameToField.put(field.qualifiedName(), field);
    }

    for (Map.Entry<Integer, Collection<Field>> entry : tagToField.asMap().entrySet()) {
        if (entry.getValue().size() > 1) {
            StringBuilder error = new StringBuilder();
            error.append(String.format("multiple fields share tag %s:", entry.getKey()));
            int index = 1;
            for (Field field : entry.getValue()) {
                error.append(String.format("\n  %s. %s (%s)", index++, field.name(), field.location()));
            }
            addError("%s", error);
        }
    }

    for (Collection<Field> collidingFields : nameToField.asMap().values()) {
        if (collidingFields.size() > 1) {
            Field first = collidingFields.iterator().next();
            StringBuilder error = new StringBuilder();
            error.append(String.format("multiple fields share name %s:", first.name()));
            int index = 1;
            for (Field field : collidingFields) {
                error.append(String.format("\n  %s. %s (%s)", index++, field.name(), field.location()));
            }
            addError("%s", error);
        }
    }
}