Example usage for com.google.common.collect Multimap isEmpty

Introduction

On this page you can find usage examples for com.google.common.collect Multimap isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this multimap contains no key-value pairs.
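
As a quick illustration of the behavior described above, here is a minimal, self-contained sketch (assuming Guava is on the classpath; the ArrayListMultimap implementation and the sample keys are purely illustrative):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapIsEmptyExample {
    public static void main(String[] args) {
        Multimap<String, String> labels = ArrayListMultimap.create();

        // A freshly created multimap contains no key-value pairs.
        System.out.println(labels.isEmpty()); // true

        // Any put() makes it non-empty, including repeated keys.
        labels.put("env", "prod");
        labels.put("env", "staging");
        System.out.println(labels.isEmpty()); // false

        // Once every entry has been removed, isEmpty() is true again.
        labels.remove("env", "prod");
        labels.remove("env", "staging");
        System.out.println(labels.isEmpty()); // true
    }
}

As the usage examples below show, isEmpty() is typically used as a cheap guard before iterating entries() or keySet(), or to skip work entirely when a multimap of pending items holds nothing.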

Usage

From source file:org.artifactory.bintray.BintrayServiceImpl.java

private List<FileInfo> collectArtifactsToPushBasedOnDescriptor(FileInfo jsonFile, BintrayUploadInfo uploadInfo,
        BasicStatusHolder status) {

    List<AqlSearchablePath> artifactPaths = Lists.newArrayList();
    Multimap<String, String> propsToFilterBy = getMapFromUploadInfoMultiSet(uploadInfo.getFilterProps());
    boolean descriptorHasPaths = CollectionUtils.notNullOrEmpty(uploadInfo.getArtifactPaths());
    boolean descriptorHasRelPaths = CollectionUtils.notNullOrEmpty(uploadInfo.getArtifactRelativePaths());

    if (!descriptorHasPaths && !descriptorHasRelPaths) {
        if (propsToFilterBy.isEmpty()) {
            status.status(
                    "The descriptor doesn't contain file paths and no properties to filter by were "
                            + "specified , pushing everything under " + jsonFile.getRepoPath().getParent(),
                    log);
        } else {
            status.status(
                    "The descriptor doesn't contain file paths, pushing everything under "
                            + jsonFile.getRepoPath().getParent() + " , filtered by the properties specified.",
                    log);
        }
        artifactPaths = AqlUtils
                .getSearchablePathForCurrentFolderAndSubfolders(jsonFile.getRepoPath().getParent());
    } else {
        try {
            if (descriptorHasPaths) {
                artifactPaths = AqlSearchablePath.fullPathToSearchablePathList(uploadInfo.getArtifactPaths());
            }
            if (descriptorHasRelPaths) {
                artifactPaths.addAll(AqlSearchablePath.relativePathToSearchablePathList(
                        uploadInfo.getArtifactRelativePaths(), jsonFile.getRepoPath().getParent()));
            }
        } catch (IllegalArgumentException iae) {
            status.error("Paths in the descriptor must point to a file or use a valid wildcard that denotes "
                    + "several files (i.e. /*.*)", SC_BAD_REQUEST, iae, log);
            return null;
        }
    }
    List<FileInfo> artifactsToPush = collectArtifactItemInfos(artifactPaths, propsToFilterBy);
    filterOutJsonFileFromArtifactsToPush(artifactsToPush, jsonFile.getRepoPath(), status);

    //aql search returned no artifacts for query
    if (CollectionUtils.isNullOrEmpty(artifactsToPush)) {
        status.error("No artifacts found to push to Bintray, aborting operation", SC_NOT_FOUND, log);
    }
    return artifactsToPush;
}

From source file:com.bigdata.dastor.service.StorageService.java

private void unbootstrap(final Runnable onFinish) {
    final CountDownLatch latch = new CountDownLatch(DatabaseDescriptor.getNonSystemTables().size());
    for (final String table : DatabaseDescriptor.getNonSystemTables()) {
        Multimap<Range, InetAddress> rangesMM = getChangedRangesForLeaving(table,
                FBUtilities.getLocalAddress());
        if (logger_.isDebugEnabled())
            logger_.debug("Ranges needing transfer are [" + StringUtils.join(rangesMM.keySet(), ",") + "]");
        if (rangesMM.isEmpty()) {
            latch.countDown();
            continue;
        }

        setMode("Leaving: streaming data to other nodes", true);
        final Set<Map.Entry<Range, InetAddress>> pending = Collections
                .synchronizedSet(new HashSet<Map.Entry<Range, InetAddress>>(rangesMM.entries()));
        for (final Map.Entry<Range, InetAddress> entry : rangesMM.entries()) {
            final Range range = entry.getKey();
            final InetAddress newEndpoint = entry.getValue();
            final Runnable callback = new Runnable() {
                public void run() {
                    pending.remove(entry);
                    if (pending.isEmpty())
                        latch.countDown();
                }
            };
            StageManager.getStage(StageManager.STREAM_STAGE).execute(new Runnable() {
                public void run() {
                    // TODO each call to transferRanges re-flushes, this is potentially a lot of waste
                    StreamOut.transferRanges(newEndpoint, table, Arrays.asList(range), callback);
                }
            });
        }
    }

    // wait for the transfer runnables to signal the latch.
    logger_.debug("waiting for stream aks.");
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    logger_.debug("stream acks all received.");
    leaveRing();
    onFinish.run();
}

From source file:com.palantir.atlasdb.keyvalue.partition.map.DynamicPartitionMapImpl.java

private <ValType> Map<KeyValueEndpoint, Multimap<Cell, ValType>> getServicesForCellsMultimap(String tableName,
        Multimap<Cell, ValType> cellMultimap, boolean isWrite) {
    Map<KeyValueEndpoint, Multimap<Cell, ValType>> result = Maps.newHashMap();
    for (Map.Entry<Cell, ValType> e : cellMultimap.entries()) {
        Set<KeyValueEndpoint> services = getServicesHavingRow(e.getKey().getRowName(), isWrite);
        for (KeyValueEndpoint kvs : services) {
            if (!result.containsKey(kvs)) {
                result.put(kvs, HashMultimap.<Cell, ValType>create());
            }
            assert !result.get(kvs).containsEntry(e.getKey(), e.getValue());
            result.get(kvs).put(e.getKey(), e.getValue());
        }
    }
    if (!cellMultimap.isEmpty()) {
        assert result.keySet().size() >= quorumParameters.getReplicationFactor();
    }
    return result;
}

From source file:com.google.devtools.build.lib.analysis.BuildView.java

/**
 * <p>If {@link BuildConfiguration.Options#trimConfigurations()} is true, transforms a collection
 * of <Target, Configuration> pairs by trimming each target's
 * configuration to only the fragments the target and its transitive dependencies need.
 *
 * <p>Else returns configurations that unconditionally include all fragments.
 *
 * <p>Preserves the original input order. Uses original (untrimmed) configurations for targets
 * that can't be evaluated (e.g. due to loading phase errors).
 *
 * <p>This is suitable for feeding {@link ConfiguredTargetValue} keys: as general principle
 * {@link ConfiguredTarget}s should have exactly as much information in their configurations as
 * they need to evaluate and no more (e.g. there's no need for Android settings in a C++
 * configured target).
 */
// TODO(bazel-team): error out early for targets that fail - untrimmed configurations should
// never make it through analysis (and especially not seed ConfiguredTargetValues)
private List<TargetAndConfiguration> getDynamicConfigurations(Iterable<TargetAndConfiguration> inputs,
        EventHandler eventHandler) throws InterruptedException {
    Map<Label, Target> labelsToTargets = new LinkedHashMap<>();
    // We'll get the configs from SkyframeExecutor#getConfigurations, which gets configurations
    // for deps including transitions. So to satisfy its API we repackage each target as a
    // Dependency with a NONE transition.
    Multimap<BuildConfiguration, Dependency> asDeps = ArrayListMultimap
            .<BuildConfiguration, Dependency>create();

    for (TargetAndConfiguration targetAndConfig : inputs) {
        labelsToTargets.put(targetAndConfig.getLabel(), targetAndConfig.getTarget());
        if (targetAndConfig.getConfiguration() != null) {
            asDeps.put(targetAndConfig.getConfiguration(),
                    Dependency.withTransitionAndAspects(targetAndConfig.getLabel(),
                            Attribute.ConfigurationTransition.NONE,
                            // TODO(bazel-team): support top-level aspects
                            ImmutableSet.<AspectDescriptor>of()));
        }
    }

    // Maps <target, originalConfig> pairs to <target, dynamicConfig> pairs for targets that
    // could be successfully Skyframe-evaluated.
    Map<TargetAndConfiguration, TargetAndConfiguration> successfullyEvaluatedTargets = new LinkedHashMap<>();
    if (!asDeps.isEmpty()) {
        for (BuildConfiguration fromConfig : asDeps.keySet()) {
            Multimap<Dependency, BuildConfiguration> trimmedTargets = skyframeExecutor
                    .getConfigurations(eventHandler, fromConfig.getOptions(), asDeps.get(fromConfig));
            for (Map.Entry<Dependency, BuildConfiguration> trimmedTarget : trimmedTargets.entries()) {
                Target target = labelsToTargets.get(trimmedTarget.getKey().getLabel());
                successfullyEvaluatedTargets.put(new TargetAndConfiguration(target, fromConfig),
                        new TargetAndConfiguration(target, trimmedTarget.getValue()));
            }
        }
    }

    ImmutableList.Builder<TargetAndConfiguration> result = ImmutableList.<TargetAndConfiguration>builder();
    for (TargetAndConfiguration originalInput : inputs) {
        if (successfullyEvaluatedTargets.containsKey(originalInput)) {
            // The configuration was successfully trimmed.
            result.add(successfullyEvaluatedTargets.get(originalInput));
        } else {
            // Either the configuration couldn't be determined (e.g. loading phase error) or it's null.
            result.add(originalInput);
        }
    }
    return result.build();
}

From source file:forge.learnedai.ComputerUtil.java

public static Object vote(Player ai, List<Object> options, SpellAbility sa, Multimap<Object, Player> votes) {
    if (!sa.hasParam("AILogic")) {
        return Aggregates.random(options);
    } else {
        String logic = sa.getParam("AILogic");
        switch (logic) {
        case "Torture":
            return "Torture";
        case "GraceOrCondemnation":
            return ai.getCreaturesInPlay().size() > ai.getOpponent().getCreaturesInPlay().size() ? "Grace"
                    : "Condemnation";
        case "CarnageOrHomage":
            CardCollection cardsInPlay = CardLists
                    .getNotType(sa.getHostCard().getGame().getCardsIn(ZoneType.Battlefield), "Land");
            CardCollection humanlist = CardLists.filterControlledBy(cardsInPlay, ai.getOpponents());
            CardCollection computerlist = CardLists.filterControlledBy(cardsInPlay, ai);
            return (ComputerUtilCard.evaluatePermanentList(computerlist) + 3) < ComputerUtilCard
                    .evaluatePermanentList(humanlist) ? "Carnage" : "Homage";
        case "Judgment":
            if (votes.isEmpty()) {
                CardCollection list = new CardCollection();
                for (Object o : options) {
                    if (o instanceof Card) {
                        list.add((Card) o);
                    }
                }
                return ComputerUtilCard.getBestAI(list);
            } else {
                return Iterables.getFirst(votes.keySet(), null);
            }
        case "Protection":
            if (votes.isEmpty()) {
                List<String> restrictedToColors = new ArrayList<String>();
                for (Object o : options) {
                    if (o instanceof String) {
                        restrictedToColors.add((String) o);
                    }
                }
                CardCollection lists = CardLists.filterControlledBy(ai.getGame().getCardsInGame(),
                        ai.getOpponents());
                return StringUtils
                        .capitalize(ComputerUtilCard.getMostProminentColor(lists, restrictedToColors));
            } else {
                return Iterables.getFirst(votes.keySet(), null);
            }
        default:
            return Iterables.getFirst(options, null);
        }
    }
}

From source file:com.facebook.presto.metadata.MetadataManager.java

@Override
public final void verifyComparableOrderableContract() {
    Multimap<Type, OperatorType> missingOperators = HashMultimap.create();
    for (Type type : typeManager.getTypes()) {
        if (type.isComparable()) {
            if (!functions.canResolveOperator(HASH_CODE, BIGINT, ImmutableList.of(type))) {
                missingOperators.put(type, HASH_CODE);
            }
            if (!functions.canResolveOperator(EQUAL, BOOLEAN, ImmutableList.of(type, type))) {
                missingOperators.put(type, EQUAL);
            }
            if (!functions.canResolveOperator(NOT_EQUAL, BOOLEAN, ImmutableList.of(type, type))) {
                missingOperators.put(type, NOT_EQUAL);
            }
        }
        if (type.isOrderable()) {
            for (OperatorType operator : ImmutableList.of(LESS_THAN, LESS_THAN_OR_EQUAL, GREATER_THAN,
                    GREATER_THAN_OR_EQUAL)) {
                if (!functions.canResolveOperator(operator, BOOLEAN, ImmutableList.of(type, type))) {
                    missingOperators.put(type, operator);
                }
            }
            if (!functions.canResolveOperator(BETWEEN, BOOLEAN, ImmutableList.of(type, type, type))) {
                missingOperators.put(type, BETWEEN);
            }
        }
    }
    // TODO: verify the parametric types too
    if (!missingOperators.isEmpty()) {
        List<String> messages = new ArrayList<>();
        for (Type type : missingOperators.keySet()) {
            messages.add(format("%s missing for %s", missingOperators.get(type), type));
        }
        throw new IllegalStateException(Joiner.on(", ").join(messages));
    }
}

From source file:com.google.devtools.build.lib.analysis.constraints.TopLevelConstraintSemantics.java

/**
 * Checks that if this is an environment-restricted build, all top-level targets support expected
 * top-level environments. Expected top-level environments can be declared explicitly through
 * {@code --target_environment} or implicitly through {@code --auto_cpu_environment_group}. For
 * the latter, top-level targets must be compatible with the build's target configuration CPU.
 *
 * <p>If any target doesn't support an explicitly expected environment declared through {@link
 * BuildConfiguration.Options#targetEnvironments}, the entire build fails with an error.
 *
 * <p>If any target doesn't support an implicitly expected environment declared through {@link
 * BuildConfiguration.Options#autoCpuEnvironmentGroup}, the target is skipped during execution
 * while remaining targets execute as normal.
 *
 * @param topLevelTargets the build's top-level targets
 * @param packageManager object for retrieving loaded targets
 * @param eventHandler the build's event handler
 * @return the set of bad top-level targets.
 * @throws ViewCreationFailedException if any target doesn't support an explicitly expected
 *     environment declared through {@link BuildConfiguration.Options#targetEnvironments}
 */
public static Set<ConfiguredTarget> checkTargetEnvironmentRestrictions(
        Iterable<ConfiguredTarget> topLevelTargets, PackageManager packageManager,
        ExtendedEventHandler eventHandler) throws ViewCreationFailedException, InterruptedException {
    ImmutableSet.Builder<ConfiguredTarget> badTargets = ImmutableSet.builder();
    // Maps targets that are missing *explicitly* required environments to the set of environments
    // they're missing. These targets trigger a ViewCreationFailedException, which halts the build.
    // Targets with missing *implicitly* required environments don't belong here, since the build
    // continues while skipping them.
    Multimap<ConfiguredTarget, Label> exceptionInducingTargets = ArrayListMultimap.create();
    for (ConfiguredTarget topLevelTarget : topLevelTargets) {
        BuildConfiguration config = topLevelTarget.getConfiguration();
        boolean failBuildIfTargetIsBad = true;
        if (config == null) {
            // TODO(bazel-team): support file targets (they should apply package-default constraints).
            continue;
        } else if (!config.enforceConstraints()) {
            continue;
        }

        List<Label> targetEnvironments = config.getTargetEnvironments();
        if (targetEnvironments.isEmpty()) {
            try {
                targetEnvironments = autoConfigureTargetEnvironments(config,
                        config.getAutoCpuEnvironmentGroup(), packageManager, eventHandler);
                failBuildIfTargetIsBad = false;
            } catch (NoSuchPackageException | NoSuchTargetException
                    | ConstraintSemantics.EnvironmentLookupException e) {
                throw new ViewCreationFailedException("invalid target environment", e);
            }
        }

        if (targetEnvironments.isEmpty()) {
            continue;
        }

        // Parse and collect this configuration's environments.
        EnvironmentCollection.Builder builder = new EnvironmentCollection.Builder();
        for (Label envLabel : targetEnvironments) {
            try {
                Target env = packageManager.getTarget(eventHandler, envLabel);
                builder.put(ConstraintSemantics.getEnvironmentGroup(env), envLabel);
            } catch (NoSuchPackageException | NoSuchTargetException
                    | ConstraintSemantics.EnvironmentLookupException e) {
                throw new ViewCreationFailedException("invalid target environment", e);
            }
        }
        EnvironmentCollection expectedEnvironments = builder.build();

        // Now check the target against those environments.
        TransitiveInfoCollection asProvider;
        if (topLevelTarget instanceof OutputFileConfiguredTarget) {
            asProvider = ((OutputFileConfiguredTarget) topLevelTarget).getGeneratingRule();
        } else {
            asProvider = topLevelTarget;
        }
        SupportedEnvironmentsProvider provider = Verify
                .verifyNotNull(asProvider.getProvider(SupportedEnvironmentsProvider.class));
        Collection<Label> missingEnvironments = ConstraintSemantics
                .getUnsupportedEnvironments(provider.getRefinedEnvironments(), expectedEnvironments);
        if (!missingEnvironments.isEmpty()) {
            badTargets.add(topLevelTarget);
            if (failBuildIfTargetIsBad) {
                exceptionInducingTargets.putAll(topLevelTarget, missingEnvironments);
            }
        }
    }

    if (!exceptionInducingTargets.isEmpty()) {
        throw new ViewCreationFailedException(getBadTargetsUserMessage(exceptionInducingTargets));
    }
    return ImmutableSet.copyOf(badTargets.build());
}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * Used to request ranges from endpoints in the ring (will block until all data is fetched and ready)
 * @param ranges ranges to fetch as map of the preferred address and range collection
 * @return latch to count down
 */
private CountDownLatch requestRanges(final Map<String, Multimap<InetAddress, Range>> ranges) {
    final CountDownLatch latch = new CountDownLatch(ranges.keySet().size());
    for (final String table : ranges.keySet()) {
        Multimap<InetAddress, Range> endpointWithRanges = ranges.get(table);

        if (endpointWithRanges.isEmpty()) {
            latch.countDown();
            continue;
        }

        final Set<InetAddress> pending = new HashSet<InetAddress>(endpointWithRanges.keySet());

        // Send messages to respective folks to stream data over to me
        for (final InetAddress source : endpointWithRanges.keySet()) {
            Collection<Range> toFetch = endpointWithRanges.get(source);

            final Runnable callback = new Runnable() {
                public void run() {
                    pending.remove(source);

                    if (pending.isEmpty())
                        latch.countDown();
                }
            };

            if (logger_.isDebugEnabled())
                logger_.debug("Requesting from " + source + " ranges " + StringUtils.join(toFetch, ", "));

            // sending actual request
            StreamIn.requestRanges(source, table, toFetch, callback, OperationType.BOOTSTRAP);
        }
    }
    return latch;
}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * Seed data to the endpoints that will be responsible for it at the future
 *
 * @param rangesToStreamByTable tables and data ranges with endpoints included for each
 * @return latch to count down
 */
private CountDownLatch streamRanges(final Map<String, Multimap<Range, InetAddress>> rangesToStreamByTable) {
    final CountDownLatch latch = new CountDownLatch(rangesToStreamByTable.keySet().size());
    for (final String table : rangesToStreamByTable.keySet()) {
        Multimap<Range, InetAddress> rangesWithEndpoints = rangesToStreamByTable.get(table);

        if (rangesWithEndpoints.isEmpty()) {
            latch.countDown();
            continue;
        }

        final Set<Map.Entry<Range, InetAddress>> pending = new HashSet<Map.Entry<Range, InetAddress>>(
                rangesWithEndpoints.entries());

        for (final Map.Entry<Range, InetAddress> entry : rangesWithEndpoints.entries()) {
            final Range range = entry.getKey();
            final InetAddress newEndpoint = entry.getValue();

            final Runnable callback = new Runnable() {
                public void run() {
                    synchronized (pending) {
                        pending.remove(entry);

                        if (pending.isEmpty())
                            latch.countDown();
                    }
                }
            };

            StageManager.getStage(Stage.STREAM).execute(new Runnable() {
                public void run() {
                    // TODO each call to transferRanges re-flushes, this is potentially a lot of waste
                    StreamOut.transferRanges(newEndpoint, table, Arrays.asList(range), callback,
                            OperationType.UNBOOTSTRAP);
                }
            });
        }
    }
    return latch;
}

From source file:org.zanata.magpie.service.PersistentTranslationService.java

/**
 * Translate multiple strings in an API trigger
 *
 * Get from database if exists (hash) from same document,
 * if not exist, get latest TF from DB with matching hash,
 * else from MT engine
 */
@Transactional
public List<String> translate(@NotNull Document document, @NotNull List<String> sourceStrings,
        @NotNull Locale fromLocale, @NotNull Locale toLocale, @NotNull BackendID backendID,
        @NotNull StringType stringType, Optional<String> category) throws BadRequestException, MTException {
    // fetch the text flows for later (as part of this new transaction)
    document = documentDAO.reload(document);
    document.getTextFlows();
    if (sourceStrings == null || sourceStrings.isEmpty() || fromLocale == null || toLocale == null
            || backendID == null) {
        throw new BadRequestException();
    }
    if (!authenticatedAccount.hasAuthenticatedAccount()) {
        throw new MTException("not authenticated account trying to trigger MT translation");
    }

    // get translator backend for MT engine by requested backend id
    TranslatorBackend translatorBackend = getTranslatorBackend(backendID);

    BackendLocaleCode mappedFromLocaleCode = translatorBackend.getMappedLocale(fromLocale.getLocaleCode());
    BackendLocaleCode mappedToLocaleCode = translatorBackend.getMappedLocale(toLocale.getLocaleCode());

    List<String> results = new ArrayList<>(sourceStrings);
    Multimap<String, Integer> untranslatedIndexMap = ArrayListMultimap.create();

    Map<Integer, TextFlow> indexTextFlowMap = Maps.newHashMap();

    // search from database
    int matchCount = 0;
    for (int sourceStringIndex = 0; sourceStringIndex < sourceStrings.size(); sourceStringIndex++) {
        String string = sourceStrings.get(sourceStringIndex);
        String contentHash = HashUtil.generateHash(string);
        Optional<TextFlow> matchedHashTf = tryFindTextFlowByContentHashFromDB(document, fromLocale, toLocale,
                backendID, contentHash);

        if (matchedHashTf.isPresent()) {
            // we found a matching text flow in database
            // now check to see if it has translation from the same provider
            TextFlow matchedTf = matchedHashTf.get();
            Optional<TextFlowTarget> matchedTarget = findTargetByLocaleAndProvider(toLocale, backendID,
                    matchedTf);

            if (matchedTarget.isPresent()) {
                TextFlowTarget matchedEntity = matchedTarget.get();
                matchCount++;
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Found match, Source {}:{}:{}\nTranslation {}:{}", fromLocale.getLocaleCode(),
                            document.getUrl(), ShortString.shorten(string), toLocale.getLocaleCode(),
                            ShortString.shorten(matchedEntity.getContent()));
                }

                results.set(sourceStringIndex, matchedEntity.getContent());
            } else {
                untranslatedIndexMap.put(string, sourceStringIndex);
                indexTextFlowMap.put(sourceStringIndex, matchedTf);
            }
        } else {
            untranslatedIndexMap.put(string, sourceStringIndex);
        }
    }
    LOG.info("found {} of match sources and translations in database", matchCount);

    // see if we got all translations from database records
    if (untranslatedIndexMap.isEmpty()) {
        return results;
    }

    // translate using requested MT engine
    List<String> sourcesToTranslate = new ArrayList<>(untranslatedIndexMap.keySet());
    Date engineInvokeTime = new Date();
    List<AugmentedTranslation> translations = translatorBackend.translate(sourcesToTranslate,
            mappedFromLocaleCode, mappedToLocaleCode, stringType, category);

    LOG.info("triggered MT engine {} from {} to {}", backendID, fromLocale.getLocaleCode(),
            toLocale.getLocaleCode());

    List<String> requestedTextFlows = Lists.newLinkedList();
    long wordCount = 0;
    long charCount = 0;
    for (int i = 0; i < sourcesToTranslate.size(); i++) {
        String source = sourcesToTranslate.get(i);
        AugmentedTranslation translation = translations.get(i);
        // same string may appear several times in a document therefore has several indexes
        Collection<Integer> indexes = untranslatedIndexMap.get(source);
        indexes.forEach(j -> results.set(j, translation.getPlainTranslation()));

        // see if we already have a matched text flow
        // (either in the same document or copied from other document)
        TextFlow tf = indexTextFlowMap.get(indexes.iterator().next());

        try {
            if (tf == null) {
                tf = createTextFlow(document, source, fromLocale);
            }
            wordCount += tf.getWordCount();
            charCount += tf.getCharCount();
            requestedTextFlows.add(tf.getContentHash());
            TextFlowTarget target = new TextFlowTarget(translation.getPlainTranslation(),
                    translation.getRawTranslation(), tf, toLocale, backendID);
            createOrUpdateTextFlowTarget(target);
        } catch (Exception e) {
            List<Throwable> causalChain = getThrowableList(e);
            Optional<Throwable> duplicateKeyEx = causalChain.stream()
                    .filter(t -> t instanceof SQLException && t.getMessage() != null
                            && t.getMessage().contains("duplicate key value violates unique constraint"))
                    .findAny();
            if (duplicateKeyEx.isPresent()) {
                LOG.warn("concurrent requests for document {}", document.getUrl());
                // we ignore the failed update
                // TODO prevent duplicates from reaching DB: ZNTAMT-51
            }
        }
    }
    requestedMTEvent.fire(new RequestedMTEvent(document, requestedTextFlows, backendID, engineInvokeTime,
            authenticatedAccount.getAuthenticatedAccount().get(), wordCount, charCount));

    return results;
}