Example usage for com.google.common.collect Multimap asMap

List of usage examples for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usages of asMap() from com.google.common.collect.Multimap.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
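
As a quick orientation before the real-world examples below, here is a minimal, self-contained sketch of the view semantics described above (the class name and sample values are illustrative, not from any of the projects quoted):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class MultimapAsMapDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = HashMultimap.create();
        multimap.put("even", 2);
        multimap.put("even", 4);
        multimap.put("odd", 1);

        // asMap() returns a live Map<K, Collection<V>> view, not a copy
        Map<String, Collection<Integer>> view = multimap.asMap();
        for (Map.Entry<String, Collection<Integer>> entry : view.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // removals made through the view write through to the multimap
        view.remove("odd");
        System.out.println(multimap.containsKey("odd")); // false
    }
}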

Usage

From source file:co.cask.http.BasicHttpResponder.java

@Override
public void sendFile(File file, @Nullable Multimap<String, String> headers) {
    Preconditions.checkArgument(responded.compareAndSet(false, true), "Response has already been sent");
    HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);

    response.setHeader(HttpHeaders.Names.CONTENT_LENGTH, file.length());

    if (keepAlive) {
        response.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    }

    // Add headers; note this will override any headers set by the framework
    if (headers != null) {
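        // asMap() exposes each header name together with all of its values as one Collection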
        for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
            response.setHeader(entry.getKey(), entry.getValue());
        }
    }

    // Write the initial line and the header.
    channel.write(response);

    // Write the content.

    ChannelFuture writeFuture;
    try {
        FileChannel fc = new RandomAccessFile(file, "r").getChannel();

        final FileRegion region = new DefaultFileRegion(fc, 0, file.length());
        writeFuture = channel.write(region);
        writeFuture.addListener(new ChannelFutureProgressListener() {
            public void operationComplete(ChannelFuture future) {
                region.releaseExternalResources();
                if (!keepAlive) {
                    channel.close();
                }
            }

            @Override
            public void operationProgressed(ChannelFuture future, long amount, long current, long total)
                    throws Exception {
                // no-op
            }
        });

    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:org.dllearner.utilities.owl.OWLClassExpressionMinimizer.java

@Override
public OWLClassExpression visit(OWLObjectIntersectionOf ce) {
    List<OWLClassExpression> operands = ce.getOperandsAsList();
    // replace each operand with its short (minimized) form
    for (int i = 0; i < operands.size(); i++) {
        operands.set(i, operands.get(i).accept(this));
    }

    List<OWLClassExpression> oldOperands = new ArrayList<>(new TreeSet<>(operands));
    List<OWLClassExpression> newOperands = new ArrayList<>(operands);

    if (newOperands.size() == 1) {
        return newOperands.iterator().next().accept(this);
    }

    for (int i = 0; i < oldOperands.size(); i++) {
        OWLClassExpression op1 = oldOperands.get(i);
        for (int j = i + 1; j < oldOperands.size(); j++) {
            OWLClassExpression op2 = oldOperands.get(j);

            // remove an operand if it is a superclass of another operand
            if (isSubClassOf(op1, op2)) {
                newOperands.remove(op2);
            } else if (isSubClassOf(op2, op1)) {
                newOperands.remove(op1);
            }
        }
    }

    // combine facet restrictions on the same property
    Multimap<OWLDataPropertyExpression, OWLDataSomeValuesFrom> map = HashMultimap.create();
    for (OWLClassExpression operand : newOperands) {
        if (operand instanceof OWLDataSomeValuesFrom) {
            map.put(((OWLDataSomeValuesFrom) operand).getProperty(), (OWLDataSomeValuesFrom) operand);
        }
    }
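    // iterate the asMap() view so all restrictions on a property can be processed together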
    for (Entry<OWLDataPropertyExpression, Collection<OWLDataSomeValuesFrom>> entry : map.asMap().entrySet()) {
        OWLDataPropertyExpression dp = entry.getKey();
        Collection<OWLDataSomeValuesFrom> datapropertyRestrictions = entry.getValue();

        if (datapropertyRestrictions.size() > 1) {
            Set<OWLFacetRestriction> facetRestrictions = new TreeSet<>();
            for (OWLDataSomeValuesFrom restriction : datapropertyRestrictions) {
                OWLDataRange dataRange = restriction.getFiller();
                if (dataRange instanceof OWLDatatypeRestriction) {
                    facetRestrictions.addAll(((OWLDatatypeRestriction) dataRange).getFacetRestrictions());
                }
            }
            if (facetRestrictions.size() > 1) {
                OWLDatatype datatype = ((OWLDatatypeRestriction) datapropertyRestrictions.iterator().next()
                        .getFiller()).getDatatype();
                OWLDataRange newDataRange = df.getOWLDatatypeRestriction(datatype, facetRestrictions);
                OWLClassExpression newRestriction = df.getOWLDataSomeValuesFrom(dp, newDataRange);
                newOperands.removeAll(datapropertyRestrictions);
                newOperands.add(newRestriction);
            }
        }
    }

    if (newOperands.size() == 1) {
        return newOperands.iterator().next().accept(this);
    }

    return df.getOWLObjectIntersectionOf(new HashSet<>(newOperands));
}

From source file:org.apache.aurora.scheduler.thrift.ReadOnlySchedulerImpl.java

private Map<IJobKey, IJobConfiguration> getJobs(Optional<String> ownerRole,
        Multimap<IJobKey, IScheduledTask> tasks) {

    // We need to synthesize the JobConfiguration from the current tasks because the
    // ImmediateJobManager doesn't store jobs directly and ImmediateJobManager#getJobs always
    // returns an empty Collection.
    Map<IJobKey, IJobConfiguration> jobs = Maps.newHashMap();

    jobs.putAll(Maps.transformEntries(tasks.asMap(), (jobKey, tasks1) -> {

        // Pick the latest transitioned task for each immediate job since the job can be in the
        // middle of an update or some shards have been selectively created.
        TaskConfig mostRecentTaskConfig = Tasks.getLatestActiveTask(tasks1).getAssignedTask().getTask()
                .newBuilder();

        return IJobConfiguration.build(
                new JobConfiguration().setKey(jobKey.newBuilder()).setOwner(mostRecentTaskConfig.getOwner())
                        .setTaskConfig(mostRecentTaskConfig).setInstanceCount(tasks1.size()));
    }));

    // Get cron jobs directly from the manager. Do this after querying the task store so the real
    // template JobConfiguration for a cron job will overwrite the synthesized one that could have
    // been created above.
    Predicate<IJobConfiguration> configFilter = ownerRole.isPresent()
            ? Predicates.compose(Predicates.equalTo(ownerRole.get()), JobKeys::getRole)
            : Predicates.alwaysTrue();
    jobs.putAll(Maps.uniqueIndex(FluentIterable.from(Storage.Util.fetchCronJobs(storage)).filter(configFilter),
            IJobConfiguration::getKey));

    return jobs;
}

From source file:org.jboss.hal.core.modelbrowser.ReadChildren.java

@Override
@SuppressWarnings("unchecked")
public void load(final Node<Context> node, final ResultCallback<Context> callback) {
    if (node.data.isFullyQualified()) {
        Operation operation = new Operation.Builder(node.data.getAddress(), READ_CHILDREN_TYPES_OPERATION)
                .param(INCLUDE_SINGLETONS, true).build();
        dispatcher.execute(operation, result -> {
            List<ModelNode> modelNodes = result.asList();
            Multimap<String, String> resources = HashMultimap.create();
            for (ModelNode modelNode : modelNodes) {
                String name = modelNode.asString();
                if (name.contains("=")) {
                    List<String> parts = Splitter.on('=').limit(2).splitToList(name);
                    resources.put(parts.get(0), parts.get(1));
                } else {
                    resources.put(name, NO_SINGLETON);
                }
            }

            List<Node<Context>> children = new ArrayList<>();
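            // asMap() groups each resource type with the names of its children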
            for (Map.Entry<String, Collection<String>> entry : resources.asMap().entrySet()) {
                String name = entry.getKey();
                Set<String> singletons = new HashSet<>(entry.getValue());
                if (singletons.size() == 1 && singletons.contains(NO_SINGLETON)) {
                    singletons = Collections.emptySet();
                }
                ResourceAddress address = new ResourceAddress(node.data.getAddress()).add(name, "*");
                Context context = new Context(address, singletons);
                // ids need to be unique!
                Node.Builder<Context> builder = new Node.Builder<>(uniqueId(node, name), name, context)
                        .asyncFolder();
                if (!singletons.isEmpty()) {
                    builder.icon(fontAwesome("list-ul"));
                }
                children.add(builder.build());
            }
            callback.result(children.toArray(new Node[children.size()]));
        });

    } else {
        ResourceAddress parentAddress = node.data.getAddress().getParent();
        Operation operation = new Operation.Builder(parentAddress, READ_CHILDREN_NAMES_OPERATION)
                .param(CHILD_TYPE, node.text).build();
        dispatcher.execute(operation, result -> {
            List<ModelNode> modelNodes = result.asList();
            List<Node<Context>> children = new ArrayList<>();
            SortedSet<String> singletons = new TreeSet<>(node.data.getSingletons());

            // Add existing children
            for (ModelNode modelNode : modelNodes) {
                String name = SafeHtmlUtils.fromString(modelNode.asString()).asString();
                singletons.remove(name);
                ResourceAddress address = new ResourceAddress(parentAddress).add(node.text, name);
                Context context = new Context(address, Collections.emptySet());
                Node<Context> child = new Node.Builder<>(uniqueId(node, name), name, context).asyncFolder()
                        .icon(fontAwesome("file-text-o")).build();
                children.add(child);
            }

            // Add non-existing singletons
            for (String singleton : singletons) {
                ResourceAddress address = new ResourceAddress(parentAddress).add(node.text, singleton);
                Context context = new Context(address, Collections.emptySet());
                Node<Context> child = new Node.Builder<>(uniqueId(node, singleton), singleton, context)
                        .icon(fontAwesome("file-o")).disabled().build();
                children.add(child);
            }

            callback.result(children.toArray(new Node[children.size()]));
        });
    }
}

From source file:org.apache.bigtop.datagenerators.bigpetstore.generators.purchase.MultinomialPurchasingModelSampler.java

protected Map<Pair<String, Object>, Double> generateFieldValueWeights(ProductCategory productCategory)
        throws Exception {
    // Get all values for each field by iterating over all products
    Multimap<String, Object> allFieldValues = HashMultimap.create();
    for (String fieldName : productCategory.getFieldNames()) {
        if (!Constants.PRODUCT_MODEL_EXCLUDED_FIELDS.contains(fieldName)) {
            for (Product p : productCategory.getProducts()) {
                Object fieldValue = p.getFieldValue(fieldName);
                allFieldValues.put(fieldName, fieldValue);
            }
        }
    }

    Sampler<Double> sampler = new UniformSampler(seedFactory);

    // shuffle each field's values and assign a weight to each
    Map<Pair<String, Object>, Double> fieldValueWeights = Maps.newHashMap();
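    // iterate the asMap() view to handle all values of a field at once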
    for (Map.Entry<String, Collection<Object>> entry : allFieldValues.asMap().entrySet()) {
        String fieldName = entry.getKey();
        List<Object> shuffled = shuffle(entry.getValue());

        for (int i = 0; i < shuffled.size(); i++) {
            double weight = Constants.PRODUCT_MULTINOMIAL_POSITIVE_WEIGHT;
            if ((i + 1) > Constants.PRODUCT_MULTINOMIAL_POSITIVE_COUNT_MIN) {
                double r = sampler.sample();
                if (r >= Constants.PRODUCT_MULTINOMIAL_POSITIVE_FREQUENCY) {
                    weight = Constants.PRODUCT_MULTINOMIAL_NEGATIVE_WEIGHT;
                }
            }

            Object fieldValue = shuffled.get(i);
            fieldValueWeights.put(Pair.of(fieldName, fieldValue), weight);
        }
    }

    return ImmutableMap.copyOf(fieldValueWeights);
}

From source file:com.squareup.wire.schema.Linker.java

void validateEnumConstantNameUniqueness(Iterable<Type> nestedTypes) {
    Multimap<String, EnumType> nameToType = LinkedHashMultimap.create();
    for (Type type : nestedTypes) {
        if (type instanceof EnumType) {
            EnumType enumType = (EnumType) type;
            for (EnumConstant enumConstant : enumType.constants()) {
                nameToType.put(enumConstant.name(), enumType);
            }
        }
    }

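    // asMap() groups the enum types by shared constant name, making duplicates easy to spot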
    for (Map.Entry<String, Collection<EnumType>> entry : nameToType.asMap().entrySet()) {
        if (entry.getValue().size() > 1) {
            StringBuilder error = new StringBuilder();
            String constant = entry.getKey();
            int index = 1;
            error.append(String.format("multiple enums share constant %s:", constant));
            for (EnumType enumType : entry.getValue()) {
                error.append(String.format("\n  %s. %s.%s (%s)", index++, enumType.type(), constant,
                        enumType.constant(constant).location()));
            }
            addError("%s", error);
        }
    }
}

From source file:com.facebook.swift.codec.metadata.AbstractThriftMetadataBuilder.java

protected final Iterable<ThriftFieldMetadata> buildFieldInjections() {
    Multimap<Optional<Short>, FieldMetadata> fieldsById = Multimaps.index(fields, getThriftFieldId());
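    // asMap().values() yields one Collection<FieldMetadata> per field id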
    return Iterables.transform(fieldsById.asMap().values(),
            new Function<Collection<FieldMetadata>, ThriftFieldMetadata>() {
                @Override
                public ThriftFieldMetadata apply(Collection<FieldMetadata> input) {
                    checkArgument(!input.isEmpty(), "input is empty");
                    return buildField(input);
                }
            });
}

From source file:com.torodb.backend.AbstractReadInterface.java

@Override
@SuppressFBWarnings(value = { "OBL_UNSATISFIED_OBLIGATION",
        "ODR_OPEN_DATABASE_RESOURCE" }, justification = "ResultSet is wrapped in a Cursor<Tuple2<Integer, KVValue<?>>>. It's "
                + "iterated and closed in caller code")
public Cursor<Tuple2<Integer, KvValue<?>>> getCollectionDidsAndProjectionWithFieldsIn(DSLContext dsl,
        MetaDatabase metaDatabase, MetaCollection metaCol, MetaDocPart metaDocPart,
        Multimap<MetaField, KvValue<?>> valuesMultimap) throws SQLException {
    assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null;
    assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null;
    assert valuesMultimap.keySet().stream()
            .allMatch(metafield -> metaDocPart.getMetaFieldByIdentifier(metafield.getIdentifier()) != null);

    Stream<Tuple2<MetaField, Collection<KvValue<?>>>> valuesBatchStream = valuesMultimap.asMap().entrySet()
            .stream().map(e -> new Tuple2<MetaField, Collection<KvValue<?>>>(e.getKey(), e.getValue()));
    if (valuesMultimap.asMap().entrySet().stream().anyMatch(e -> e.getValue().size() > 500)) {
        valuesBatchStream = valuesBatchStream.flatMap(e -> Seq.seq(e.v2.stream()).zipWithIndex()
                .groupBy(t -> t.v2 / 500).entrySet().stream().map(se -> toValuesMap(e.v1, se)));
    }
    Stream<Cursor<Tuple2<Integer, KvValue<?>>>> didProjectionCursorStream = valuesBatchStream
            .map(Unchecked.function(mapBatch -> getCollectionDidsAndProjectionWithFieldsInBatch(dsl,
                    metaDatabase, metaCol, metaDocPart, mapBatch.v1, mapBatch.v2)));
    Stream<Tuple2<Integer, KvValue<?>>> didProjectionStream = didProjectionCursorStream
            .flatMap(cursor -> cursor.getRemaining().stream());

    return new IteratorCursor<>(didProjectionStream.iterator());
}

From source file:com.ikanow.aleph2.management_db.services.CoreManagementDbService.java

/**
 * Performs a test run for a bucket. A test run is a processing cycle that runs only as long
 * as test_spec specifies, or until it has produced the number of results that test_spec
 * specifies, whichever occurs first.
 *
 * Changes the bucket's name so it does not overwrite an existing job, then sends out a
 * start-test message. If that message is picked up, this places an object on both the test
 * queue and the delete queue, which will time out the test and delete the results,
 * respectively, after a certain amount of time.
 */
@Override
public ManagementFuture<Boolean> testBucket(DataBucketBean to_test, ProcessingTestSpecBean test_spec) {
    //create a test bucket to put data into instead of the specified bucket
    final DataBucketBean test_bucket = BucketUtils.convertDataBucketBeanToTest(to_test, to_test.owner_id());
    // - validate the bucket
    final Tuple2<DataBucketBean, Collection<BasicMessageBean>> validation = this._data_bucket_service
            .validateBucket(test_bucket, true);
    if (validation._2().stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false),
                CompletableFuture.completedFuture(validation._2()));
    }
    DataBucketBean validated_test_bucket = validation._1();

    // Create full set of file paths for the test bucket
    try {
        DataBucketCrudService.createFilePaths(test_bucket, this._service_context.getStorageService());
    } catch (Exception e) {
        //return error
        _logger.error("Error creating file paths", e);
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false),
                CompletableFuture
                        .completedFuture(Arrays.asList(ErrorUtils.buildErrorMessage("CoreManagementDbService",
                                "testBucket", "Error launching job: {0}", e.getMessage()))));
    }

    // - is there any test data already present for this user? If so, delete it (?)
    final CompletableFuture<BasicMessageBean> base_future = Lambdas.get(() -> {
        if (Optional.ofNullable(test_spec.overwrite_existing_data()).orElse(true)) {
            return purgeBucket(validated_test_bucket, Optional.empty()).exceptionally(t -> {
                _logger.error("Error clearing output datastore, probably okay: " + "ingest."
                        + validated_test_bucket._id(), t);
                return false;
            });
        } else {
            return CompletableFuture.completedFuture(Unit.unit());
        }
    }).thenCompose(__ -> {
        // Register the bucket with its services before launching the test:
        final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
                .selectDataServices(validated_test_bucket.data_schema(), _service_context);

        final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info
                .asMap().entrySet().stream()
                .map(kv -> kv.getKey().onPublishOrUpdate(validated_test_bucket, Optional.empty(), false,
                        kv.getValue().stream().collect(Collectors.toSet()), Collections.emptySet()))
                .collect(Collectors.toList());

        return CompletableFuture.allOf(ds_update_results.stream().toArray(CompletableFuture[]::new));

    }).thenCompose(__ -> {

        final long max_startup_time_secs = Optional.ofNullable(test_spec.max_startup_time_secs())
                .orElse(DEFAULT_MAX_STARTUP_TIME_SECS);
        final CompletableFuture<Collection<BasicMessageBean>> future_replies = BucketActionSupervisor
                .askBucketActionActor(Optional.empty(), _actor_context.getBucketActionSupervisor(),
                        _actor_context.getActorSystem(),
                        new BucketActionMessage.TestBucketActionMessage(validated_test_bucket, test_spec),
                        Optional.of(FiniteDuration.create(max_startup_time_secs, TimeUnit.SECONDS)))
                .thenApply(msg -> msg.replies());

        return MgmtCrudUtils.getSuccessfulNodes(future_replies, SuccessfulNodeType.all_technologies)
                .thenCombine(future_replies, (hostnames, replies) -> {
                    final String reply_str = replies.stream().map(m -> m.message())
                            .collect(Collectors.joining(";"));
                    // make sure there is at least 1 hostname result, otherwise return an error
                    if (!hostnames.isEmpty()) {
                        // - add to the test queue
                        ICrudService<BucketTimeoutMessage> test_service = getBucketTestQueue(
                                BucketTimeoutMessage.class);
                        final long max_run_time_secs = Optional.ofNullable(test_spec.max_run_time_secs())
                                .orElse(60L);
                        test_service.storeObject(new BucketTimeoutMessage(validated_test_bucket,
                                new Date(System.currentTimeMillis() + (max_run_time_secs * 1000L)), hostnames),
                                true);

                        // - add to the delete queue
                        final ICrudService<BucketDeletionMessage> delete_queue = getBucketDeletionQueue(
                                BucketDeletionMessage.class);
                        final long max_storage_time_sec = Optional.ofNullable(test_spec.max_storage_time_secs())
                                .orElse(86400L);
                        delete_queue.storeObject(new BucketDeletionMessage(validated_test_bucket,
                                new Date(System.currentTimeMillis() + (max_storage_time_sec * 1000)), false),
                                true);

                        _logger.debug("Got hostnames successfully, added test to test queue and delete queue");
                        return ErrorUtils.buildSuccessMessage("CoreManagementDbService", "testBucket",
                                "Created test on hosts {0}, added test to test queue and delete queue\nmessages = {1}",
                                hostnames.stream().collect(Collectors.joining(";")), reply_str);
                    } else {
                        final String err = ErrorUtils
                                .get("Error, no successful actions performed\nmessages = {0}", reply_str);
                        _logger.error(err);
                        return ErrorUtils.buildErrorMessage("CoreManagementDbService", "testBucket", err);
                    }
                }).exceptionally(t -> {
                    //return error
                    _logger.error("Error getting hostnames", t);
                    return ErrorUtils.buildErrorMessage("CoreManagementDbService", "testBucket",
                            "Error launching job: {0}", t.getMessage());
                });
    });
    return FutureUtils.createManagementFuture(base_future.<Boolean>thenApply(m -> m.success()) // (did it work?)
            , base_future.thenApply(m -> Arrays.asList(m))) // (what were the errors?)
    ;
}

From source file:ai.grakn.graql.internal.reasoner.atom.binary.Relation.java

private Multimap<RoleType, RelationPlayer> getRoleRelationPlayerMap() {
    Multimap<RoleType, RelationPlayer> roleRelationPlayerMap = HashMultimap.create();
    Multimap<RoleType, Var> roleVarTypeMap = getRoleVarMap();
    Set<RelationPlayer> relationPlayers = getRelationPlayers();
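    // walk the role -> variable view and match relation players by role label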
    roleVarTypeMap.asMap().entrySet().forEach(e -> {
        RoleType role = e.getKey();
        TypeLabel roleLabel = role.getLabel();
        relationPlayers.stream().filter(rp -> rp.getRoleType().isPresent()).forEach(rp -> {
            VarPatternAdmin roleTypeVar = rp.getRoleType().orElse(null);
            TypeLabel rl = roleTypeVar != null ? roleTypeVar.getTypeLabel().orElse(null) : null;
            if (roleLabel != null && roleLabel.equals(rl)) {
                roleRelationPlayerMap.put(role, rp);
            }
        });
    });
    return roleRelationPlayerMap;
}