Example usage for com.google.common.collect Maps immutableEntry

Introduction

On this page you can find example usage for com.google.common.collect Maps immutableEntry.

Prototype

@GwtCompatible(serializable = true)
public static <K, V> Entry<K, V> immutableEntry(@Nullable K key, @Nullable V value) 

Document

Returns an immutable map entry with the specified key and value.
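
As a quick aside (not taken from any of the projects below), here is a minimal, self-contained sketch of how the returned entry behaves:

import com.google.common.collect.Maps;

import java.util.Map.Entry;

public class ImmutableEntryDemo {
    public static void main(String[] args) {
        // Key and value may each be null, matching the @Nullable parameters in the prototype.
        Entry<String, Integer> entry = Maps.immutableEntry("answer", 42);

        System.out.println(entry.getKey());   // answer
        System.out.println(entry.getValue()); // 42

        // The entry is immutable: setValue is expected to throw UnsupportedOperationException.
        try {
            entry.setValue(43);
        } catch (UnsupportedOperationException expected) {
            System.out.println("setValue is not supported");
        }

        // equals and hashCode follow the Map.Entry contract, so structurally
        // equal entries compare equal.
        System.out.println(entry.equals(Maps.immutableEntry("answer", 42))); // true
    }
}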

Usage

From source file: ome.services.graphs.GraphTraversal.java

/**
 * Traverse model object graph to determine steps for the proposed operation.
 * @param session the Hibernate session to use for HQL queries
 * @param objectInstances the model objects to process, may be unloaded with ID only
 * @param include if the given model objects are to be included (instead of just deleted)
 * @param applyRules if the given model objects should have the policy rules applied to them
 * @return the model objects included in the operation, and the deleted objects, may be unloaded with ID only
 * @throws GraphException if the model objects were not as expected
 */
public Entry<Collection<IObject>, Collection<IObject>> planOperation(Session session,
        Collection<? extends IObject> objectInstances, boolean include, boolean applyRules)
        throws GraphException {
    if (progress.contains(Milestone.PLANNED)) {
        throw new IllegalStateException("operation already planned");
    }
    final Set<CI> targetSet = include ? planning.included : planning.deleted;
    /* note the object instances for processing */
    final SetMultimap<String, Long> objectsToQuery = HashMultimap.create();
    for (final IObject instance : objectInstances) {
        if (instance.isLoaded() && instance.getDetails() != null) {
            final CI object = new CI(instance);
            noteDetails(object, instance.getDetails());
            targetSet.add(object);
        } else {
            objectsToQuery.put(instance.getClass().getName(), instance.getId());
        }
    }
    targetSet.addAll(objectsToCIs(session, objectsToQuery));
    if (applyRules) {
        /* actually do the planning of the operation */
        planning.toProcess.addAll(targetSet);
        planOperation(session);
    } else {
        /* act as if the target objects have no links and no rules match them */
        for (final CI targetObject : targetSet) {
            planning.blockedBy.put(targetObject, new HashSet<CI>());
        }
    }
    progress.add(Milestone.PLANNED);
    /* report which objects are to be included in the operation or deleted so that it can proceed */
    final Collection<IObject> included = new ArrayList<IObject>(planning.included.size());
    for (final CI includedObject : planning.included) {
        included.add(includedObject.toIObject());
    }
    final Collection<IObject> deleted = new ArrayList<IObject>(planning.deleted.size());
    for (final CI deletedObject : planning.deleted) {
        deleted.add(deletedObject.toIObject());
    }
    return Maps.immutableEntry(included, deleted);
}
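
planOperation packs its two result collections into a single return value with Maps.immutableEntry. As a standalone sketch of the same pair-return pattern, here is a hypothetical partitionByLength helper (not part of the OMERO code above):

import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;

public class PairReturnDemo {
    /** Splits words into (short, long) lists, returned together as one immutable entry. */
    static Entry<List<String>, List<String>> partitionByLength(List<String> words) {
        List<String> shortWords = new ArrayList<>();
        List<String> longWords = new ArrayList<>();
        for (String word : words) {
            (word.length() <= 4 ? shortWords : longWords).add(word);
        }
        return Maps.immutableEntry(shortWords, longWords);
    }

    public static void main(String[] args) {
        Entry<List<String>, List<String>> result =
                partitionByLength(List.of("map", "entry", "key", "value"));
        System.out.println(result.getKey());   // [map, key]
        System.out.println(result.getValue()); // [entry, value]
    }
}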

From source file: com.hippo.leveldb.impl.VersionSet.java

Entry<InternalKey, InternalKey> getRange(List<FileMetaData>... inputLists) {
    InternalKey smallest = null;
    InternalKey largest = null;
    for (List<FileMetaData> inputList : inputLists) {
        for (FileMetaData fileMetaData : inputList) {
            if (smallest == null) {
                smallest = fileMetaData.getSmallest();
                largest = fileMetaData.getLargest();
            } else {
                if (internalKeyComparator.compare(fileMetaData.getSmallest(), smallest) < 0) {
                    smallest = fileMetaData.getSmallest();
                }
                if (internalKeyComparator.compare(fileMetaData.getLargest(), largest) > 0) {
                    largest = fileMetaData.getLargest();
                }
            }
        }
    }
    return Maps.immutableEntry(smallest, largest);
}
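
getRange reduces the file metadata to one (smallest, largest) pair. The same reduction works for any Comparable values; the range helper below is an illustrative stand-in for the leveldb types, not code from the project:

import com.google.common.collect.Maps;

import java.util.List;
import java.util.Map.Entry;

public class RangeDemo {
    /** Returns the (min, max) pair of the given values; both sides are null for empty input. */
    static <T extends Comparable<T>> Entry<T, T> range(List<T> values) {
        T smallest = null;
        T largest = null;
        for (T value : values) {
            if (smallest == null || value.compareTo(smallest) < 0) {
                smallest = value;
            }
            if (largest == null || value.compareTo(largest) > 0) {
                largest = value;
            }
        }
        // Null key and value are permitted, so an empty input yields (null, null).
        return Maps.immutableEntry(smallest, largest);
    }

    public static void main(String[] args) {
        System.out.println(range(List.of(5, 3, 9, 1))); // prints 1=9
    }
}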

From source file: org.apache.hadoop.hdfs.notifier.NamespaceNotifierClient.java

void addServer(String host, int port) throws ServerAlreadyKnownException {
    if (getServerPosition(host, port) != -1) {
        throw new ServerAlreadyKnownException("Already got " + host + ":" + port);
    }

    // Put in a random position to ensure load balancing across servers
    int position = notifierClient.generator.nextInt(servers.size() + 1);
    servers.add(position, Maps.immutableEntry(host, port));
}

From source file: ninja.leaping.permissionsex.backend.sql.SqlDao.java

public SqlContextInheritance getContextInheritance() throws SQLException {
    try (PreparedStatement stmt = prepareStatement(getSelectContextInheritanceQuery())) {
        ImmutableMap.Builder<Entry<String, String>, List<Entry<String, String>>> ret = ImmutableMap.builder();
        Entry<String, String> current = null;
        ImmutableList.Builder<Entry<String, String>> builder = null;
        ResultSet rs = stmt.executeQuery();
        while (rs.next()) {
            final String childKey = rs.getString(1), childValue = rs.getString(2), parentKey = rs.getString(3),
                    parentValue = rs.getString(4);
            if (current == null || !childKey.equals(current.getKey())
                    || !childValue.equals(current.getValue())) {
                if (current != null && builder != null) {
                    ret.put(current, builder.build());
                }
                current = Maps.immutableEntry(childKey, childValue);
                builder = ImmutableList.builder();
            }
            builder.add(Maps.immutableEntry(parentKey, parentValue));
        }

        if (current != null) {
            ret.put(current, builder.build());
        }

        return new SqlContextInheritance(ret.build(), null);
    }

}
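
Each child context pair built with Maps.immutableEntry serves as a map key here, which works because the entry's equals and hashCode are value-based. A minimal standalone sketch of that idea, with invented context names:

import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

public class EntryAsKeyDemo {
    public static void main(String[] args) {
        // A (key, value) context pair acts as a composite map key.
        Map<Entry<String, String>, String> settings = new HashMap<>();
        settings.put(Maps.immutableEntry("world", "nether"), "nether settings");

        // A structurally equal entry looks up the same mapping.
        System.out.println(settings.get(Maps.immutableEntry("world", "nether")));
        // prints: nether settings
    }
}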

From source file: com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java

@Override
public void multiPut(Map<String, ? extends Map<Cell, byte[]>> valuesByTable, final long timestamp)
        throws KeyAlreadyExistsException {
    Map<ResultSetFuture, String> resultSetFutures = Maps.newHashMap();
    for (Entry<String, ? extends Map<Cell, byte[]>> e : valuesByTable.entrySet()) {
        final String table = e.getKey();
        // We sort here because some key value stores are more efficient if you store adjacent keys together.
        NavigableMap<Cell, byte[]> sortedMap = ImmutableSortedMap.copyOf(e.getValue());

        Iterable<List<Entry<Cell, byte[]>>> partitions = partitionByCountAndBytes(sortedMap.entrySet(),
                getMultiPutBatchCount(), getMultiPutBatchSizeBytes(), table,
                CQLKeyValueServices.MULTIPUT_ENTRY_SIZING_FUNCTION);

        for (final List<Entry<Cell, byte[]>> p : partitions) {
            List<Entry<Cell, Value>> partition = Lists.transform(p,
                    new Function<Entry<Cell, byte[]>, Entry<Cell, Value>>() {
                        @Override
                        public Entry<Cell, Value> apply(Entry<Cell, byte[]> input) {
                            return Maps.immutableEntry(input.getKey(),
                                    Value.create(input.getValue(), timestamp));
                        }
                    });
            resultSetFutures.put(getPutPartitionResultSetFuture(table, partition, TransactionType.NONE), table);
        }
    }

    for (Entry<ResultSetFuture, String> result : resultSetFutures.entrySet()) {
        ResultSet resultSet;
        try {
            resultSet = result.getKey().getUninterruptibly();
            resultSet.all();
        } catch (Throwable t) {
            throw Throwables.throwUncheckedException(t);
        }
        CQLKeyValueServices.logTracedQuery(getPutQuery(result.getValue(), CassandraConstants.NO_TTL), resultSet,
                session, cqlStatementCache.NORMAL_QUERY);
    }
}
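
The anonymous Function above re-wraps each (Cell, byte[]) entry as a (Cell, Value) entry. Below is a self-contained sketch of the same Lists.transform shape with simpler stand-in types; the length-of-value transformation is only for illustration:

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.List;
import java.util.Map.Entry;

public class TransformEntriesDemo {
    public static void main(String[] args) {
        List<Entry<String, byte[]>> raw =
                List.of(Maps.immutableEntry("row1", new byte[] {1, 2, 3}));

        // Lists.transform returns a lazy view; each element is re-wrapped in a new
        // immutable entry whose value is derived from the original (here, its length).
        List<Entry<String, Integer>> sized = Lists.transform(raw,
                input -> Maps.immutableEntry(input.getKey(), input.getValue().length));

        System.out.println(sized); // [row1=3]
    }
}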

From source file: org.apache.accumulo.core.client.mapreduce.lib.util.InputConfigurator.java

/**
 * Returns the {@link org.apache.accumulo.core.client.mapreduce.InputTableConfig} for the configuration based on the properties set using the single-table
 * input methods.
 * 
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop instance for which to retrieve the configuration
 * @return the config object built from the single input table properties set on the job
 * @since 1.6.0
 */
protected static Map.Entry<String, InputTableConfig> getDefaultInputTableConfig(Class<?> implementingClass,
        Configuration conf) {
    String tableName = getInputTableName(implementingClass, conf);
    if (tableName != null) {
        InputTableConfig queryConfig = new InputTableConfig();
        List<IteratorSetting> itrs = getIterators(implementingClass, conf);
        if (itrs != null)
            queryConfig.setIterators(itrs);
        Set<Pair<Text, Text>> columns = getFetchedColumns(implementingClass, conf);
        if (columns != null)
            queryConfig.fetchColumns(columns);
        List<Range> ranges = null;
        try {
            ranges = getRanges(implementingClass, conf);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        if (ranges != null)
            queryConfig.setRanges(ranges);

        queryConfig.setAutoAdjustRanges(getAutoAdjustRanges(implementingClass, conf))
                .setUseIsolatedScanners(isIsolated(implementingClass, conf))
                .setUseLocalIterators(usesLocalIterators(implementingClass, conf))
                .setOfflineScan(isOfflineScan(implementingClass, conf));
        return Maps.immutableEntry(tableName, queryConfig);
    }
    return null;
}

From source file: org.opendaylight.controller.config.manager.impl.ConfigRegistryImpl.java

public void add(String transactionName, ConfigTransactionControllerInternal transactionController,
        ConfigTransactionLookupRegistry txLookupRegistry) {
    Object oldValue = transactions.putIfAbsent(transactionName,
            Maps.immutableEntry(transactionController, txLookupRegistry));
    if (oldValue != null) {
        throw new IllegalStateException("Error: two transactions with same name");
    }
}

From source file: org.jooby.internal.apitool.SwaggerBuilder.java

private Map<String, Object> swaggerAttributes(Class annotation, Map<String, Object> attributes) {
    String name = annotation.getSimpleName();
    return attributes.entrySet().stream()
            .filter(it -> it.getKey().equalsIgnoreCase(name) || it.getKey().startsWith(name + "."))
            .filter(it -> {
                Object value = it.getValue();
                if (value instanceof String) {
                    return !Strings.isNullOrEmpty(((String) value).trim());
                }
                return value != null;
            }).map(e -> {
                String key = e.getKey();
                Object value = attributes.get(key);
                if (key.equalsIgnoreCase(name)) {
                    return Maps.immutableEntry(name, value);
                }
                return Maps.immutableEntry(key.replace("ApiOperation.", ""), value);
            }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
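
The stream maps attributes to immutable entries and then gathers them with Collectors.toMap. A small standalone sketch of that collect step, using arbitrary sample strings:

import com.google.common.collect.Maps;

import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class CollectEntriesDemo {
    public static void main(String[] args) {
        // Map each element to an immutable entry, then collect the entries into a Map.
        Map<String, Integer> lengths = Stream.of("swagger", "api")
                .map(s -> Maps.immutableEntry(s, s.length()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        System.out.println(lengths); // {swagger=7, api=3} (order may vary)
    }
}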

From source file: org.apache.accumulo.hadoopImpl.mapreduce.lib.InputConfigurator.java

/**
 * Returns the {@link InputTableConfig} for the configuration based on the properties set using
 * the single-table input methods.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop instance for which to retrieve the configuration
 * @param tableName
 *          the table name for which to retrieve the configuration
 * @return the config object built from the single input table properties set on the job
 * @since 1.6.0
 */
protected static Map.Entry<String, InputTableConfig> getDefaultInputTableConfig(Class<?> implementingClass,
        Configuration conf, String tableName) {
    if (tableName != null) {
        InputTableConfig queryConfig = new InputTableConfig();
        List<IteratorSetting> itrs = getIterators(implementingClass, conf);
        if (itrs != null)
            itrs.forEach(itr -> queryConfig.addIterator(itr));
        Set<IteratorSetting.Column> columns = getFetchedColumns(implementingClass, conf);
        if (columns != null)
            queryConfig.fetchColumns(columns);
        List<Range> ranges = null;
        try {
            ranges = getRanges(implementingClass, conf);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        if (ranges != null)
            queryConfig.setRanges(ranges);

        SamplerConfiguration samplerConfig = getSamplerConfiguration(implementingClass, conf);
        if (samplerConfig != null) {
            queryConfig.setSamplerConfiguration(samplerConfig);
        }

        queryConfig.setAutoAdjustRanges(getAutoAdjustRanges(implementingClass, conf))
                .setUseIsolatedScanners(isIsolated(implementingClass, conf))
                .setUseLocalIterators(usesLocalIterators(implementingClass, conf))
                .setOfflineScan(isOfflineScan(implementingClass, conf))
                .setExecutionHints(getExecutionHints(implementingClass, conf));
        return Maps.immutableEntry(tableName, queryConfig);
    }
    return null;
}

From source file: org.apache.accumulo.core.clientImpl.mapreduce.lib.InputConfigurator.java

/**
 * Returns the InputTableConfig for the configuration based on the properties set using the
 * single-table input methods.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop instance for which to retrieve the configuration
 * @return the config object built from the single input table properties set on the job
 * @since 1.6.0
 */
protected static Map.Entry<String, org.apache.accumulo.core.client.mapreduce.InputTableConfig> getDefaultInputTableConfig(
        Class<?> implementingClass, Configuration conf) {
    String tableName = getInputTableName(implementingClass, conf);
    if (tableName != null) {
        org.apache.accumulo.core.client.mapreduce.InputTableConfig queryConfig = new org.apache.accumulo.core.client.mapreduce.InputTableConfig();
        List<IteratorSetting> itrs = getIterators(implementingClass, conf);
        if (itrs != null)
            queryConfig.setIterators(itrs);
        Set<Pair<Text, Text>> columns = getFetchedColumns(implementingClass, conf);
        if (columns != null)
            queryConfig.fetchColumns(columns);
        List<Range> ranges = null;
        try {
            ranges = getRanges(implementingClass, conf);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        if (ranges != null)
            queryConfig.setRanges(ranges);

        SamplerConfiguration samplerConfig = getSamplerConfiguration(implementingClass, conf);
        if (samplerConfig != null) {
            queryConfig.setSamplerConfiguration(samplerConfig);
        }

        queryConfig.setAutoAdjustRanges(getAutoAdjustRanges(implementingClass, conf))
                .setUseIsolatedScanners(isIsolated(implementingClass, conf))
                .setUseLocalIterators(usesLocalIterators(implementingClass, conf))
                .setOfflineScan(isOfflineScan(implementingClass, conf));
        return Maps.immutableEntry(tableName, queryConfig);
    }
    return null;
}