Example usage for com.google.common.collect Maps newHashMapWithExpectedSize

Introduction

On this page you can find example usages of com.google.common.collect.Maps#newHashMapWithExpectedSize taken from open-source projects.

Prototype

public static <K, V> HashMap<K, V> newHashMapWithExpectedSize(int expectedSize) 

Document

Creates a HashMap instance, with a high enough "initial capacity" that it should hold expectedSize elements without growth.
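
Before the project examples, here is a minimal, self-contained sketch of the typical pattern they all follow: sizing the map to a collection whose size is already known. The class and variable names are illustrative only and do not come from any of the source files below.

import com.google.common.collect.Maps;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class NewHashMapWithExpectedSizeExample {
    public static void main(String[] args) {
        // Hypothetical input collection; any collection whose size is known up front works.
        List<String> users = Arrays.asList("alice", "bob", "carol");

        // Pre-size the map for the known number of entries so it should not
        // need to grow (rehash) while being populated.
        Map<String, Integer> nameLengths = Maps.newHashMapWithExpectedSize(users.size());
        for (String user : users) {
            nameLengths.put(user, user.length());
        }

        System.out.println(nameLengths); // e.g. {bob=3, alice=5, carol=5} (iteration order not guaranteed)
    }
}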

Usage

From source file: com.google.gerrit.server.account.IncludingGroupMembership.java

@Inject
IncludingGroupMembership(GroupIncludeCache includeCache, @Assisted IdentifiedUser user) {
    this.includeCache = includeCache;
    this.user = user;

    Set<AccountGroup.UUID> groups = user.state().getInternalGroups();
    memberOf = Maps.newHashMapWithExpectedSize(groups.size());
    for (AccountGroup.UUID g : groups) {
        memberOf.put(g, true);
    }
}

From source file: org.gradoop.flink.io.impl.tlf.functions.TLFFileFormat.java

/**
 * Creates a TLF string representation of a given graph transaction, which
 * has the following format:
 * <p>
 * t # 0
 * v 0 label
 * v 1 label
 * v 2 label
 * e 0 1 edgeLabel
 * e 1 2 edgeLabel
 * e 2 1 edgeLabel
 * </p>
 *
 * @param graphTransaction graph transaction
 * @return TLF string representation
 */
@Override
public String format(GraphTransaction graphTransaction) {
    StringBuilder builder = new StringBuilder();

    vertexIdMap = Maps.newHashMapWithExpectedSize(graphTransaction.getVertices().size());

    // GRAPH HEAD
    writeGraphHead(builder, graphId);
    graphId++;

    // VERTICES
    writeVertices(builder, graphTransaction.getVertices());

    // EDGES
    writeEdges(builder, graphTransaction.getEdges());

    return builder.toString().trim();
}

From source file: org.apache.shindig.gadgets.config.XhrwrapperConfigContributor.java

/** {@inheritDoc} */
private void addOAuthConfig(Map<String, String> xhrWrapperConfig, View view) {
    Map<String, String> oAuthConfig = Maps.newHashMapWithExpectedSize(3);
    try {
        OAuthArguments oAuthArguments = new OAuthArguments(view);
        oAuthConfig.put("authorization", "oauth");
        oAuthConfig.put("oauthService", oAuthArguments.getServiceName());
        if (!"".equals(oAuthArguments.getTokenName())) {
            oAuthConfig.put("oauthTokenName", oAuthArguments.getTokenName());
        }
        xhrWrapperConfig.putAll(oAuthConfig);
    } catch (GadgetException e) {
        // Do not add any OAuth configuration if an exception was thrown
    }
}

From source file: org.gradoop.flink.algorithms.fsm.transactional.tle.functions.ToFSMGraph.java

/**
 * Transforms a graph's edges.
 *
 * @param graph graph
 * @param vertexIdMap mapping of Gradoop ids to integers
 * @return id-edge map
 */
protected Map<Integer, FSMEdge> transformEdges(GraphTransaction graph, Map<GradoopId, Integer> vertexIdMap) {
    Map<Integer, FSMEdge> fsmEdges = Maps.newHashMapWithExpectedSize(graph.getEdges().size());

    int edgeId = 0;
    for (Edge edge : graph.getEdges()) {

        int sourceId = vertexIdMap.get(edge.getSourceId());
        int targetId = vertexIdMap.get(edge.getTargetId());

        fsmEdges.put(edgeId, new FSMEdge(sourceId, edge.getLabel(), targetId));

        edgeId++;
    }
    return fsmEdges;
}

From source file: org.gradoop.io.impl.tlf.functions.TLFFileFormat.java

/**
 * Creates a TLF string representation of a given graph transaction.
 *
 * @param graphTransaction graph transaction
 * @return TLF string representation
 */
@Override
public String format(GraphTransaction<G, V, E> graphTransaction) {
    graphId++;

    Map<GradoopId, Integer> vertexIdMap = Maps
            .newHashMapWithExpectedSize(graphTransaction.getVertices().size());

    Collection<String> lines = Lists.newArrayListWithExpectedSize(
            graphTransaction.getVertices().size() + graphTransaction.getEdges().size() + 1);

    // GRAPH HEAD
    lines.add(TLFGraph.SYMBOL + " # " + graphId);

    // VERTICES
    int vertexId = 0;
    for (V vertex : graphTransaction.getVertices()) {
        vertexIdMap.put(vertex.getId(), vertexId);
        lines.add(TLFVertex.SYMBOL + " " + vertexId + " " + vertex.getLabel());
        vertexId++;
    }

    // EDGES
    for (E edge : graphTransaction.getEdges()) {
        Integer sourceId = vertexIdMap.get(edge.getSourceId());
        Integer targetId = vertexIdMap.get(edge.getTargetId());

        lines.add(TLFEdge.SYMBOL + " " + sourceId + " " + targetId + " " + edge.getLabel());
    }
    return StringUtils.join(lines, "\n") + "\n";
}

From source file: org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.AbstractImmutableDataContainerNodeBuilder.java

protected AbstractImmutableDataContainerNodeBuilder(final int sizeHint) {
    if (sizeHint >= 0) {
        this.value = Maps.newHashMapWithExpectedSize(sizeHint);
    } else {
        this.value = new HashMap<>(DEFAULT_CAPACITY);
    }
    this.dirty = false;
}

From source file: cosmos.records.impl.MapRecord.java

public <T1, T2> MapRecord(Map<T1, T2> untypedDoc, String docId, ColumnVisibility docVisibility,
        RecordFunction<T1, T2> function) {
    checkNotNull(untypedDoc);
    checkNotNull(docId);
    checkNotNull(docVisibility);
    checkNotNull(function);

    this.docId = docId;
    this.document = Maps.newHashMapWithExpectedSize(untypedDoc.size());
    this.docVisibility = docVisibility;

    for (Entry<T1, T2> untypedEntry : untypedDoc.entrySet()) {
        Entry<Column, RecordValue<?>> entry = function.apply(untypedEntry);
        this.document.put(entry.getKey(), entry.getValue());
    }
}

From source file: org.sonar.server.qualityprofile.index.ActiveRuleDoc.java

public ActiveRuleDoc(ActiveRuleKey key) {
    super(Maps.newHashMapWithExpectedSize(9));
    checkNotNull(key, "ActiveRuleKey cannot be null");
    this.key = key;
    setField(FIELD_ACTIVE_RULE_KEY, key.toString());
    setField(FIELD_ACTIVE_RULE_PROFILE_KEY, key.qProfile());
    setField(FIELD_ACTIVE_RULE_RULE_KEY, key.ruleKey().toString());
    setField(FIELD_ACTIVE_RULE_REPOSITORY, key.ruleKey().repository());
}

From source file: org.eclipse.sirius.diagram.sequence.business.internal.refresh.SequenceRefreshExtension.java

/**
 * {@inheritDoc}
 */
public void beforeRefresh(DDiagram dDiagram) {
    if (dDiagram instanceof SequenceDDiagram) {
        currentDiagram = (SequenceDDiagram) dDiagram;

        Collection<DDiagramElement> nodeEvents = getEventsToSync(currentDiagram);

        if (nodeEvents.size() != 0) {
            flags = Maps.newHashMapWithExpectedSize(nodeEvents.size());
            for (DDiagramElement elt : nodeEvents) {
                Iterable<AbsoluteBoundsFilter> flag = Iterables.filter(elt.getGraphicalFilters(),
                        AbsoluteBoundsFilter.class);
                EObject semanticTarget = elt.getTarget();
                if (semanticTarget != null && Iterables.size(flag) == 1) {
                    flags.put(semanticTarget, Iterables.getOnlyElement(flag));
                }
            }
        }
    }
}

From source file: com.pinterest.terrapin.controller.ControllerUtil.java

/**
 * Builds the helix ideal state for HDFS directory by finding the locations of HDFS blocks and
 * creating an ideal state assignment based on those.
 *
 * @param hdfsClient The HDFS client object.
 * @param hdfsDir The HDFS directory containing the various files.
 * @param resourceName The name of the Helix resource for which the ideal state is being created.
 * @param partitioner The partitioner type, used for extracting helix partition names from
 *                    HDFS files.
 * @param numReplicas The number of replicas for each partition.
 * @param enableZkCompression Whether data in zk is kept compressed.
 * @return The ideal state as computed based on HDFS block placement.
 * @throws ControllerException
 */
public static IdealState buildIdealStateForHdfsDir(DFSClient hdfsClient, String hdfsDir, String resourceName,
        PartitionerType partitioner, int numReplicas, boolean enableZkCompression) throws ControllerException {
    List<HdfsFileStatus> fileList;
    try {
        fileList = TerrapinUtil.getHdfsFileList(hdfsClient, hdfsDir);
    } catch (IOException e) {
        throw new ControllerException("Exception while listing files in " + hdfsDir,
                ControllerErrorCode.HDFS_ERROR);
    }
    // Mapping from file to HDFS block locations.
    Map<Integer, Set<String>> hdfsBlockMapping = Maps.newHashMapWithExpectedSize(fileList.size());
    for (HdfsFileStatus fileStatus : fileList) {
        Integer partitionName = TerrapinUtil.extractPartitionName(fileStatus.getLocalName(), partitioner);
        if (partitionName == null) {
            LOG.info("Skipping " + fileStatus.getLocalName() + " for " + hdfsDir);
            continue;
        }
        String fullName = fileStatus.getFullName(hdfsDir);
        BlockLocation[] locations = null;
        try {
            locations = hdfsClient.getBlockLocations(fullName, 0, fileStatus.getLen());
        } catch (Exception e) {
            throw new ControllerException("Exception while getting block locations " + e.getMessage(),
                    ControllerErrorCode.HDFS_ERROR);
        }
        Set<String> instanceSet = Sets.newHashSetWithExpectedSize(3);
        BlockLocation firstLocation = locations[0];
        String[] hosts = null;
        try {
            hosts = firstLocation.getHosts();
        } catch (IOException e) {
            throw new ControllerException("Exception while getting hosts " + e.getMessage(),
                    ControllerErrorCode.HDFS_ERROR);
        }
        for (String host : hosts) {
            instanceSet.add(host);
        }
        hdfsBlockMapping.put(partitionName, instanceSet);
    }
    // Assign helix partitions for the resource - which is the HDFS directory.
    int bucketSize = TerrapinUtil.getBucketSize(hdfsBlockMapping.size(), enableZkCompression);
    CustomModeISBuilder idealStateBuilder = new CustomModeISBuilder(resourceName);
    for (Map.Entry<Integer, Set<String>> mapping : hdfsBlockMapping.entrySet()) {
        // Make partitions globally unique
        String partitionName = null;
        // This is needed because of the way helix parses partition numbers for buckets.
        if (bucketSize > 0) {
            partitionName = resourceName + "_" + mapping.getKey();
        } else {
            partitionName = resourceName + "$" + mapping.getKey();
        }
        Set<String> instanceSet = mapping.getValue();
        for (String instance : instanceSet) {
            idealStateBuilder.assignInstanceAndState(partitionName,
                    TerrapinUtil.getHelixInstanceFromHDFSHost(instance), "ONLINE");
        }
    }
    idealStateBuilder.setStateModel("OnlineOffline");
    idealStateBuilder.setNumReplica(numReplicas);
    idealStateBuilder.setNumPartitions(hdfsBlockMapping.size());
    IdealState is = idealStateBuilder.build();
    if (bucketSize > 0) {
        is.setBucketSize(bucketSize);
    }
    is.setRebalanceMode(IdealState.RebalanceMode.CUSTOMIZED);
    if (enableZkCompression) {
        TerrapinUtil.compressIdealState(is);
    }
    return is;
}