Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

This page collects example usages of com.google.common.collect.Sets#newHashSetWithExpectedSize from open source projects.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.
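
Before the examples, here is a minimal, self-contained sketch (the class and method names are illustrative, not from Guava or the examples below) of the factory's main benefit: the returned set is pre-sized so that the expected number of additions should not trigger a rehash.

import com.google.common.collect.Sets;

import java.util.HashSet;
import java.util.List;

public class ExpectedSizeDemo {

    // Copies a list into a HashSet. Because the set is created with
    // newHashSetWithExpectedSize(list.size()), adding that many elements
    // should not cause the backing table to resize.
    public static <E> HashSet<E> toSet(List<E> list) {
        HashSet<E> set = Sets.newHashSetWithExpectedSize(list.size());
        set.addAll(list);
        return set;
    }
}

By contrast, new HashSet<>(list.size()) treats its argument as a raw initial capacity, so with the default 0.75 load factor it can still rehash before all elements are added; the Guava factory pads the capacity to account for the load factor.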

Usage

From source file: com.edmunds.etm.system.impl.AgentMonitor.java

protected void onAgentsUpdated(ZooKeeperTreeNode rootNode) {
    Collection<ZooKeeperTreeNode> childNodes = rootNode.getChildren().values();

    Set<AgentInstance> agents = Sets.newHashSetWithExpectedSize(childNodes.size());
    for (ZooKeeperTreeNode treeNode : childNodes) {
        AgentInstance instance = bytesToAgentInstance(treeNode.getData());
        if (instance != null) {
            agents.add(instance);
        }
    }

    connectedAgents = agents;
}
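
A detail worth noting in this example: the replacement set is fully populated before being assigned to connectedAgents, so readers of that field never observe a half-built set (assuming the field is safely published, e.g. declared volatile; its declaration is not shown here).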

From source file: org.jetbrains.kotlin.resolve.calls.smartcasts.DelegatingDataFlowInfo.java

@Override
@NotNull
public Set<KotlinType> getPossibleTypes(@NotNull DataFlowValue key) {
    KotlinType originalType = key.getType();
    Set<KotlinType> types = collectTypesFromMeAndParents(key);
    if (getNullability(key).canBeNull()) {
        return types;
    }

    Set<KotlinType> enrichedTypes = Sets.newHashSetWithExpectedSize(types.size() + 1);
    if (originalType.isMarkedNullable()) {
        enrichedTypes.add(TypeUtils.makeNotNullable(originalType));
    }
    for (KotlinType type : types) {
        enrichedTypes.add(TypeUtils.makeNotNullable(type));
    }

    return enrichedTypes;
}

From source file: com.knewton.mapreduce.SSTableRecordReader.java

/**
 * Performs all the necessary actions to initialize and prepare this record reader.
 */
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {
    this.ctx = context;
    conf = context.getConfiguration();
    keysRead = 0;
    components = Sets.newHashSetWithExpectedSize(3);
    FileSplit split = (FileSplit) inputSplit;
    validateConfiguration(conf);

    // Get comparator. Subcomparator can be null.
    AbstractType<?> comparator = getConfComparator(conf);
    AbstractType<?> subcomparator = getConfSubComparator(conf);

    // Get partitioner for keys
    IPartitioner partitioner = getConfPartitioner(conf);

    // Move minimum required db tables to local disk.
    Path dataTablePath = split.getPath();
    FileSystem remoteFS = FileSystem.get(dataTablePath.toUri(), conf);
    FileSystem localFS = FileSystem.getLocal(conf);
    copyTablesToLocal(remoteFS, localFS, dataTablePath, context);
    CFMetaData cfMetaData;
    if (getConfIsSparse(conf)) {
        cfMetaData = CFMetaData.sparseCFMetaData(getDescriptor().ksname, getDescriptor().cfname, comparator);
    } else {
        cfMetaData = CFMetaData.denseCFMetaData(getDescriptor().ksname, getDescriptor().cfname, comparator,
                subcomparator);
    }
    // Open table and get scanner
    SSTableReader tableReader = openSSTableReader(partitioner, cfMetaData);
    setTableScanner(tableReader);
}

From source file: com.android.tools.lint.checks.LayoutInflationDetector.java

@Override
public void visitDocument(@NonNull XmlContext context, @NonNull Document document) {
    Element root = document.getDocumentElement();
    if (root != null) {
        NamedNodeMap attributes = root.getAttributes();
        for (int i = 0, n = attributes.getLength(); i < n; i++) {
            Attr attribute = (Attr) attributes.item(i);
            if (attribute.getLocalName() != null
                    && attribute.getLocalName().startsWith(ATTR_LAYOUT_RESOURCE_PREFIX)) {
                if (mLayoutsWithRootLayoutParams == null) {
                    mLayoutsWithRootLayoutParams = Sets.newHashSetWithExpectedSize(20);
                }
                mLayoutsWithRootLayoutParams.add(LintUtils.getBaseName(context.file.getName()));
                break;
            }
        }
    }
}

From source file: com.atlassian.jira.rest.client.internal.json.CimFieldsInfoMapJsonParser.java

private Set<StandardOperation> parseOperations(JSONArray operations) throws JSONException {
    final int operationsCount = operations.length();
    final Set<StandardOperation> res = Sets.newHashSetWithExpectedSize(operationsCount);
    for (int i = 0; i < operationsCount; i++) {
        String opName = operations.getString(i);
        StandardOperation op = StandardOperation.valueOf(opName.toUpperCase());
        res.add(op);
    }
    return res;
}

From source file: com.google.devtools.build.lib.graph.Node.java

/**
 * Adds 'value' to either the predecessor or successor set, updating the
 * appropriate field as necessary.
 * @return {@code true} if the set was modified; {@code false} otherwise
 */
private boolean add(boolean predecessorSet, Node<T> value) {
    final Collection<Node<T>> set = predecessorSet ? preds : succs;
    if (set == null) {
        // null -> SingletonList
        return updateField(predecessorSet, Collections.singletonList(value));
    }
    if (set.contains(value)) {
        // already exists in this set
        return false;
    }
    int previousSize = set.size();
    if (previousSize == 1) {
        // SingletonList -> ArrayList
        Collection<Node<T>> newSet = new ArrayList<>(ARRAYLIST_THRESHOLD);
        newSet.addAll(set);
        newSet.add(value);
        return updateField(predecessorSet, newSet);
    } else if (previousSize < ARRAYLIST_THRESHOLD) {
        // ArrayList
        set.add(value);
        return true;
    } else if (previousSize == ARRAYLIST_THRESHOLD) {
        // ArrayList -> HashSet
        Collection<Node<T>> newSet = Sets.newHashSetWithExpectedSize(INITIAL_HASHSET_CAPACITY);
        newSet.addAll(set);
        newSet.add(value);
        return updateField(predecessorSet, newSet);
    } else {
        // HashSet
        set.add(value);
        return true;
    }
}
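
Note how the expected-size factory supports this example's storage strategy: the collection is promoted from a singleton list to an ArrayList, and only becomes a HashSet once it outgrows ARRAYLIST_THRESHOLD; at that point newHashSetWithExpectedSize(INITIAL_HASHSET_CAPACITY) pre-sizes the table for the expected growth rather than starting from the default capacity.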

From source file: com.torodb.torod.db.executor.jobs.InsertSplitDocumentCallable.java

private InsertResponse transactionalInsert() throws ImplementationDbException {
    DbConnection connection = connectionProvider.get();
    List<WriteError> errors = Lists.newLinkedList();

    try {
        /*
        * First we need to store the root documents
        */
        connection.insertRootDocuments(collection, docs);

        /*
        * Then we have to store the subdocuments. It is more efficient to do one insert per table, so inserts
        * are grouped by subdocument type.
        * To do that, we could build a Map<SubDocType, List<SubDocument>> and then iterate over its keys, but
        * that would duplicate memory, and the documents to insert may be very big. So we do it in a functional
        * way: first we collect all types, then, for each type 't' and document 'd', we take
        * d.getSubDocuments().row(t).values().iterator() and finally merge the iterators grouped by type.
        */
        Set<SubDocType> types = Sets.newHashSetWithExpectedSize(10 * docs.size());
        for (SplitDocument splitDocument : docs) {
            types.addAll(splitDocument.getSubDocuments().rowKeySet());
        }

        /*
        * The following code, which uses Guava functions, is equivalent to this JDK 8 code:
        * for (SubDocType type : types) {
        *   java.util.function.Function<SplitDocument, Stream<SubDocument>> f = (sd) -> sd.getSubDocuments().row(type).values().stream();
        *
        *   Stream<SubDocument> flatMap = docs.stream().map(f).flatMap((stream) -> stream);
        *
        *   connection.insertSubdocuments(collection, type, flatMap.iterator());
        *
        * }
        */
        for (SubDocType type : types) {
            Function<SplitDocument, Iterable<SubDocument>> extractor = new SubDocumentExtractorFunction(type);

            connection.insertSubdocuments(collection, type,
                    Iterables.concat(Iterables.transform(docs, extractor)).iterator());
        }

        return createResponse(docs.size(), errors);
    } catch (UserDbException ex) {
        appendError(errors, ex, 0);
        connection.rollback();
    }
    return createResponse(0, errors);
}
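
For readers unfamiliar with the Guava idiom above, here is a minimal, self-contained sketch (the data and tag names are purely illustrative) of the same lazy flattening pattern: Iterables.transform maps each element to a nested Iterable, and Iterables.concat chains those into a single flat Iterable without materializing an intermediate map.

import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Iterables;

import java.util.List;

public class FlattenDemo {

    public static void main(String[] args) {
        // Each "document" carries several tagged values.
        List<ImmutableMultimap<String, Integer>> docs = ImmutableList.of(
                ImmutableMultimap.of("a", 1, "b", 2),
                ImmutableMultimap.of("a", 3));

        final String tag = "a";
        // Lazily map each document to its values for 'tag'...
        Iterable<Iterable<Integer>> perDoc = Iterables.transform(docs,
                new Function<ImmutableMultimap<String, Integer>, Iterable<Integer>>() {
                    @Override
                    public Iterable<Integer> apply(ImmutableMultimap<String, Integer> doc) {
                        return doc.get(tag);
                    }
                });
        // ...and chain them into one flat sequence: prints 1, then 3.
        for (Integer value : Iterables.concat(perDoc)) {
            System.out.println(value);
        }
    }
}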

From source file: com.b2international.snowowl.datastore.server.DelegateCDOServerChangeManager.java

/**
 * Provides a way to handle transactions that are to be committed to the lightweight store.
 * @throws RuntimeException to indicate that the commit operation must not be executed against the index store.
 */
public void handleTransactionBeforeCommitting() throws RuntimeException {

    try {

        lockBranch();
        createProcessors(branchPath);

        final Collection<Job> changeProcessingJobs = Sets.newHashSetWithExpectedSize(changeProcessors.size());
        final InternalSession session = StoreThreadLocal.getSession();
        final Metrics metrics = MetricsThreadLocal.get();

        if (changeProcessors.size() == 1) {
            final ICDOChangeProcessor processor = Iterables.getOnlyElement(changeProcessors);
            processor.process(commitChangeSet);
        } else {
            for (final ICDOChangeProcessor processor : changeProcessors) {
                changeProcessingJobs.add(new Job("Processing commit information with " + processor.getName()) {
                    @Override
                    public IStatus run(final IProgressMonitor monitor) {

                        try {
                            StoreThreadLocal.setSession(session);
                            MetricsThreadLocal.set(metrics);

                            processor.process(commitChangeSet);
                            return Statuses.ok();
                        } catch (final Exception e) {
                            return Statuses.error(DatastoreServerActivator.PLUGIN_ID,
                                    "Error while processing changes with " + processor.getName()
                                            + " for branch: " + branchPath,
                                    e);
                        } finally {
                            //release session for all threads
                            StoreThreadLocal.release();
                            MetricsThreadLocal.release();
                        }
                    }
                });
            }
        }
        ForkJoinUtils.runJobsInParallelWithErrorHandling(changeProcessingJobs, null);
    } catch (final Exception e) {
        try {
            /* 
             * XXX (apeteri): we don't know if we got here via applyChanges or a CDO commit, so handleTransactionRollback() may be called 
             * once from here and then once again, separately.
             */
            handleTransactionRollback();
        } catch (final Exception e2) {
            e.addSuppressed(e2);
        }

        if (e instanceof RuntimeException) {
            if (e.getCause() instanceof ApiException) {
                throw (ApiException) e.getCause();
            } else {
                throw new SnowowlRuntimeException(
                        "Error when executing change processors on branch: " + branchPath, e);
            }
        } else {
            throw new SnowowlRuntimeException("Error when executing change processors on branch: " + branchPath,
                    e);
        }
    }
}

From source file: com.torodb.torod.db.executor.jobs.InsertCallable.java

private InsertResponse transactionalInsert() throws ImplementationDbException {
    DbConnection connection = getConnection();
    List<WriteError> errors = Lists.newLinkedList();

    try {
        /*
        * First we need to store the root documents
        */
        connection.insertRootDocuments(collection, docs);

        /*
        * Then we have to store the subdocuments. It is more efficient to do one insert per table, so inserts
        * are grouped by subdocument type.
        * To do that, we could build a Map<SubDocType, List<SubDocument>> and then iterate over its keys, but
        * that would duplicate memory, and the documents to insert may be very big. So we do it in a functional
        * way: first we collect all types, then, for each type 't' and document 'd', we take
        * d.getSubDocuments().row(t).values().iterator() and finally merge the iterators grouped by type.
        */
        Set<SubDocType> types = Sets.newHashSetWithExpectedSize(10 * docs.size());
        for (SplitDocument splitDocument : docs) {
            types.addAll(splitDocument.getSubDocuments().rowKeySet());
        }

        /*
        * The following code, which uses Guava functions, is equivalent to this JDK 8 code:
        * for (SubDocType type : types) {
        *   java.util.function.Function<SplitDocument, Stream<SubDocument>> f = (sd) -> sd.getSubDocuments().row(type).values().stream();
        *
        *   Stream<SubDocument> flatMap = docs.stream().map(f).flatMap((stream) -> stream);
        *
        *   connection.insertSubdocuments(collection, type, flatMap.iterator());
        *
        * }
        */
        for (SubDocType type : types) {
            Function<SplitDocument, Iterable<SubDocument>> extractor = new SubDocumentExtractorFunction(type);

            connection.insertSubdocuments(collection, type,
                    Iterables.concat(Iterables.transform(docs, extractor)).iterator());
        }

        return createResponse(docs.size(), errors);
    } catch (UserDbException ex) {
        appendError(errors, ex, 0);
        connection.rollback();
    }
    return createResponse(0, errors);
}

From source file: org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences.java

/**
 * Merges the given module-based capabilities into this instance's current module-based capabilities.
 *
 * @param netconfSessionModuleCapabilities capabilities to merge into this instance
 *
 * @return a new preferences instance with the merged module-based capabilities
 */
public NetconfSessionPreferences addModuleCaps(
        final NetconfSessionPreferences netconfSessionModuleCapabilities) {
    final HashSet<QName> mergedCaps = Sets.newHashSetWithExpectedSize(
            moduleBasedCaps.size() + netconfSessionModuleCapabilities.getModuleBasedCaps().size());
    mergedCaps.addAll(moduleBasedCaps);
    mergedCaps.addAll(netconfSessionModuleCapabilities.getModuleBasedCaps());
    return new NetconfSessionPreferences(getNonModuleCaps(), mergedCaps);
}
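
Note the sizing choice in this last example: the expected size is the sum of the two capability sets' sizes, which guarantees the merge completes without rehashing, at the cost of over-allocating when the sets overlap.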