Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

This page collects example usages of com.google.common.collect.Sets.newHashSetWithExpectedSize.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.
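
Before the project samples, here is a minimal, self-contained sketch (not taken from any of the source files below; the class and variable names are illustrative) showing the typical pattern: when the number of elements is known in advance, sizing the set up front means it should not need to resize while it is filled.

import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class NewHashSetWithExpectedSizeExample {

    public static void main(String[] args) {
        // Illustrative input; in the real examples below the expected size
        // usually comes from another collection or array.
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // Pre-size the set for names.size() elements so it should not have to
        // resize while the elements are added. The set can still grow past that size.
        HashSet<String> unique = Sets.newHashSetWithExpectedSize(names.size());
        unique.addAll(names);

        System.out.println(unique); // e.g. [gamma, alpha, beta]
    }
}

Compared with Sets.newHashSet(), the only difference is the initial capacity hint; both return a plain java.util.HashSet.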

Usage

From source file: com.opengamma.financial.analytics.model.sensitivities.ExternallyProvidedSensitivitiesYieldCurveNodeSensitivitiesFunction.java

@Override
public Set<ValueSpecification> getResults(final FunctionCompilationContext context,
        final ComputationTarget target) {
    final ValueProperties.Builder properties = createValueProperties(target);
    properties.withAny(ValuePropertyNames.CURVE);
    properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
    final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
    final ComputationTargetSpecification targetSpec = target.toSpecification();
    results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
    s_logger.debug("getResults(1) = " + results);
    return results;
}

From source file: org.zenoss.zep.index.impl.MultiBackendEventIndexDao.java

/** Only call this within a backendsUse.lock() block. */
private void processTasks(String backendId, List<EventIndexBackendTask> tasks, WorkQueue q)
        throws ZepException {
    final EventIndexBackendConfiguration configuration = backends.get(backendId);
    if (configuration == null)
        throw new ZepException("Tried to process tasks for unknown backend: " + backendId);
    final EventIndexBackend backend = configuration.getBackend();
    if (backend == null)
        throw new ZepException("Tried to process tasks for unknown backend: " + backendId);
    logger.debug("Processing {} tasks for backend {}", tasks.size(), backendId);

    final Set<EventIndexBackendTask> flushes = Sets.newHashSet();
    final Map<String, EventIndexBackendTask> indexTasks = Maps.newHashMapWithExpectedSize(tasks.size());
    final Set<EventSummary> toIndex = Sets.newHashSetWithExpectedSize(tasks.size());

    for (EventIndexBackendTask task : tasks) {
        switch (task.op) {
        case FLUSH:
            flushes.add(task);
            break;
        case INDEX_EVENT:
            indexTasks.put(task.uuid, task);
            toIndex.add(EventSummary.newBuilder().setUuid(task.uuid).setLastSeenTime(task.lastSeen).build());
            break;
        default:
            logger.error("UNEXPECTED TASK OPERATION: {}", task.op);
            q.complete(task);
        }
    }

    try {
        if (!toIndex.isEmpty()) {
            logger.debug(String.format("Looking up %d events by primary key", toIndex.size()));
            List<EventSummary> events = eventDao.findByKey(toIndex);
            if (events.size() != toIndex.size())
                logger.info("Found {} of {} events by primary key", events.size(), toIndex.size());
            else
                logger.debug("Found {} of {} events by primary key", events.size(), toIndex.size());
            try {
                backend.index(events);
                logger.debug("Indexed {} events", events.size());
            } catch (ZepException e) {
                if (logger.isDebugEnabled())
                    logger.warn(String.format("failed to process task to index events (%d) for backend %s",
                            events.size(), backendId), e);
                else
                    logger.warn(String.format("failed to process task to index events (%d) for backend %s",
                            events.size(), backendId));
            }
            List<EventIndexBackendTask> completedTasks = Lists.newArrayListWithExpectedSize(events.size());
            for (EventSummary event : events) {
                EventIndexBackendTask task = indexTasks.remove(event.getUuid());
                if (task != null) // should always be true
                    completedTasks.add(task);
            }
            q.completeAll(completedTasks);

            if (!indexTasks.isEmpty()) {
                try {
                    if (configuration.isHonorDeletes()) {
                        logger.debug(
                                "Removing {} events from the index since they weren't found by primary key in the database",
                                indexTasks.size());
                        backend.delete(indexTasks.keySet());
                    }
                    q.completeAll(indexTasks.values());
                } catch (ZepException e) {
                    if (logger.isDebugEnabled())
                        logger.warn(String.format("failed to delete %d events from backend %s", toIndex.size(),
                                backendId), e);
                    else
                        logger.warn(String.format("failed to delete %d events from backend %s", toIndex.size(),
                                backendId));
                }
            }
        }

        if (!flushes.isEmpty()) {
            try {
                logger.debug("flushing backend");
                backend.flush();
                q.completeAll(flushes);
            } catch (ZepException e) {
                if (logger.isDebugEnabled())
                    logger.warn(String.format("failed to process tasks %s for backend %s", flushes, backendId),
                            e);
                else
                    logger.warn(String.format("failed to process tasks %s for backend %s", flushes, backendId));

            }
        }
    } catch (ZepException e) {
        if (logger.isDebugEnabled())
            logger.warn(String.format("failed to find events for UUIDs %s for backend %s", indexTasks.keySet(),
                    backendId), e);
        else
            logger.warn(String.format("failed to find events for UUIDs %s for backend %s", indexTasks.keySet(),
                    backendId));
    }
}

From source file: com.google.gerrit.server.notedb.ChangeDraftUpdate.java

private CommitBuilder storeCommentsInNotes(RevWalk rw, ObjectInserter ins, ObjectId curr, CommitBuilder cb)
        throws ConfigInvalidException, OrmException, IOException {
    RevisionNoteMap<ChangeRevisionNote> rnm = getRevisionNoteMap(rw, curr);
    Set<RevId> updatedRevs = Sets.newHashSetWithExpectedSize(rnm.revisionNotes.size());
    RevisionNoteBuilder.Cache cache = new RevisionNoteBuilder.Cache(rnm);

    for (Comment c : put) {
        if (!delete.contains(key(c))) {
            cache.get(new RevId(c.revId)).putComment(c);
        }
    }
    for (Key k : delete) {
        cache.get(new RevId(k.revId())).deleteComment(k.key());
    }

    Map<RevId, RevisionNoteBuilder> builders = cache.getBuilders();
    boolean touchedAnyRevs = false;
    boolean hasComments = false;
    for (Map.Entry<RevId, RevisionNoteBuilder> e : builders.entrySet()) {
        updatedRevs.add(e.getKey());
        ObjectId id = ObjectId.fromString(e.getKey().get());
        byte[] data = e.getValue().build(noteUtil, noteUtil.getWriteJson());
        if (!Arrays.equals(data, e.getValue().baseRaw)) {
            touchedAnyRevs = true;
        }
        if (data.length == 0) {
            rnm.noteMap.remove(id);
        } else {
            hasComments = true;
            ObjectId dataBlob = ins.insert(OBJ_BLOB, data);
            rnm.noteMap.set(id, dataBlob);
        }
    }

    // If we didn't touch any notes, tell the caller this was a no-op update. We
    // couldn't have done this in isEmpty() below because we hadn't read the old
    // data yet.
    if (!touchedAnyRevs) {
        return NO_OP_UPDATE;
    }

    // If we touched every revision and there are no comments left, tell the
    // caller to delete the entire ref.
    boolean touchedAllRevs = updatedRevs.equals(rnm.revisionNotes.keySet());
    if (touchedAllRevs && !hasComments) {
        return null;
    }

    cb.setTreeId(rnm.noteMap.writeTree(ins));
    return cb;
}

From source file: org.immutables.value.processor.meta.Round.java

private Set<Element> allAnnotatedElements() {
    Set<Element> elements = Sets.newHashSetWithExpectedSize(100);
    for (TypeElement annotation : annotations()) {
        Set<? extends Element> annotatedElements = round().getElementsAnnotatedWith(annotation);
        checkAnnotation(annotation, annotatedElements);
        elements.addAll(annotatedElements);
    }
    return elements;
}

From source file: com.eucalyptus.cluster.VmStateHandler.java

public static void updateVmInfo(final VmStateUpdate stateUpdate) {
    UpdateInstanceResourcesType update = new UpdateInstanceResourcesType();
    update.setPartition(stateUpdate.getCluster().getPartition());
    update.setResources(TypeMappers.transform(stateUpdate, InstanceResourceReportType.class));
    final boolean requestBroadcast = Networking.getInstance().update(update);

    if (Databases.isVolatile()) {
        return;
    }

    final Cluster cluster = stateUpdate.getCluster();
    final Set<String> initialInstances = stateUpdate.getRequestedVms();
    final List<VmInfo> vms = stateUpdate.getVmInfos();
    final Map<String, VmStateView> localState = ImmutableMap.copyOf(CollectionUtils.putAll(
            instanceViewSupplier.get(), Maps.<String, VmStateView>newHashMapWithExpectedSize(vms.size()),
            HasName.GET_NAME, Functions.<VmStateView>identity()));

    final Set<String> reportedInstances = Sets.newHashSetWithExpectedSize(vms.size());
    for (VmInfo vmInfo : vms) {
        reportedInstances.add(vmInfo.getInstanceId());
        vmInfo.setPlacement(cluster.getConfiguration().getName());
        VmTypeInfo typeInfo = vmInfo.getInstanceType();
        if (typeInfo.getName() == null || "".equals(typeInfo.getName())) {
            for (VmType t : VmTypes.list()) {
                if (t.getCpu().equals(typeInfo.getCores()) && t.getDisk().equals(typeInfo.getDisk())
                        && t.getMemory().equals(typeInfo.getMemory())) {
                    typeInfo.setName(t.getName());
                }
            }
        }
    }

    final Set<String> unreportedInstances = Sets
            .newHashSet(Sets.difference(initialInstances, reportedInstances));
    if (Databases.isVolatile()) {
        return;
    }

    final Set<String> unknownInstances = Sets.newHashSet(Sets.difference(reportedInstances, initialInstances));

    final List<Optional<Runnable>> taskList = Lists.newArrayList();

    for (final VmInfo runVm : vms) {
        if (initialInstances.contains(runVm.getInstanceId())) {
            taskList.add(UpdateTaskFunction.REPORTED.apply(context(localState, runVm)));
        } else if (unknownInstances.contains(runVm.getInstanceId())) {
            taskList.add(UpdateTaskFunction.UNKNOWN.apply(context(localState, runVm)));
        }
    }
    for (final String vmId : unreportedInstances) {
        taskList.add(UpdateTaskFunction.UNREPORTED.apply(context(localState, vmId)));
    }
    final Optional<Runnable> broadcastRequestRunnable = requestBroadcast
            ? Optional.<Runnable>of(new Runnable() {
                @Override
                public void run() {
                    NetworkInfoBroadcaster.requestNetworkInfoBroadcast();
                }
            })
            : Optional.<Runnable>absent();

    for (final Runnable task : Iterables.concat(Optional.presentInstances(taskList),
            broadcastRequestRunnable.asSet())) {
        Threads.enqueue(ClusterController.class, VmStateHandler.class,
                (Runtime.getRuntime().availableProcessors() * 2) + 1, Executors.callable(task));
    }
}

From source file: com.viadeo.kasper.core.interceptor.authorization.Role.java

public void addAll(final Collection<Permission> perms) {
    if ((null != perms) && (!perms.isEmpty())) {
        if (null == this.permissions) {
            this.permissions = Sets.newHashSetWithExpectedSize(perms.size());
        }
        this.permissions.addAll(perms);
    }
}

From source file: org.apache.drill.exec.planner.sql.handlers.SqlHandlerUtil.java

private static void ensureNoDuplicateColumnNames(List<String> fieldNames) throws ValidationException {
    final HashSet<String> fieldHashSet = Sets.newHashSetWithExpectedSize(fieldNames.size());
    for (String field : fieldNames) {
        if (fieldHashSet.contains(field.toLowerCase())) {
            throw new ValidationException(String.format("Duplicate column name [%s]", field));
        }
        fieldHashSet.add(field.toLowerCase());
    }
}

From source file: com.google.cloud.pubsub.deprecated.spi.v1.GrpcPubSubRpc.java

private static <V> ApiFuture<V> translate(ApiFuture<V> from, final boolean idempotent, int... returnNullOn) {
    final Set<Integer> returnNullOnSet = Sets.newHashSetWithExpectedSize(returnNullOn.length);
    for (int value : returnNullOn) {
        returnNullOnSet.add(value);
    }
    return ApiFutures.<V, ApiException>catching(from, ApiException.class, new ApiFunction<ApiException, V>() {
        @Override
        public V apply(ApiException exception) {
            if (returnNullOnSet.contains(exception.getStatusCode().value())) {
                return null;
            }
            throw new PubSubException(exception, idempotent);
        }
    });
}

From source file: com.aionlightning.commons.services.CronService.java

protected Collection<JobDetail> getJobDetails() {
    if (scheduler == null) {
        return Collections.emptySet();
    }

    try {
        Set<JobKey> keys = scheduler.getJobKeys(null);

        if (GenericValidator.isBlankOrNull(keys)) {
            return Collections.emptySet();
        }

        Set<JobDetail> result = Sets.newHashSetWithExpectedSize(keys.size());
        for (JobKey jk : keys) {
            result.add(scheduler.getJobDetail(jk));
        }

        return result;
    } catch (Exception e) {
        throw new CronServiceException("Can't get all active job details", e);
    }
}

From source file: net.derquinse.bocas.AbstractGuavaCachingBocas.java

@Override
public final Map<ByteString, ByteSource> get(Iterable<ByteString> keys) {
    Set<K> ikRequested = toInternalKeySet(keys);
    if (ikRequested.isEmpty()) {
        return ImmutableMap.of();
    }
    Map<ByteString, ByteSource> found = Maps.newHashMapWithExpectedSize(ikRequested.size());
    Set<ByteString> notCached = Sets.newHashSetWithExpectedSize(ikRequested.size());
    for (K internalKey : ikRequested) {
        ByteString key = toKey(internalKey);
        ByteSource value = cache.getIfPresent(internalKey);
        if (value != null) {
            found.put(key, value);
        } else {
            notCached.add(key);
        }
    }
    Map<ByteString, ByteSource> foundNotCached = bocas.get(notCached);
    for (Entry<ByteString, ByteSource> e : foundNotCached.entrySet()) {
        ByteString key = e.getKey();
        K internalKey = toInternalKey(key);
        MemoryByteSource value = transform(e.getValue());
        cache.put(internalKey, value);
        found.put(key, value);
    }
    return found;
}