Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

On this page you can find example usages of com.google.common.collect.Sets.newHashSetWithExpectedSize, collected from open-source projects.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.
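
Before looking at the project code, here is a minimal, self-contained sketch of the typical call pattern; the class, variable names, and data are illustrative only, not taken from the projects below. Unlike new HashSet<>(n), whose argument is a raw initial capacity (so the table can still resize while inserting n elements under the default load factor of 0.75), newHashSetWithExpectedSize(n) picks an initial table size large enough to hold n elements without resizing.

import java.util.Arrays;
import java.util.List;
import java.util.Set;

import com.google.common.collect.Sets;

public class ExpectedSizeExample {

    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("alpha", "beta", "gamma");

        // The expected size is a sizing hint, not a cap; the set still
        // grows normally if more elements are added later.
        Set<String> unique = Sets.newHashSetWithExpectedSize(hosts.size());
        unique.addAll(hosts);

        System.out.println(unique); // prints the three hosts in hash order
    }
}

A common thread in the examples that follow: the expected size is known up front (a collection's size, an array's length, or a small constant), so the hint avoids intermediate rehashing while the set is filled.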

Usage

From source file: org.apache.phoenix.execute.PhoenixTxIndexMutationGenerator.java

public Collection<Pair<Mutation, byte[]>> getIndexUpdates(Table htable,
        Iterator<? extends Mutation> mutationIterator) throws IOException, SQLException {

    if (!mutationIterator.hasNext()) {
        return Collections.emptyList();
    }

    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    ResultScanner currentScanner = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }

    Mutation m = mutationIterator.next();
    Map<String, byte[]> updateAttributes = m.getAttributesMap();
    byte[] txRollbackAttribute = updateAttributes.get(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY);
    boolean isRollback = txRollbackAttribute != null;

    boolean isImmutable = indexMetaData.isImmutableRows();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }

    while (true) {
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        // if we have no non-PK columns, there is no need to find the prior values
        if (mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);

        if (!mutationIterator.hasNext()) {
            break;
        }
        m = mutationIterator.next();
    }

    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(
            mutations.size() * 2 * indexMaintainers.size());
    // Track if we have row keys with Delete mutations (or Puts that are
    // Tephra's Delete marker). If there are none, we don't need to do the scan for
    // prior versions, if there are, we do. Since rollbacks always have delete mutations,
    // this logic will work there too.
    if (!findPriorValueMutations.isEmpty()) {
        List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
        for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
            keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
        }
        Scan scan = new Scan();
        // Project all mutable columns
        for (ColumnReference ref : mutableColumns) {
            scan.addColumn(ref.getFamily(), ref.getQualifier());
        }
        /*
         * Indexes inherit the storage scheme of the data table which means all the indexes have the same
         * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
         * supporting new indexes over existing data tables to have a different storage scheme than the data
         * table.
         */
        byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();

        // Project empty key value column
        scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys),
                ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1);
        scanRanges.initializeScan(scan);
        Table txTable = indexMetaData.getTransactionContext().getTransactionalTable(htable, true);
        // For rollback, we need to see all versions, including
        // the last committed version as there may be multiple
        // checkpointed versions.
        SkipScanFilter filter = scanRanges.getSkipScanFilter();
        if (isRollback) {
            filter = new SkipScanFilter(filter, true);
            indexMetaData.getTransactionContext().setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL);
        }
        scan.setFilter(filter);
        currentScanner = txTable.getScanner(scan);
    }
    if (isRollback) {
        processRollback(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates,
                mutations);
    } else {
        processMutation(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates,
                mutations, findPriorValueMutations);
    }

    return indexUpdates;
}

From source file: com.opengamma.engine.view.calc.MultipleNodeExecutor.java

protected MutableGraphFragment.Root executeMultipleFragments(final MutableGraphFragmentContext context,
        final GraphExecutorStatisticsGatherer statistics) {
    final Set<MutableGraphFragment> allFragments = Sets
            .newHashSetWithExpectedSize(context.getGraph().getSize());
    final MutableGraphFragment.Root logicalRoot = new MutableGraphFragment.Root(context, statistics);
    for (MutableGraphFragment root : graphToFragments(context, context.getGraph(), allFragments)) {
        root.getOutputFragments().add(logicalRoot);
        logicalRoot.getInputFragments().add(root);
    }
    int failCount = 0;
    do {
        if (mergeSharedInputs(logicalRoot, allFragments)) {
            failCount = 0;
        } else {
            if (++failCount >= 2) {
                break;
            }
        }
        if (mergeSingleDependencies(context, allFragments)) {
            failCount = 0;
        } else {
            if (++failCount >= 2) {
                break;
            }
        }
    } while (true);
    findTailFragments(allFragments);
    context.allocateFragmentMap(allFragments.size());
    // Set block counts on non-leaf nodes & leave only the leaves in the set
    logicalRoot.initBlockCount();
    final Iterator<MutableGraphFragment> fragmentIterator = allFragments.iterator();
    final int count = allFragments.size();
    int totalSize = 0;
    long totalInvocationCost = 0;
    long totalDataCost = 0;
    while (fragmentIterator.hasNext()) {
        final MutableGraphFragment fragment = fragmentIterator.next();
        totalSize += fragment.getJobItems();
        totalInvocationCost += fragment.getJobInvocationCost();
        totalDataCost += fragment.getJobDataInputCost() + fragment.getJobDataOutputCost();
        if (!fragment.getInputFragments().isEmpty()) {
            fragment.initBlockCount();
            fragmentIterator.remove();
        }
    }
    statistics.graphProcessed(context.getGraph().getCalculationConfigurationName(), count,
            (double) totalSize / (double) count, (double) totalInvocationCost / (double) count,
            (double) totalDataCost / (double) count);
    // printFragment(logicalRoot);
    // Execute anything left (leaf nodes)
    for (MutableGraphFragment fragment : allFragments) {
        fragment.execute(context);
    }
    return logicalRoot;
}

From source file: com.opengamma.core.security.impl.CoalescingSecuritySource.java

@Override
public Security get(final UniqueId uniqueId) {
    if (!_fetching.compareAndSet(false, true)) {
        final SingleCallback callback = new SingleCallback();
        _pending.add(Pair.of(uniqueId, callback));
        if (callback.waitForResult(_fetching)) {
            return callback.getSecurity();
        }
        // Request the pending queue
        final Collection<Pair<UniqueId, ? extends Callback>> pending = drainPending();
        final Set<UniqueId> request = Sets.newHashSetWithExpectedSize(pending.size());
        addPendingToRequest(pending, request);
        final Map<UniqueId, Security> fullResult;
        try {
            fullResult = getUnderlying().get(request);
            notifyPending(pending, fullResult);
        } catch (RuntimeException t) {
            errorPending(pending);
            throw t;
        } finally {
            _fetching.set(false);
            releaseOtherWritingThreads();
        }
        // We've either notified our own callback or another thread has already done it
        return callback.getSecurity();
    } else {
        Pair<UniqueId, ? extends Callback> e = _pending.poll();
        if (e == null) {
            // Single request
            Security security = null;
            try {
                security = getUnderlying().get(uniqueId);
            } catch (DataNotFoundException ex) {
                // Ignore
            } finally {
                _fetching.set(false);
                releaseOtherWritingThreads();
            }
            return security;
        } else {
            // Single request, e and the content of the pending queue
            final Collection<Pair<UniqueId, ? extends Callback>> pending = drainPending();
            pending.add(e);
            final Set<UniqueId> request = Sets.newHashSetWithExpectedSize(pending.size() + 1);
            request.add(uniqueId);
            addPendingToRequest(pending, request);
            final Map<UniqueId, Security> fullResult;
            try {
                fullResult = getUnderlying().get(request);
                notifyPending(pending, fullResult);
            } catch (RuntimeException t) {
                errorPending(pending);
                throw t;
            } finally {
                _fetching.set(false);
                releaseOtherWritingThreads();
            }
            return fullResult.get(uniqueId);
        }
    }
}

From source file: org.n52.sos.ogc.sos.SosOffering.java

/**
 * Creates a set of {@literal SosOffering}s from a set of
 * {@literal SweAbstractSimpleType}s, using each type's value as the
 * offering identifier and its name as the offering name.
 *
 * @param set
 *            the set (may be {@literal null})
 *
 * @return the set (never {@literal null})
 */
public static Set<SosOffering> fromSet(Set<SweAbstractSimpleType<?>> set) {
    if (set == null) {
        return Collections.emptySet();
    }
    final Set<SosOffering> offeringSet = Sets.newHashSetWithExpectedSize(set.size());
    for (SweAbstractSimpleType<?> type : set) {
        SosOffering sosOffering = new SosOffering(type.getValue().toString(), type.getName());
        if (type.isSetDescription()) {
            sosOffering.setDescription(type.getDescription());
        }
        offeringSet.add(sosOffering);
    }
    return offeringSet;
}

From source file: org.eclipse.che.plugin.docker.machine.DockerInstanceProvider.java

@Inject
public DockerInstanceProvider(DockerConnector docker, DockerConnectorConfiguration dockerConnectorConfiguration,
        DockerMachineFactory dockerMachineFactory, DockerInstanceStopDetector dockerInstanceStopDetector,
        @Named("machine.docker.dev_machine.machine_servers") Set<ServerConf> devMachineServers,
        @Named("machine.docker.machine_servers") Set<ServerConf> allMachinesServers,
        @Named("machine.docker.dev_machine.machine_volumes") Set<String> devMachineSystemVolumes,
        @Named("machine.docker.machine_volumes") Set<String> allMachinesSystemVolumes,
        @Nullable @Named("machine.docker.machine_extra_hosts") String allMachinesExtraHosts,
        WorkspaceFolderPathProvider workspaceFolderPathProvider,
        @Named("che.machine.projects.internal.storage") String projectFolderPath,
        @Named("machine.docker.pull_image") boolean doForcePullOnBuild,
        @Named("machine.docker.privilege_mode") boolean privilegeMode,
        @Named("machine.docker.dev_machine.machine_env") Set<String> devMachineEnvVariables,
        @Named("machine.docker.machine_env") Set<String> allMachinesEnvVariables) throws IOException {

    this.docker = docker;
    this.dockerMachineFactory = dockerMachineFactory;
    this.dockerInstanceStopDetector = dockerInstanceStopDetector;
    this.workspaceFolderPathProvider = workspaceFolderPathProvider;
    this.doForcePullOnBuild = doForcePullOnBuild;
    this.privilegeMode = privilegeMode;
    this.supportedRecipeTypes = Collections.singleton("Dockerfile");
    this.projectFolderPath = projectFolderPath;

    if (SystemInfo.isWindows()) {
        allMachinesSystemVolumes = escapePaths(allMachinesSystemVolumes);
        devMachineSystemVolumes = escapePaths(devMachineSystemVolumes);
    }
    this.commonMachineSystemVolumes = allMachinesSystemVolumes
            .toArray(new String[allMachinesSystemVolumes.size()]);
    final Set<String> devMachineVolumes = Sets
            .newHashSetWithExpectedSize(allMachinesSystemVolumes.size() + devMachineSystemVolumes.size());
    devMachineVolumes.addAll(allMachinesSystemVolumes);
    devMachineVolumes.addAll(devMachineSystemVolumes);
    this.devMachineSystemVolumes = devMachineVolumes.toArray(new String[devMachineVolumes.size()]);

    this.devMachinePortsToExpose = Maps
            .newHashMapWithExpectedSize(allMachinesServers.size() + devMachineServers.size());
    this.commonMachinePortsToExpose = Maps.newHashMapWithExpectedSize(allMachinesServers.size());
    for (ServerConf serverConf : devMachineServers) {
        devMachinePortsToExpose.put(serverConf.getPort(), Collections.emptyMap());
    }
    for (ServerConf serverConf : allMachinesServers) {
        commonMachinePortsToExpose.put(serverConf.getPort(), Collections.emptyMap());
        devMachinePortsToExpose.put(serverConf.getPort(), Collections.emptyMap());
    }

    allMachinesEnvVariables = filterEmptyAndNullValues(allMachinesEnvVariables);
    devMachineEnvVariables = filterEmptyAndNullValues(devMachineEnvVariables);
    this.commonMachineEnvVariables = allMachinesEnvVariables;
    final HashSet<String> envVariablesForDevMachine = Sets
            .newHashSetWithExpectedSize(allMachinesEnvVariables.size() + devMachineEnvVariables.size());
    envVariablesForDevMachine.addAll(allMachinesEnvVariables);
    envVariablesForDevMachine.addAll(devMachineEnvVariables);
    this.devMachineEnvVariables = envVariablesForDevMachine;

    // always add the docker host
    String dockerHost = DockerInstanceRuntimeInfo.CHE_HOST.concat(":")
            .concat(dockerConnectorConfiguration.getDockerHostIp());
    if (isNullOrEmpty(allMachinesExtraHosts)) {
        this.allMachinesExtraHosts = new String[] { dockerHost };
    } else {
        this.allMachinesExtraHosts = ObjectArrays.concat(allMachinesExtraHosts.split(","), dockerHost);
    }
}

From source file: org.jalphanode.jmx.MBeanAnnotationScanner.java

protected void buildOperationMetadata(final BeanInfo beanInfo, final MBeanMetadata.Builder builder) {

    final Set<Method> allAccessors = allAccessors(beanInfo);
    final MethodDescriptor[] methodDescriptors = beanInfo.getMethodDescriptors();
    final Set<String> operationMethods = Sets.newHashSetWithExpectedSize(methodDescriptors.length);

    for (MethodDescriptor descriptor : methodDescriptors) {
        Method method = descriptor.getMethod();
        ManagedOperation operationAnnotation = method.getAnnotation(ManagedOperation.class);

        if (operationAnnotation != null) {
            if (allAccessors.contains(method)) {
                throw new MalformedMBeanException(MessageFormat.format("Accessor method {0} annotated as {1}",
                        method.getName(), ManagedOperation.class.getName()));
            }

            int mod = method.getModifiers();
            if (!Modifier.isPublic(mod) || Modifier.isStatic(mod)) {
                throw new MalformedMBeanException(MessageFormat
                        .format("MBean operation {0} should be public and non static!", method.getName()));
            }

            final String name = operationAnnotation.name().isEmpty() ? method.getName()
                    : operationAnnotation.name();
            if (!operationMethods.add(name)) {
                throw new MalformedMBeanException(
                        MessageFormat.format("Operation with name {0} already registered", name));
            }

            ManagedOperationMetadata.Builder operationBuilder = new ManagedOperationMetadata.Builder(method);

            if (!operationAnnotation.name().isEmpty()) {
                operationBuilder.withName(operationAnnotation.name());
            }

            if (!operationAnnotation.description().isEmpty()) {
                operationBuilder.withDescription(operationAnnotation.description());
            }

            operationBuilder.withImpact(operationAnnotation.impact());
            buildParameterMetadata(method, operationBuilder);

            builder.putOperation(operationBuilder.build());
        }
    }
}

From source file: com.romeikat.datamessie.core.processing.service.stemming.text.TextStemmer.java

private Set<String> getAsSingleWords(final Collection<String> namedEntityNames) {
    final Set<String> singleWords = Sets.newHashSetWithExpectedSize(namedEntityNames.size());
    for (final String namedEntityName : namedEntityNames) {
        final String singleWord = NamedEntity.getAsSingleWord(namedEntityName);
        singleWords.add(singleWord);
    }
    return singleWords;
}

From source file: com.opengamma.financial.analytics.model.credit.BucketedSpreadCurveFunction.java

@Override
public CompiledFunctionDefinition compile(final FunctionCompilationContext compilationContext,
        final Instant atInstant) {
    final ZonedDateTime atZDT = ZonedDateTime.ofInstant(atInstant, ZoneOffset.UTC);
    return new AbstractInvokingCompiledFunction(atZDT.with(LocalTime.MIDNIGHT),
            atZDT.plusDays(1).with(LocalTime.MIDNIGHT).minusNanos(1000000)) {

        @SuppressWarnings("synthetic-access")
        @Override
        public Set<ComputedValue> execute(final FunctionExecutionContext executionContext,
                final FunctionInputs inputs, final ComputationTarget target,
                final Set<ValueRequirement> desiredValues) throws AsynchronousExecution {
            final Clock snapshotClock = executionContext.getValuationClock();
            final ZonedDateTime now = ZonedDateTime.now(snapshotClock);
            final Object dataObject = inputs.getValue(ValueRequirementNames.YIELD_CURVE_MARKET_DATA);
            if (dataObject == null) {
                throw new OpenGammaRuntimeException("Could not get spread curve bucket data");
            }
            final SnapshotDataBundle data = (SnapshotDataBundle) dataObject;
            final ArrayList<Tenor> times = new ArrayList<>();
            final ArrayList<Double> rates = new ArrayList<>();
            for (final Map.Entry<ExternalIdBundle, Double> dataEntry : data.getDataPointSet()) {
                // TODO: The original code here was based on there just being one external ID per point and that having a value which is a period. It would
                // be better to use an id-scheme to tag such values just in case there are any other arbitrary tickers thrown into the bundle. The safest
                // interim approach is to use the first parseable one 
                Period period = null;
                for (final ExternalId id : dataEntry.getKey()) {
                    try {
                        period = Period.parse(id.getValue());
                        break;
                    } catch (final DateTimeParseException e) {
                        // ignore
                    }
                }
                if (period == null) {
                    throw new IllegalArgumentException(dataEntry.toString());
                }
                times.add(Tenor.of(period));
                rates.add(dataEntry.getValue());
            }
            final NodalTenorDoubleCurve curve = new NodalTenorDoubleCurve(
                    times.toArray(new Tenor[times.size()]), rates.toArray(new Double[rates.size()]), false);

            final ValueProperties properties = createValueProperties().get();
            final ValueSpecification spec = new ValueSpecification(ValueRequirementNames.BUCKETED_SPREADS,
                    target.toSpecification(), properties);
            return Collections.singleton(new ComputedValue(spec, curve));
        }

        @Override
        public ComputationTargetType getTargetType() {
            return ComputationTargetType.PRIMITIVE;
        }

        @Override
        public boolean canApplyTo(final FunctionCompilationContext context, final ComputationTarget target) {
            return target.getUniqueId().getScheme().equals(CreditCurveIdentifier.OBJECT_SCHEME);
        }

        @Override
        public Set<ValueSpecification> getResults(final FunctionCompilationContext context,
                final ComputationTarget target) {
            @SuppressWarnings("synthetic-access")
            final ValueProperties properties = createValueProperties().get();
            return Collections.singleton(new ValueSpecification(ValueRequirementNames.BUCKETED_SPREADS,
                    target.toSpecification(), properties));
        }

        @Override
        public Set<ValueRequirement> getRequirements(final FunctionCompilationContext context,
                final ComputationTarget target, final ValueRequirement desiredValue) {
            final ValueProperties constraints = desiredValue.getConstraints();
            final CreditCurveIdentifier curveId = CreditCurveIdentifier
                    .of(target.toSpecification().getUniqueId());
            final Currency ccy = curveId.getCurrency();
            final ValueProperties properties = ValueProperties.builder()
                    .with(ValuePropertyNames.CURVE, curveId.toString()).get();
            final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(3);
            final ComputationTargetSpecification targetSpec = target.toSpecification();
            requirements.add(new ValueRequirement(ValueRequirementNames.YIELD_CURVE_MARKET_DATA,
                    ComputationTargetSpecification.of(ccy), properties));
            return requirements;
        }

    };
}

From source file: monasca.common.hibernate.db.AlarmDb.java

public AlarmDb setSubAlarms(final Collection<SubAlarmDb> subAlarms) {
    if (subAlarms == null || subAlarms.isEmpty()) {
        return this;
    }

    final AlarmDb self = this;
    this.subAlarms = Sets.newHashSetWithExpectedSize(subAlarms.size());

    FluentIterable.from(subAlarms).transform(new Function<SubAlarmDb, SubAlarmDb>() {
        @Nullable
        @Override
        public SubAlarmDb apply(@Nullable final SubAlarmDb input) {
            assert input != null;
            input.setAlarm(self);
            return input;
        }
    }).copyInto(this.subAlarms);
    return this;
}

From source file: com.datatorrent.lib.appdata.gpo.GPOUtils.java

/**
 * This method deserializes the fields in the given {@link FieldsDescriptor} into a map.
 * @param fieldsDescriptor The {@link FieldsDescriptor} to fetch fields from.
 * @param dpou The {@link JSONObject} which contains the fields whose values need to be fetched.
 * @return A {@link Map} whose keys are field names, and whose values are possible values for those fields.
 */
public static Map<String, Set<Object>> deserializeToMap(FieldsDescriptor fieldsDescriptor, JSONObject dpou) {
    Map<String, Set<Object>> keyToValues = Maps.newHashMap();

    for (String key : fieldsDescriptor.getFields().getFields()) {
        if (!dpou.has(key)) {
            throw new IllegalArgumentException("The given key " + key + " is not contained in the given JSON");
        }

        Set<Object> keyValues;
        Object keyValue;

        try {
            keyValue = dpou.get(key);
        } catch (JSONException ex) {
            throw new IllegalStateException("This should never happen", ex);
        }

        if (keyValue instanceof JSONArray) {

            JSONArray ja = (JSONArray) keyValue;
            keyValues = Sets.newHashSetWithExpectedSize(ja.length());

            Type type = fieldsDescriptor.getType(key);

            for (int index = 0; index < ja.length(); index++) {
                keyValues.add(getFieldFromJSON(type, ja, index));
            }

        } else if (keyValue instanceof JSONObject) {
            throw new UnsupportedOperationException("Cannot extract objects from JSONObjects");
        } else {
            keyValues = Sets.newHashSetWithExpectedSize(1);
            keyValues.add(getFieldFromJSON(fieldsDescriptor, key, dpou));
        }

        keyToValues.put(key, keyValues);
    }

    return keyToValues;
}