Example usage for com.google.common.collect Sets newHashSetWithExpectedSize

List of usage examples for com.google.common.collect Sets newHashSetWithExpectedSize

Introduction

On this page you can find example usage of com.google.common.collect Sets newHashSetWithExpectedSize.

Prototype

public static <E> HashSet<E> newHashSetWithExpectedSize(int expectedSize) 

Source Link

Document

Creates a HashSet instance, with a high enough initial table size that it should hold expectedSize elements without resizing.

Usage

From source file:com.edmunds.etm.rules.impl.UrlTokenMonitor.java

/**
 * Deserializes each child of {@code tokenNode} into a {@code UrlTokenDto}
 * and hands the resulting set to {@link #processTokenDtos}.
 * <p>
 * A child node that fails to deserialize is logged and skipped so that one
 * bad node does not prevent the remaining tokens from being processed.
 *
 * @param tokenNode root node of the token tree; ignored when {@code null}
 */
private void onTokenTreeChanged(ZooKeeperTreeNode tokenNode) {

    if (tokenNode != null) {
        Collection<ZooKeeperTreeNode> childNodes = tokenNode.getChildren().values();
        Set<UrlTokenDto> tokenDtos = Sets.newHashSetWithExpectedSize(childNodes.size());
        for (ZooKeeperTreeNode node : childNodes) {
            try {
                UrlTokenDto dto = objectSerializer.readValue(node.getData(), UrlTokenDto.class);
                tokenDtos.add(dto);
            } catch (IOException e) {
                // Pass the exception to the logger so the stack trace is not lost.
                logger.error(String.format("Unable to deserialize UrlToken node: %s", node.getPath()), e);
            }
        }
        processTokenDtos(tokenDtos);
    }
}

From source file:org.jamocha.filter.Path.java

/**
 * Creates a path for the given template located at the given node/address.
 * The new path is always a member of its own joined-with set, in addition
 * to the paths passed in {@code joinedWith}.
 *
 * @param template template this path is built from
 * @param currentlyLowestNode lowest node this path currently reaches
 * @param factAddressInCurrentlyLowestNode fact address within that node
 * @param joinedWith paths already joined with this one (may be empty)
 */
public Path(final Template template, final Node currentlyLowestNode,
        final FactAddress factAddressInCurrentlyLowestNode, final Path... joinedWith) {
    super();
    this.template = template;
    this.currentlyLowestNode = currentlyLowestNode;
    this.factAddressInCurrentlyLowestNode = factAddressInCurrentlyLowestNode;
    // +1 because this path itself is added to the set below, in addition
    // to the joinedWith elements.
    this.joinedWith = Sets.newHashSetWithExpectedSize(joinedWith.length + 1);
    this.joinedWith.add(this);
    for (final Path path : joinedWith) {
        this.joinedWith.add(path);
    }
}

From source file:com.b2international.snowowl.snomed.importer.rf2.terminology.ComponentLookup.java

/**
 * Resolves the given component IDs to component instances.
 * <p>
 * IDs are first resolved via {@link #getNewComponent}; any IDs not found
 * there are resolved by looking up their storage keys in the editing
 * context.
 *
 * @param componentIds the component IDs to resolve
 * @return the resolved components (a set; iteration order not preserved)
 */
public Collection<C> getComponents(Collection<String> componentIds) {
    final Collection<C> components = Sets.newHashSetWithExpectedSize(componentIds.size());
    final Set<String> missingComponentIds = Sets.newHashSet();

    // First pass: resolve what getNewComponent can provide; collect the rest.
    for (String componentId : componentIds) {
        final C component = getNewComponent(componentId);
        if (component != null) {
            components.add(component);
        } else {
            missingComponentIds.add(componentId);
        }
    }

    if (missingComponentIds.isEmpty()) {
        return components;
    }

    // Second pass: look up the remaining IDs by their storage keys.
    LongIterator storageKeys = getComponentStorageKeys(missingComponentIds).iterator();

    while (storageKeys.hasNext()) {
        final long storageKey = storageKeys.next();
        // NOTE(review): unchecked cast — assumes every storage key returned
        // for these IDs resolves to a component of type C; confirm against
        // getComponentStorageKeys.
        components.add((C) editingContext.lookup(storageKey));
    }

    return components;
}

From source file:com.google.gerrit.server.notedb.RobotCommentUpdate.java

/**
 * Serializes the robot comments in {@code put} into the revision note map
 * and stages the resulting tree on {@code cb}.
 *
 * @return {@code NO_OP_UPDATE} if no revision note content changed;
 *     {@code null} if every revision was touched and no comments remain
 *     (signals the caller to delete the entire ref); otherwise {@code cb}
 *     with its tree ID set to the updated note map
 */
private CommitBuilder storeCommentsInNotes(RevWalk rw, ObjectInserter ins, ObjectId curr, CommitBuilder cb)
        throws ConfigInvalidException, OrmException, IOException {
    RevisionNoteMap<RobotCommentsRevisionNote> rnm = getRevisionNoteMap(rw, curr);
    Set<RevId> updatedRevs = Sets.newHashSetWithExpectedSize(rnm.revisionNotes.size());
    RevisionNoteBuilder.Cache cache = new RevisionNoteBuilder.Cache(rnm);

    // Stage every new comment into the builder for its revision.
    for (RobotComment c : put) {
        cache.get(new RevId(c.revId)).putComment(c);
    }

    Map<RevId, RevisionNoteBuilder> builders = cache.getBuilders();
    boolean touchedAnyRevs = false;
    boolean hasComments = false;
    for (Map.Entry<RevId, RevisionNoteBuilder> e : builders.entrySet()) {
        updatedRevs.add(e.getKey());
        ObjectId id = ObjectId.fromString(e.getKey().get());
        byte[] data = e.getValue().build(noteUtil, true);
        // Only count a revision as touched if its serialized note actually
        // differs from the previously stored bytes.
        if (!Arrays.equals(data, e.getValue().baseRaw)) {
            touchedAnyRevs = true;
        }
        if (data.length == 0) {
            // Empty note: drop the map entry entirely.
            rnm.noteMap.remove(id);
        } else {
            hasComments = true;
            ObjectId dataBlob = ins.insert(OBJ_BLOB, data);
            rnm.noteMap.set(id, dataBlob);
        }
    }

    // If we didn't touch any notes, tell the caller this was a no-op update. We
    // couldn't have done this in isEmpty() below because we hadn't read the old
    // data yet.
    if (!touchedAnyRevs) {
        return NO_OP_UPDATE;
    }

    // If we touched every revision and there are no comments left, tell the
    // caller to delete the entire ref.
    boolean touchedAllRevs = updatedRevs.equals(rnm.revisionNotes.keySet());
    if (touchedAllRevs && !hasComments) {
        return null;
    }

    cb.setTreeId(rnm.noteMap.writeTree(ins));
    return cb;
}

From source file:org.sosy_lab.cpachecker.pcc.strategy.parallel.interleaved.PartialReachedSetIOCheckingInterleavedStrategy.java

/**
 * Checks the certificate by reading partitions from disk and verifying them
 * in parallel with {@code numThreads - 1} pool threads plus the calling
 * thread. Coordination is via the two semaphores ({@code partitionsRead},
 * {@code partitionChecked}) and the shared atomic flags/counters.
 *
 * @param pReachedSet reached set whose initial state must be covered
 * @return {@code true} iff all partitions check out, the initial state and
 *     cross-partition states are covered, and the property holds
 */
@Override
public boolean checkCertificate(final ReachedSet pReachedSet) throws CPAException, InterruptedException {
    AtomicBoolean checkResult = new AtomicBoolean(true);
    AtomicInteger nextId = new AtomicInteger(0);
    AtomicInteger availableForChecking = new AtomicInteger(0);
    Semaphore partitionsRead = new Semaphore(0);
    Semaphore partitionChecked = new Semaphore(0);
    Collection<AbstractState> certificate = Sets.newHashSetWithExpectedSize(ioHelper.getSavedReachedSetSize());
    Multimap<CFANode, AbstractState> partitionNodes = HashMultimap.create();
    Collection<AbstractState> inOtherPartition = new ArrayList<>();
    AbstractState initialState = pReachedSet.popFromWaitlist();
    Precision initPrec = pReachedSet.getPrecision(initialState);

    logger.log(Level.INFO, "Create and start threads");
    ExecutorService executor = Executors.newFixedThreadPool(numThreads - 1);
    try {
        // Worker threads check partitions as they become available.
        for (int i = 0; i < numThreads - 1; i++) {
            executor.execute(new ParallelPartitionChecker(availableForChecking, nextId, checkResult,
                    partitionsRead, partitionChecked, lock, ioHelper, partitionNodes, certificate,
                    inOtherPartition, initPrec, cpa.getStopOperator(), cpa.getTransferRelation(),
                    shutdownNotifier, logger));
        }

        // read partitions (on this thread, interleaved with the checkers)
        new PartitionReader(checkResult, partitionsRead, partitionChecked).run();

        if (!checkResult.get()) {
            return false;
        }

        // help checking remaining partitions
        new ParallelPartitionChecker(availableForChecking, nextId, checkResult, partitionsRead,
                partitionChecked, lock, ioHelper, partitionNodes, certificate, inOtherPartition, initPrec,
                cpa.getStopOperator(), cpa.getTransferRelation(), shutdownNotifier, logger).run();

        // Wait until every partition has been checked by some thread.
        partitionChecked.acquire(ioHelper.getNumPartitions());

        if (!checkResult.get()) {
            return false;
        }

        logger.log(Level.INFO,
                "Add initial state to elements for which it will be checked if they are covered by partition nodes of certificate.");
        inOtherPartition.add(initialState);

        logger.log(Level.INFO,
                "Check if initial state and all nodes which should be contained in different partition are covered by certificate (partition node).");
        if (!PartitionChecker.areElementsCoveredByPartitionElement(inOtherPartition, partitionNodes,
                cpa.getStopOperator(), initPrec)) {
            logger.log(Level.SEVERE,
                    "Initial state or a state which should be in other partition is not covered by certificate.");
            return false;
        }

        logger.log(Level.INFO, "Check property.");
        stats.getPropertyCheckingTimer().start();
        try {
            if (!cpa.getPropChecker().satisfiesProperty(certificate)) {
                logger.log(Level.SEVERE, "Property violated");
                return false;
            }
        } finally {
            stats.getPropertyCheckingTimer().stop();
        }

        return true;
    } finally {
        executor.shutdown();
    }
}

From source file:abstractions.piece.PieceSetFactory.java

/**
 * Builds the map of piece sets for the three sides (NULL, FIRST, SECOND)
 * from the given piece-type enum.
 *
 * @param pieceTypeSetClass enum listing the piece types; must contain a
 *     constant named "null" (case-insensitive) plus at least one other
 *     constant
 * @return unmodifiable map from side to its set of piece instances
 * @throws IllegalPieceSetException if the enum is empty, lacks the NULL
 *     piece type, or contains only the NULL piece type
 */
public <T extends Enum<T> & PieceTypeInterface> Map<SideInterface, Set<PieceInterface>> newPieceSet(
        final Class<T> pieceTypeSetClass) {

    final Set<T> pieceTypeSet = Sets.newHashSet(pieceTypeSetClass.getEnumConstants());

    if (pieceTypeSet.isEmpty()) {
        throw new IllegalPieceSetException("Set of pieces '" + pieceTypeSetClass.getSimpleName()
                + "' must contain the NULL piece type and at least one not-NULL piece type.");
    }

    // Locate the mandatory NULL piece type (case-insensitive name match);
    // running off the iterator's end means it is missing.
    final Iterator<T> piecesAlphabetIterator = pieceTypeSet.iterator();
    T nullType = null;
    try {
        while (!(nullType = piecesAlphabetIterator.next()).name().equalsIgnoreCase("null")) { // NOPMD
            ;
        }
    } catch (final NoSuchElementException e) {
        throw new IllegalPieceSetException(
                "Set of pieces '" + pieceTypeSetClass.getSimpleName() + "' must contain the NULL piece type."); // NOPMD
    }
    pieceTypeSet.remove(nullType);

    if (pieceTypeSet.isEmpty()) {
        throw new IllegalPieceSetException("Set of pieces '" + pieceTypeSetClass.getSimpleName()
                + "' must contain at least one not-NULL piece type.");
    }

    final String path = pieceTypeSetClass.getPackage().getName();

    // One entry per side: NULL, FIRST and SECOND.
    final Map<SideInterface, Set<PieceInterface>> piecesMap = Maps.newHashMapWithExpectedSize(3);

    piecesMap.put(Sides.NULL, Sets.newHashSet(this.newPiece(path, nullType, Sides.NULL)));
    piecesMap.put(Sides.FIRST, this.newSidePieces(path, pieceTypeSet, Sides.FIRST));
    piecesMap.put(Sides.SECOND, this.newSidePieces(path, pieceTypeSet, Sides.SECOND));

    return Collections.unmodifiableMap(piecesMap);
}

/**
 * Instantiates one piece of every given type for the given side.
 *
 * @param path package name used to resolve piece classes
 * @param pieceTypes the (non-NULL) piece types to instantiate
 * @param side the side the pieces belong to
 * @return a new set containing one piece per type
 */
private Set<PieceInterface> newSidePieces(final String path, final Set<? extends PieceTypeInterface> pieceTypes,
        final SideInterface side) {
    final Set<PieceInterface> sidePieces = Sets.newHashSetWithExpectedSize(pieceTypes.size());
    for (final PieceTypeInterface pieceType : pieceTypes) {
        sidePieces.add(this.newPiece(path, pieceType, side));
    }
    return sidePieces;
}

From source file:com.opengamma.financial.currency.CurrencyMatrixSourcingFunction.java

/**
 * Produces one computed conversion-rate value for each desired value,
 * parsing the currency pair out of each requirement's target unique ID.
 */
@Override
public Set<ComputedValue> execute(final FunctionExecutionContext executionContext, final FunctionInputs inputs,
        final ComputationTarget target, final Set<ValueRequirement> desiredValues) {
    final Set<ComputedValue> rates = Sets.newHashSetWithExpectedSize(desiredValues.size());
    for (final ValueRequirement requirement : desiredValues) {
        // The target unique ID encodes the source/destination currency pair.
        final Pair<Currency, Currency> currencyPair = parse(requirement.getTargetSpecification().getUniqueId());
        rates.add(new ComputedValue(createValueSpecification(requirement.getTargetSpecification()),
                getConversionRate(inputs, currencyPair.getFirst(), currencyPair.getSecond())));
    }
    return rates;
}

From source file:org.gradoop.flink.io.impl.tlf.functions.GraphTransactionFromTLFGraph.java

/**
 * Builds a minimal placeholder transaction (two vertices joined by one
 * edge) so that the produced type can be reported.
 */
private void prepareForProducedType() {
    Vertex from = this.vertexFactory.createVertex();
    Vertex to = this.vertexFactory.createVertex();

    Set<Vertex> vertexSet = Sets.newHashSetWithExpectedSize(2);
    vertexSet.add(from);
    vertexSet.add(to);

    Set<Edge> edgeSet = Sets.newHashSetWithExpectedSize(1);
    edgeSet.add(this.edgeFactory.createEdge(from.getId(), to.getId()));

    graphTransaction = new GraphTransaction(this.graphHeadFactory.initGraphHead(GradoopId.get()), vertexSet,
            edgeSet);
}

From source file:org.n52.sos.request.operator.AqdGetCapabilitiesOperatorV10.java

/**
 * Rewrites a SOS capabilities response so it advertises the AQD service:
 * overrides service/version metadata, keeps only the GetCapabilities and
 * GetObservation operations (with AQD parameter values), and restricts
 * contents to the AQD response format.
 *
 * @param response the capabilities response to modify in place
 * @return the same (modified) response instance
 */
private GetCapabilitiesResponse modifyCapabilities(GetCapabilitiesResponse response) {
    SosCapabilities capabilities = response.getCapabilities();
    capabilities.setVersion(AqdConstants.VERSION);
    capabilities.setService(AqdConstants.AQD);
    if (capabilities.isSetServiceIdentification()) {
        SosServiceIdentification serviceIdentification = capabilities.getServiceIdentification();
        serviceIdentification.setVersions(Lists.newArrayList(AqdConstants.VERSION));
    }
    if (capabilities.isSetOperationsMetadata()) {
        // Override the common service/version parameters with AQD values.
        for (String key : capabilities.getOperationsMetadata().getCommonValues().keySet()) {
            if (key.equals(OWSConstants.RequestParams.service.name())) {
                capabilities.getOperationsMetadata().overrideCommonValue(
                        OWSConstants.RequestParams.service.name(),
                        new OwsParameterValuePossibleValues(AqdConstants.AQD));
            } else if (key.equals(OWSConstants.RequestParams.version.name())) {
                capabilities.getOperationsMetadata().overrideCommonValue(
                        OWSConstants.RequestParams.version.name(),
                        new OwsParameterValuePossibleValues(AqdConstants.VERSION));
            }
        }
        // Collect only the two operations AQD exposes.
        Set<OwsOperation> aqdOperations = Sets.newHashSetWithExpectedSize(2);
        for (OwsOperation operation : capabilities.getOperationsMetadata().getOperations()) {
            if (operation.getOperationName().equals(SosConstants.Operations.GetCapabilities.name())) {
                if (operation.getParameterValues()
                        .containsKey(SosConstants.GetCapabilitiesParams.AcceptVersions.name())) {
                    operation.overrideParameter(SosConstants.GetCapabilitiesParams.AcceptVersions,
                            new OwsParameterValuePossibleValues(AqdConstants.VERSION));
                }
                aqdOperations.add(operation);
                checkDCP(operation);
            }
            if (operation.getOperationName().equals(SosConstants.Operations.GetObservation.name())) {
                if (operation.getParameterValues()
                        .containsKey(SosConstants.GetObservationParams.responseFormat.name())) {
                    operation.overrideParameter(SosConstants.GetObservationParams.responseFormat,
                            new OwsParameterValuePossibleValues(AqdConstants.NS_AQD));
                }
                aqdOperations.add(operation);
                checkDCP(operation);
                // Advertise the supported AQD report-obligation flows.
                Set<String> flows = Sets.newHashSet(ReportObligationType.E1A.name(),
                        ReportObligationType.E1B.name(), ReportObligationType.E2A.name());
                operation.addParameterValue(AqdConstants.EXTENSION_FLOW,
                        new OwsParameterValuePossibleValues(flows));
            }
        }
        capabilities.getOperationsMetadata().setOperations(aqdOperations);
    }
    if (capabilities.isSetContents()) {
        // Every offering reports only the AQD response format.
        ArrayList<String> responseFormats = Lists.newArrayList(AqdConstants.NS_AQD);
        for (SosObservationOffering observationOffering : capabilities.getContents()) {
            observationOffering.setResponseFormats(responseFormats);
        }
    }
    return response;
}

From source file:org.onosproject.net.intent.impl.compiler.MplsPathIntentCompiler.java

/**
 * Requests MPLS label resources for every link of the intent's path except
 * the first and the last, in both directions.
 *
 * @param intent the MPLS path intent to allocate labels for
 * @return the allocations returned by the resource service
 */
private LinkResourceAllocations assignMplsLabel(MplsPathIntent intent) {
    // TODO: do it better... Suggestions?
    int innerLinkCount = intent.path().links().size() - 2;
    // Each inner link contributes itself plus its inverse, so presize for
    // twice the count; Math.max guards against paths too short to have any
    // inner links (a negative expected size would throw).
    Set<Link> linkRequest = Sets.newHashSetWithExpectedSize(Math.max(0, 2 * innerLinkCount));
    for (int i = 1; i <= innerLinkCount; i++) {
        Link link = intent.path().links().get(i);
        linkRequest.add(link);
        // add the inverse link. I want that the label is reserved both for
        // the direct and inverse link
        linkRequest.add(linkStore.getLink(link.dst(), link.src()));
    }

    LinkResourceRequest.Builder request = DefaultLinkResourceRequest.builder(intent.id(), linkRequest)
            .addMplsRequest();
    LinkResourceAllocations reqMpls = resourceService.requestResources(request.build());
    return reqMpls;
}