Example usage for com.google.common.collect Iterables get

List of usage examples for com.google.common.collect Iterables get

Introduction

On this page you can find usage examples for com.google.common.collect.Iterables.get.

Prototype

public static <T> T get(Iterable<T> iterable, int position) 

Document

Returns the element at the specified position in an iterable.
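
A minimal sketch of the call (the class name and the set contents are illustrative, not taken from any of the projects below). Iterables.get works on any Iterable, not just List, and throws an IndexOutOfBoundsException when the position is out of range:

import java.util.Set;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

public class IterablesGetSketch {
    public static void main(String[] args) {
        // Iterables.get works on any Iterable, not just List; here an immutable Set.
        Set<String> hosts = ImmutableSet.of("alpha", "beta", "gamma");

        // Element at position 1 in the set's iteration order.
        String second = Iterables.get(hosts, 1);
        System.out.println(second); // prints "beta" (ImmutableSet preserves insertion order)

        // An out-of-range position throws IndexOutOfBoundsException.
        try {
            Iterables.get(hosts, 10);
        } catch (IndexOutOfBoundsException e) {
            System.out.println("position 10 is out of range");
        }
    }
}

The examples below follow the same pattern: the iterable is usually a Set or other non-indexed collection, and Iterables.get supplies the positional access the collection itself lacks.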

Usage

From source file:com.opengamma.bbg.loader.hts.BloombergHistoricalTimeSeriesLoader.java

/**
 * Fetches the time-series from Bloomberg and stores them in the master.
 *
 * @param identifiers  the identifiers to fetch, not null
 * @param dataField  the data field, not null
 * @param dataProvider  the data provider, not null
 * @param startDate  the start date to load, not null
 * @param endDate  the end date to load, not null
 * @param result  the result map of identifiers, updated if already in database, not null
 */
protected void fetchTimeSeries(final Set<ExternalId> identifiers, final String dataField,
        final String dataProvider, final LocalDate startDate, final LocalDate endDate,
        final Map<ExternalId, UniqueId> result) {

    Map<ExternalIdBundleWithDates, ExternalId> withDates2ExternalId = new HashMap<ExternalIdBundleWithDates, ExternalId>();
    Map<ExternalIdBundle, ExternalIdBundleWithDates> bundle2WithDates = new HashMap<ExternalIdBundle, ExternalIdBundleWithDates>();

    // lookup full set of identifiers
    Map<ExternalId, ExternalIdBundleWithDates> externalId2WithDates = _identifierResolver
            .getExternalIds(identifiers);

    // reverse map and normalize identifiers
    for (Entry<ExternalId, ExternalIdBundleWithDates> entry : externalId2WithDates.entrySet()) {
        ExternalId requestIdentifier = entry.getKey();
        ExternalIdBundleWithDates bundle = entry.getValue();
        bundle = BloombergDataUtils.addTwoDigitYearCode(bundle);
        bundle2WithDates.put(bundle.toBundle(), bundle);
        withDates2ExternalId.put(bundle, requestIdentifier);
    }

    // fetch time-series and store to master
    if (bundle2WithDates.size() > 0) {
        int identifiersSize = bundle2WithDates.keySet().size();
        if (bundle2WithDates.size() == 1) {
            System.out.printf("Loading ts for %s: dataField: %s dataProvider: %s startDate: %s endDate: %s\n",
                    Iterables.get(bundle2WithDates.keySet(), 0), dataField, dataProvider, startDate, endDate);
        } else {
            System.out.printf("Loading %d ts:  dataField: %s dataProvider: %s startDate: %s endDate: %s\n",
                    identifiersSize, dataField, dataProvider, startDate, endDate);
        }
        OperationTimer timer = new OperationTimer(s_logger,
                " loading " + identifiersSize + " timeseries from Bloomberg");
        Map<ExternalIdBundle, LocalDateDoubleTimeSeries> tsMap = provideTimeSeries(bundle2WithDates.keySet(),
                dataField, dataProvider, startDate, endDate);
        timer.finished();

        timer = new OperationTimer(s_logger, " storing " + identifiersSize + " timeseries from Bloomberg");
        storeTimeSeries(tsMap, dataField, dataProvider, withDates2ExternalId, bundle2WithDates, result);
        timer.finished();
    }
}

From source file:org.onebusaway.nyc.vehicle_tracking.impl.simulator.SimulatorTask.java

public VehicleLocationDetails getTransitionParticleDetails(int parentParticleId, int transParticleNumber,
        int recordIndex) {
    final VehicleLocationDetails details = new VehicleLocationDetails();
    details.setId(_id);

    final Collection<Multiset.Entry<Particle>> particles;
    if (recordIndex < 0) {
        details.setLastObservation(
                RecordLibrary.getNycTestInferredLocationRecordAsNycRawLocationRecord(_mostRecentRecord));
        particles = _vehicleLocationInferenceService.getCurrentParticlesForVehicleId(_vehicleId).entrySet();
    } else {
        details.setLastObservation(getDetails(recordIndex).getLastObservation());
        particles = getDetails(recordIndex).getParticles();
    }

    if (particles != null) {
        for (final Multiset.Entry<Particle> pEntry : particles) {
            final Particle p = pEntry.getElement();
            if (p.getIndex() == parentParticleId) {
                final Multiset<Particle> history = HashMultiset.create();
                history.add(Iterables.get(p.getTransitions().elementSet(), transParticleNumber));
                details.setParticles(history);
                details.setHistory(true);
                break;
            }
        }
    }
    return details;
}

From source file:org.apache.brooklyn.cloudfoundry.location.CloudFoundryLocation.java

private Integer getSshPort() {
    // see https://docs.cloudfoundry.org/devguide/deploy-apps/ssh-apps.html#other-ssh-access
    GetInfoResponse info = getCloudFoundryClient().info().get(GetInfoRequest.builder().build()).block();
    String sshEndpoint = info.getApplicationSshEndpoint();
    return Integer.parseInt(Iterables.get(Splitter.on(":").split(sshEndpoint), 1));
}
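
For an endpoint of the form "host:port" (for example, a hypothetical "ssh.example.com:2222"), Splitter.on(":").split(sshEndpoint) yields the host followed by the port, so Iterables.get(..., 1) returns the port substring, which is then parsed as an integer.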

From source file:org.linqs.psl.utils.dataloading.graph.Graph.java

public List<Subgraph<ET, RT>> splitGraphSnowball(int numsplits, ET splitType, int splitSize,
        double exploreProbability) {
    List<Subgraph<ET, RT>> splits = new ArrayList<Subgraph<ET, RT>>(numsplits);
    Set<Entity<ET, RT>> remaining = new HashSet<Entity<ET, RT>>();
    Set<Entity<ET, RT>> excluded = new HashSet<Entity<ET, RT>>();

    if (!entities.containsKey(splitType))
        throw new IllegalArgumentException("There are no entities of given type!");
    remaining.addAll(entities.get(splitType).values());

    //Grow splits
    for (int i = 0; i < numsplits; i++) {
        Set<Entity<ET, RT>> seed = new HashSet<Entity<ET, RT>>();
        GrowCondition gc = new SizeLimit(splitType, splitSize);
        Subgraph<ET, RT> sample;
        do {
            int pos = (int) Math.floor(Math.random() * remaining.size());
            seed.add(Iterables.get(remaining, pos));
            sample = growSplit(seed, excluded, new SizeLimit(splitType, splitSize), exploreProbability);
        } while (gc.continueGrowing(sample));
        //Update sets
        remaining.removeAll(sample.getEntities(splitType));
        excluded.addAll(sample.getEntities(splitType));
        splits.add(sample);
    }

    return splits;
}

From source file:io.cloudsoft.cloudera.brooklynnodes.DirectClouderaManagerImpl.java

public static void authorizeIngress(ComputeServiceContext computeServiceContext, Set<Instance> instances,
        ClusterSpec clusterSpec, List<Cidr> cidrs, int... ports) {

    if (EC2ApiMetadata.CONTEXT_TOKEN.isAssignableFrom(computeServiceContext.getBackendType())) {
        //        from:
        //            FirewallManager.authorizeIngress(computeServiceContext, instances, clusterSpec, cidrs, ports);

        // This code (or something like it) may be added to jclouds (see
        // http://code.google.com/p/jclouds/issues/detail?id=336).
        // Until then we need this temporary workaround.
        String region = AWSUtils.parseHandle(Iterables.get(instances, 0).getId())[0];
        EC2Client ec2Client = computeServiceContext.unwrap(EC2ApiMetadata.CONTEXT_TOKEN).getApi();
        String groupName = "jclouds#" + clusterSpec.getClusterName();
        for (Cidr cidr : cidrs) {
            for (int port : ports) {
                try {
                    ec2Client.getSecurityGroupServices().authorizeSecurityGroupIngressInRegion(region,
                            groupName, IpProtocol.TCP, port, port, cidr.toString());
                } catch (IllegalStateException e) {
                    LOG.warn(e.getMessage());
                    /* ignore, it means that this permission was already granted */
                }
            }
        }
    } else {
        // TODO generalise the above, or support more clouds, or bypass whirr
        LOG.debug("Skipping port ingress modifications for " + instances + " in cloud "
                + computeServiceContext.getBackendType());
    }
}

From source file:org.jclouds.ec2.compute.EC2ComputeService.java

private Set<NodeMetadata> addTagsAndNamesToInstancesInRegion(Map<String, String> common, Set<String> nodeNames,
        Set<? extends NodeMetadata> input, String region, String group) {
    Map<String, ? extends NodeMetadata> instancesById = Maps.uniqueIndex(input, instanceId);
    ImmutableSet.Builder<NodeMetadata> builder = ImmutableSet.<NodeMetadata>builder();

    if (generateInstanceNames && !common.containsKey("Name")) {
        for (Map.Entry<String, ? extends NodeMetadata> entry : instancesById.entrySet()) {
            String id = entry.getKey();
            String name;
            if (!nodeNames.isEmpty()) {
                name = Iterables.get(nodeNames, 0);
            } else {
                name = id.replaceAll(".*-", group + "-");
            }
            Map<String, String> tags = ImmutableMap.<String, String>builder().putAll(common).put("Name", name)
                    .build();
            logger.debug(">> applying tags %s to instance %s in region %s", tags, id, region);
            client.getTagApiForRegion(region).get().applyToResources(tags, ImmutableSet.of(id));
            builder.add(addTagsForInstance(tags, instancesById.get(id)));
        }
    } else {
        Iterable<String> ids = instancesById.keySet();
        logger.debug(">> applying tags %s to instances %s in region %s", common, ids, region);
        client.getTagApiForRegion(region).get().applyToResources(common, ids);
        for (NodeMetadata in : input)
            builder.add(addTagsForInstance(common, in));
    }
    if (logger.isDebugEnabled()) {
        Multimap<String, String> filter = new TagFilterBuilder().resourceIds(instancesById.keySet()).build();
        FluentIterable<Tag> tags = client.getTagApiForRegion(region).get().filter(filter);
        logger.debug("<< applied tags in region %s: %s", region, resourceToTagsAsMap(tags));
    }
    return builder.build();
}

From source file:org.icgc.dcc.portal.service.FileService.java

private static String toSummarizedString(Set<Object> values) {
    if (isEmpty(values)) {
        return "";
    }

    val count = values.size();

    // Get the value if there is only one element; otherwise get the count or empty string if empty.
    return (count > 1) ? String.valueOf(count) : Iterables.get(values, 0).toString();
}
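
When the set is known to hold exactly one element, Iterables.get(values, 0) simply returns that element; Guava's Iterables.getOnlyElement would be an alternative there, though it fails fast if more than one element is present.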

From source file:brooklyn.entity.network.bind.BindDnsServerImpl.java

public void update() {
    Lifecycle serverState = getAttribute(Attributes.SERVICE_STATE_ACTUAL);
    if (Lifecycle.STOPPED.equals(serverState) || Lifecycle.STOPPING.equals(serverState)
            || Lifecycle.DESTROYED.equals(serverState) || !getAttribute(Attributes.SERVICE_UP)) {
        LOG.debug("Skipped update of {} when service state is {} and running is {}",
                new Object[] { this, getAttribute(Attributes.SERVICE_STATE_ACTUAL), getAttribute(SERVICE_UP) });
        return;
    }
    synchronized (this) {
        Iterable<Entity> availableEntities = FluentIterable.from(getEntities().getMembers())
                .filter(new HasHostnameAndValidLifecycle());
        LOG.debug("{} updating with entities: {}", this, Iterables.toString(availableEntities));
        ImmutableListMultimap<String, Entity> hostnameToEntity = Multimaps.index(availableEntities,
                new HostnameTransformer());

        Map<String, String> octetToName = Maps.newHashMap();
        BiMap<String, String> ipToARecord = HashBiMap.create();
        Multimap<String, String> aRecordToCnames = MultimapBuilder.hashKeys().hashSetValues().build();
        Multimap<String, String> ipToAllNames = MultimapBuilder.hashKeys().hashSetValues().build();

        for (Map.Entry<String, Entity> e : hostnameToEntity.entries()) {
            String domainName = e.getKey();
            Maybe<SshMachineLocation> location = Machines
                    .findUniqueSshMachineLocation(e.getValue().getLocations());
            if (!location.isPresent()) {
                LOG.debug("Member {} of {} does not have an SSH location so will not be configured",
                        e.getValue(), this);
                continue;
            } else if (ipToARecord.inverse().containsKey(domainName)) {
                continue;
            }

            String address = location.get().getAddress().getHostAddress();
            ipToAllNames.put(address, domainName);
            if (!ipToARecord.containsKey(address)) {
                ipToARecord.put(address, domainName);
                if (getReverseLookupNetwork().contains(new Cidr(address + "/32"))) {
                    String octet = Iterables.get(Splitter.on('.').split(address), 3);
                    if (!octetToName.containsKey(octet))
                        octetToName.put(octet, domainName);
                }
            } else {
                aRecordToCnames.put(ipToARecord.get(address), domainName);
            }
        }
        setAttribute(A_RECORDS, ImmutableMap.copyOf(ipToARecord.inverse()));
        setAttribute(PTR_RECORDS, ImmutableMap.copyOf(octetToName));
        setAttribute(CNAME_RECORDS, Multimaps.unmodifiableMultimap(aRecordToCnames));
        setAttribute(ADDRESS_MAPPINGS, Multimaps.unmodifiableMultimap(ipToAllNames));

        // Update Bind configuration files and restart the service
        getDriver().updateBindConfiguration();
    }
}