Example usage for org.apache.commons.lang3.tuple Pair getKey

List of usage examples for org.apache.commons.lang3.tuple Pair getKey

Introduction

On this page you can find example usage for org.apache.commons.lang3.tuple Pair getKey.

Prototype

@Override
public final L getKey() 

Document

Gets the key from this pair.

This method implements the Map.Entry interface, returning the left element as the key.
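
Before the real-world excerpts below, here is a minimal, self-contained sketch of that contract (the class name and values are illustrative):

import org.apache.commons.lang3.tuple.Pair;

import java.util.Map;

public class PairGetKeyDemo {
    public static void main(String[] args) {
        Pair<String, Integer> pair = Pair.of("answer", 42);
        // getKey() returns the left element.
        String key = pair.getKey(); // "answer"
        // Pair implements Map.Entry, so a pair can be used directly as an entry.
        Map.Entry<String, Integer> entry = pair;
        System.out.println(key + " -> " + entry.getValue());
    }
}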

Usage

From source file:alfio.manager.UploadedResourceIntegrationTest.java

@Before
public void ensureConfiguration() {

    IntegrationTestUtil.ensureMinimalConfiguration(configurationRepository);
    List<TicketCategoryModification> categories = Collections.singletonList(new TicketCategoryModification(null,
            "default", AVAILABLE_SEATS, new DateTimeModification(LocalDate.now().minusDays(1), LocalTime.now()),
            new DateTimeModification(LocalDate.now().plusDays(1), LocalTime.now()), DESCRIPTION, BigDecimal.TEN,
            false, "", false, null, null, null, null, null));
    Pair<Event, String> eventAndUser = initEvent(categories, organizationRepository, userManager, eventManager,
            eventRepository);

    event = eventAndUser.getKey();
    user = eventAndUser.getValue() + "_owner";
}

From source file:com.spotify.heroic.test.AbstractSuggestBackendIT.java

private void writeSeries(final SuggestBackend backend, final List<Pair<Series, DateRange>> data)
        throws Exception {

    final List<AsyncFuture<Void>> writes = new ArrayList<>();
    for (Pair<Series, DateRange> p : data) {
        writes.add(writeSeries(backend, p.getKey(), p.getValue()));
    }
    async.collectAndDiscard(writes).get();
}

From source file:com.conversantmedia.mapreduce.tool.annotation.handler.NamedOutputAnnotationHandler.java

@Override
public void process(Annotation annotation, Job job, Object target) throws ToolException {
    NamedOutput namedOut = (NamedOutput) annotation;
    KeyValue kv = namedOut.type();

    // If this is a MultipleOutputs member we're annotating, see if we can
    // get the key/value types from its generic parameters, if any.
    Pair<Type, Type> kvTypePair = getGenericTypeParams(target);

    Class<?> keyClass = kv.key();
    if (keyClass == void.class) {
        if (kvTypePair != null) {
            keyClass = (Class<?>) kvTypePair.getKey();
        } else {
            // fall back on job output key class
            keyClass = job.getOutputKeyClass();
        }
    }

    Class<?> valueClass = kv.value();
    if (valueClass == void.class) {
        if (kvTypePair != null) {
            valueClass = (Class<?>) kvTypePair.getValue();
        } else {
            valueClass = job.getOutputValueClass();
        }
    }

    String[] names = getNames(namedOut);
    for (String name : names) {
        name = (String) evaluateExpression(name);
        if (!configured.contains(name)) {
            MultipleOutputs.addNamedOutput(job, name, namedOut.format(), keyClass, valueClass);
            MultipleOutputs.setCountersEnabled(job, namedOut.countersEnabled());
            configured.add(name);
        }
    }
}

From source file:com.dangdang.config.service.zookeeper.ZookeeperConfigGroup.java

/**
 * Loads the children of the configured node and caches them as key/value pairs.
 */
void loadNode() {
    final String nodePath = ZKPaths.makePath(configProfile.getVersionedRootNode(), node);

    GetChildrenBuilder childrenBuilder = client.getChildren();

    try {
        List<String> children = childrenBuilder.watched().forPath(nodePath);
        if (children != null) {
            Map<String, String> configs = Maps.newHashMap();
            for (String child : children) {
                Pair<String, String> keyValue = loadKey(ZKPaths.makePath(nodePath, child));
                if (keyValue != null) {
                    configs.put(keyValue.getKey(), keyValue.getValue());
                }
            }
            super.putAll(configs);
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
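
The method above collects each Pair returned by loadKey into a Map. The same pattern, with the ZooKeeper plumbing replaced by a plain list (all names and values here are made up for illustration):

import org.apache.commons.lang3.tuple.Pair;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PairsToMapDemo {
    public static void main(String[] args) {
        List<Pair<String, String>> keyValues = Arrays.asList(Pair.of("db.host", "localhost"),
                Pair.of("db.port", "5432"));
        Map<String, String> configs = new HashMap<>();
        for (Pair<String, String> keyValue : keyValues) {
            configs.put(keyValue.getKey(), keyValue.getValue()); // left element as key, right as value
        }
        System.out.println(configs);
    }
}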

From source file:com.pinterest.terrapin.hadoop.BaseUploader.java

public void upload(String clusterName, String fileSet, Options options) throws Exception {
    List<Pair<Path, Long>> fileSizePairList = getFileList();

    int numShards = fileSizePairList.size();
    LOG.info("Got " + numShards + " files.");
    if (numShards == 0) {
        LOG.warn("No files found. Exiting.");
        System.exit(1);
    }

    List<Path> parts = Lists.transform(fileSizePairList, new Function<Pair<Path, Long>, Path>() {
        @Override
        public Path apply(Pair<Path, Long> pathLongPair) {
            return pathLongPair.getKey();
        }
    });
    PartitionerType partitionerType = options.getPartitioner();

    validate(parts, partitionerType, numShards);
    long maxSize = -1;
    for (Pair<Path, Long> fileSizePair : fileSizePairList) {
        long size = fileSizePair.getRight();
        if (maxSize < size) {
            maxSize = size;
        }
    }
    // Come up with a new timestamp epoch for the latest data.
    long timestampEpochMillis = System.currentTimeMillis();
    String hdfsDir = Constants.HDFS_DATA_DIR + "/" + fileSet + "/" + timestampEpochMillis;
    ZooKeeperManager zkManager = getZKManager(clusterName);
    FileSetInfo fileSetInfo = new FileSetInfo(fileSet, hdfsDir, numShards, (List) Lists.newArrayList(),
            options);

    int replicationFactor = Constants.DEFAULT_HDFS_REPLICATION;
    if (terrapinNamenode == null || terrapinNamenode.isEmpty()) {
        ClusterInfo info = zkManager.getClusterInfo();
        if (info == null) {
            LOG.error("Could not find the namenode for " + clusterName);
            System.exit(1);
        }
        if (info.hdfsNameNode == null || info.hdfsNameNode.isEmpty()) {
            LOG.error("Could not find the namenode for " + clusterName);
            System.exit(1);
        }
        this.terrapinNamenode = info.hdfsNameNode;
        replicationFactor = info.hdfsReplicationFactor;
    }
    // Connect to the zookeeper and establish a lock on the fileset.
    LOG.info("Locking fileset " + fileSet);
    zkManager.lockFileSet(fileSet, fileSetInfo);

    try {
        LOG.info("Uploading " + numShards + " files through distcp to " + hdfsDir);

        // TODO: Add check for cluster disk space.
        List<Path> sourceFiles = Lists.newArrayListWithCapacity(fileSizePairList.size());
        for (Pair<Path, Long> fileSize : fileSizePairList) {
            sourceFiles.add(fileSize.getLeft());
        }
        if (sourceFiles.size() == 1) {
            hdfsDir = hdfsDir + "/" + TerrapinUtil.formatPartitionName(0);
        }
        DistCpOptions distCpOptions = new DistCpOptions(sourceFiles,
                new Path("hdfs", terrapinNamenode, hdfsDir));
        distCpOptions.setSyncFolder(true);
        distCpOptions.setSkipCRC(true);

        if (maxSize > Constants.DEFAULT_MAX_SHARD_SIZE_BYTES) {
            LOG.warn("Largest shard is " + maxSize + " bytes. This is more than 4G. "
                    + "Increase the # of shards to reduce the size.");
            System.exit(1);
        }
        TerrapinUtil.setupConfiguration(conf, maxSize, replicationFactor);

        DistCp distCp = getDistCp(conf, distCpOptions);
        Job job = distCp.execute();
        if (!job.waitForCompletion(true)) {
            throw new RuntimeException("Distributed copy failed.");
        }

        LOG.info("Successfully copied data.");

        loadFileSetData(zkManager, fileSetInfo, options);

        // Wait for a while so that zookeeper watches have propagated before relinquishing the lock.
        try {
            LOG.info("Releasing file set lock.");
            Thread.sleep(5000);
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted.");
        }
    } finally {
        zkManager.unlockFileSet(fileSet);
    }
}
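
The Lists.transform call above projects each Pair to its left element through an anonymous Function; on Java 8+ the same projection can use a method reference to getKey. A standalone sketch with made-up file names, using String in place of Hadoop's Path to stay self-contained:

import org.apache.commons.lang3.tuple.Pair;

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class PairProjectionDemo {
    public static void main(String[] args) {
        List<Pair<String, Long>> fileSizePairList = Arrays.asList(Pair.of("part-00000", 1024L),
                Pair.of("part-00001", 2048L));
        List<String> parts = fileSizePairList.stream()
                .map(Pair::getKey) // project each pair to its left element
                .collect(Collectors.toList());
        System.out.println(parts); // [part-00000, part-00001]
    }
}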

From source file:com.galenframework.speclang2.reader.pagespec.PageSectionProcessor.java

private Pair<PageRule, Map<String, String>> findAndProcessRule(String ruleText, StructNode ruleNode) {
    for (Pair<Rule, PageRule> rulePair : pageSpecHandler.getPageRules()) {
        Matcher matcher = rulePair.getKey().getPattern().matcher(ruleText);
        if (matcher.matches()) {
            int index = 1;

            Map<String, String> parameters = new HashMap<String, String>();

            for (String parameterName : rulePair.getKey().getParameters()) {
                String value = matcher.group(index);
                pageSpecHandler.setGlobalVariable(parameterName, value, ruleNode);

                parameters.put(parameterName, value);
                index += 1;
            }

            return new ImmutablePair<PageRule, Map<String, String>>(rulePair.getValue(), parameters);
        }
    }
    throw new SyntaxException(ruleNode, "Could not find rule matching: " + ruleText);
}

From source file:com.trenako.web.tags.BreadcrumbTags.java

private void addElement(List<HtmlTag> items, String contextPath, Criteria criterion) {
    Pair<String, String> crit = getCriteria().get(criterion);
    if (crit == null)
        return;

    String criteriaName = criterion.criterionName();

    String path = new StringBuilder().append("/rs/").append(criteriaName).append("/").append(crit.getKey())
            .toString();

    String label = messageSource.getMessage("breadcrumb." + criteriaName + ".label", null, criteriaName, null);
    String title = messageSource.getMessage("breadcrumb." + criteriaName + ".title.label", null, criteriaName,
            null);

    items.add(snippet(li(plain(label + " "), span("/").cssClass("divider")).cssClass("active"),

            li(a(crit.getValue()).href(contextPath, path).title(title), plain(" "),
                    span("/").cssClass("divider"))));
}

From source file:com.teradata.tempto.internal.hadoop.hdfs.WebHDFSClient.java

private URI buildUri(String path, String username, String operation, Pair<String, String>... parameters) {
    try {
        if (!path.startsWith("/")) {
            path = "/" + path;
        }
        URIBuilder uriBuilder = new URIBuilder().setScheme("http").setHost(nameNode.getHostText())
                .setPort(nameNode.getPort()).setPath("/webhdfs/v1" + checkNotNull(path))
                .setParameter("op", checkNotNull(operation)).setParameter("user.name", checkNotNull(username));

        for (Pair<String, String> parameter : parameters) {
            uriBuilder.setParameter(parameter.getKey(), parameter.getValue());
        }

        return uriBuilder.build();
    } catch (URISyntaxException e) {
        throw new RuntimeException("Could not create save file URI" + ", nameNode: " + nameNode + ", path: "
                + path + ", username: " + username);
    }
}
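
Passing Pair varargs keeps the key/value intent explicit at the call site. A standalone sketch of the same pattern (the render method and the parameter names are hypothetical, not part of tempto):

import org.apache.commons.lang3.tuple.Pair;

public class QueryParamsDemo {
    @SafeVarargs
    static String render(Pair<String, String>... parameters) {
        StringBuilder sb = new StringBuilder();
        for (Pair<String, String> parameter : parameters) {
            // getKey()/getValue() map naturally onto query parameter name/value.
            sb.append(sb.length() == 0 ? '?' : '&').append(parameter.getKey()).append('=')
                    .append(parameter.getValue());
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(render(Pair.of("op", "OPEN"), Pair.of("user.name", "alice")));
        // prints: ?op=OPEN&user.name=alice
    }
}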

From source file:it.polimi.diceH2020.SPACE4CloudWS.solvers.solversImpl.MINLPSolver.MINLPDataFileBuilder.java

private <N extends Number> void printIndexedTable(int idx, Pair<Iterable<Integer>, Iterable<N>> pair) {
    String currentLine = String.format("  [%d, *] :=", idx);
    lines.add(currentLine);
    Iterator<Integer> first = pair.getKey().iterator();
    Iterator<N> second = pair.getValue().iterator();
    while (first.hasNext() && second.hasNext()) {
        Integer key = first.next();
        N value = second.next();
        if (value instanceof Double) {
            // Locale.UK gives '.' rather than ',' as the decimal separator
            currentLine = String.format(Locale.UK, "    %d %f", key, value.doubleValue());
        } else if (value instanceof Integer) {
            currentLine = String.format("    %d %d", key, value.intValue());
        }
        lines.add(currentLine);
    }
}
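
Here the Pair carries two parallel columns rather than a key/value entry. A self-contained sketch of that iteration pattern (the numbers are made up):

import org.apache.commons.lang3.tuple.Pair;

import java.util.Arrays;
import java.util.Iterator;
import java.util.Locale;

public class IndexedTableDemo {
    public static void main(String[] args) {
        // Two parallel columns carried as a single Pair of iterables.
        Pair<Iterable<Integer>, Iterable<Double>> pair = Pair.of(Arrays.asList(1, 2, 3),
                Arrays.asList(0.5, 1.5, 2.5));
        Iterator<Integer> first = pair.getKey().iterator();
        Iterator<Double> second = pair.getValue().iterator();
        while (first.hasNext() && second.hasNext()) {
            System.out.printf(Locale.UK, "    %d %f%n", first.next(), second.next());
        }
    }
}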

From source file:io.pravega.segmentstore.server.host.stat.AutoScaleProcessor.java

private void triggerScaleUp(String streamSegmentName, int numOfSplits) {
    if (initialized.get()) {
        Pair<Long, Long> pair = cache.getIfPresent(streamSegmentName);
        long lastRequestTs = 0;

        if (pair != null && pair.getKey() != null) {
            lastRequestTs = pair.getKey();
        }

        long timestamp = System.currentTimeMillis();

        if (timestamp - lastRequestTs > configuration.getMuteDuration().toMillis()) {
            log.info("sending request for scale up for {}", streamSegmentName);

            Segment segment = Segment.fromScopedName(streamSegmentName);
            AutoScaleEvent event = new AutoScaleEvent(segment.getScope(), segment.getStreamName(),
                    segment.getSegmentNumber(), AutoScaleEvent.UP, timestamp, numOfSplits, false);
            // Cache the timestamp to mute both scale-up and scale-down requests
            writeRequest(event)
                    .thenAccept(x -> cache.put(streamSegmentName, new ImmutablePair<>(timestamp, timestamp)));
        }
    }
}
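
Across all of these examples, getKey() is simply the Map.Entry-flavoured name for the left element; Pair exposes the same value as getLeft(). A minimal sketch with the ImmutablePair used above:

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ImmutablePairDemo {
    public static void main(String[] args) {
        long timestamp = System.currentTimeMillis();
        Pair<Long, Long> cached = new ImmutablePair<>(timestamp, timestamp);
        // getKey() and getLeft() name the same element.
        System.out.println(cached.getKey().equals(cached.getLeft())); // true
    }
}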