Example usage for org.apache.commons.lang3.tuple Pair getLeft

List of usage examples for org.apache.commons.lang3.tuple Pair getLeft

Introduction

On this page you can find example usages of org.apache.commons.lang3.tuple Pair getLeft.

Prototype

public abstract L getLeft();

Document

Gets the left element from this pair.

When treated as a key-value pair, this is the key.
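
Before the full project examples below, here is a minimal, self-contained sketch of the method (the class name and values are illustrative, not taken from any of the sources):

import org.apache.commons.lang3.tuple.Pair;

public class PairGetLeftDemo {
    public static void main(String[] args) {
        // Pair.of(...) returns an immutable pair; getLeft() yields the first element.
        Pair<String, Integer> entry = Pair.of("answer", 42);
        String key = entry.getLeft();     // "answer" -- the key when treated as a key-value pair
        Integer value = entry.getRight(); // 42
        System.out.println(key + " = " + value);
    }
}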

Usage

From source file:com.jkoolcloud.tnt4j.streams.utils.TimestampFormatter.java

/**
 * Parses the value into a timestamp with microsecond accuracy based on the specified units.
 * <p>
 * If {@code value} represents a decimal number (as a {@link Number} or {@link String}), the fraction is preserved
 * by scaling {@code value} down in {@code units} until the numeric expression has a negligible fraction (epsilon is
 * {@code 0.001}) or no fraction at all, or until {@code units} reaches {@link TimeUnit#NANOSECONDS}.
 *
 * @param units
 *            units that value is in
 * @param value
 *            value to convert
 * @return microsecond timestamp
 * @throws ParseException
 *             if an error occurs while parsing the specified value
 *
 * @see #scale(double, TimeUnit)
 */
public static UsecTimestamp parse(TimeUnit units, Object value) throws ParseException {
    UsecTimestamp ts;
    try {
        long time;
        if (value instanceof Date) {
            time = ((Date) value).getTime();
            units = TimeUnit.MILLISECONDS;
        } else if (value instanceof Calendar) {
            time = ((Calendar) value).getTimeInMillis();
            units = TimeUnit.MILLISECONDS;
        } else {
            if (units == null) {
                units = TimeUnit.MILLISECONDS;
            }

            double dTime = value instanceof Number ? ((Number) value).doubleValue()
                    : Double.parseDouble(value.toString());

            Pair<Double, TimeUnit> sTimePair = scale(dTime, units);
            dTime = sTimePair.getLeft();
            units = sTimePair.getRight();

            time = (long) dTime;
        }

        switch (units) {
        case NANOSECONDS:
            long scale = 1000000L;
            long mSecs = time / scale;
            long uSecs = (time - mSecs * scale) / 1000L;
            ts = new UsecTimestamp(mSecs, uSecs);
            break;
        case MICROSECONDS:
            scale = 1000L;
            mSecs = time / scale;
            uSecs = time - mSecs * scale;
            ts = new UsecTimestamp(mSecs, uSecs);
            break;
        default:
            ts = new UsecTimestamp(units.toMicros(time));
            break;
        }
    } catch (NumberFormatException nfe) {
        ParseException pe = new ParseException(
                StreamsResources.getStringFormatted(StreamsResources.RESOURCE_BUNDLE_NAME,
                        "TimestampFormatter.failed.parsing", value, nfe.getLocalizedMessage()),
                0);
        pe.initCause(nfe);
        throw pe;
    }
    return ts;
}
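
A hedged usage sketch of the method above (the epoch value is illustrative; the fractional seconds survive the conversion as microseconds):

// scale(...) shifts the decimal value into finer units until the fraction is
// negligible, and the switch above packs the result into milliseconds + microseconds.
UsecTimestamp ts = TimestampFormatter.parse(TimeUnit.SECONDS, "1469123452.152300");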

From source file:com.acmutv.ontoqa.core.parser.SimpleSltagParser.java

private static void solveConflicts(ParserState dashboard) throws LTAGException {
    List<String> words = dashboard.getWords();
    WaitingList wlist = dashboard.getWaitingList();
    Map<Integer, Triple<Variable, Variable, Set<Statement>>> missedMainVariables = dashboard
            .getMissedMainVariables();
    Sltag curr = dashboard.getCurr();

    Iterator<ConflictElement> iter = wlist.iterator();
    while (iter.hasNext()) {
        ConflictElement elements = iter.next();
        boolean used = false;

        LOGGER.debug("Examining collissions: substitutions");
        for (Pair<Sltag, Integer> elem : elements.getSubstitutions()) {
            Sltag other = elem.getLeft();
            Integer start = elem.getRight();
            String startLexicalEntry = (start != null) ? words.get(start) : null;
            LOGGER.debug("Collision examination : substitution starting at {} ({}):\n{}", start,
                    startLexicalEntry, other.toPrettyString());
            LtagNode target = curr.firstMatch(other.getRoot().getCategory(), startLexicalEntry,
                    LtagNodeMarker.SUB);
            if (target != null && LtagNodeMarker.SUB.equals(target.getMarker())) {
                LOGGER.debug("Collision examination : substitution : eligible target found {}", target);
                try {
                    curr.substitution(other, target);
                    LOGGER.debug("Substituted (colliding element) {} with:\n{}", target,
                            other.toPrettyString());
                    used = true;
                    break;
                } catch (LTAGException exc) {
                    LOGGER.warn(exc.getMessage());
                }
            }
        }

        if (used) {
            iter.remove();
            continue;
        } else {
            LOGGER.debug("Cannot find nodes eligible for substitution");
        }

        LOGGER.debug("Examining collissions: adjunctions");
        for (Pair<Sltag, Integer> elem : elements.getAdjunctions()) {
            Sltag toAdjunct = elem.getLeft();
            Integer start = elem.getRight();
            String startLexicalEntry = (start != null) ? words.get(start) : null;
            SyntaxCategory category = toAdjunct.getRoot().getCategory();
            LOGGER.debug("Collision examination : adjunction starting at {} ({}):\n{}", start,
                    startLexicalEntry, toAdjunct.toPrettyString());
            LtagNode localTarget = curr.firstMatch(category, startLexicalEntry, null);
            if (localTarget != null) { /* CAN MAKE ADJUNCTION */
                LOGGER.debug("isLeftAdj: {} | isRightAdj: {}", toAdjunct.isLeftAdj(), toAdjunct.isRightAdj());
                LOGGER.debug("missedMainVariables: {}", missedMainVariables);
                if (curr.getSemantics().getMainVariable() == null && toAdjunct.isLeftAdj()
                        && missedMainVariables.containsKey(start)) { /* INSPECT MAIN VARIABLE MISS */
                    int lookup = (start != null) ? start : 0;
                    Variable missedMainVar = missedMainVariables.get(lookup).getMiddle();
                    LOGGER.warn("Found possible main variable miss at pos {}: {}", lookup, missedMainVar);
                    curr.getSemantics().setMainVariable(missedMainVar);
                    LOGGER.warn("Main variable temporarily set to: {}", missedMainVar);
                    curr.adjunction(toAdjunct, localTarget);
                    curr.getSemantics().setMainVariable(null);
                    LOGGER.warn("Resetting main variable to NULL");
                } else if (curr.getSemantics().getMainVariable() == null && toAdjunct.isRightAdj()
                        && missedMainVariables.containsKey((start != null) ? start + 2 : 1)) {
                    int lookup = (start != null) ? start + 2 : 1;
                    Variable missedMainVar = missedMainVariables.get(lookup).getMiddle();
                    LOGGER.warn("Found possible main variable miss at pos {}: {}", lookup, missedMainVar);
                    curr.getSemantics().setMainVariable(missedMainVar);
                    LOGGER.warn("Main variable temporarily set to: {}", missedMainVar);
                    curr.adjunction(toAdjunct, localTarget);
                    curr.getSemantics().setMainVariable(null);
                    LOGGER.warn("Resetting main variable to NULL");
                } else {
                    curr.adjunction(toAdjunct, localTarget);
                }
            }
        }
    }
}

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesCacheDataConverter.java

static void addSingleRelationship(Map<String, Collection<String>> relationships, String account,
        String namespace, String fullName) {
    Pair<KubernetesKind, String> pair = KubernetesManifest.fromFullResourceName(fullName);
    KubernetesKind kind = pair.getLeft();
    String name = pair.getRight();

    Collection<String> keys = relationships.get(kind.toString());

    if (keys == null) {
        keys = new ArrayList<>();
    }

    keys.add(Keys.infrastructure(kind, account, namespace, name));

    relationships.put(kind.toString(), keys);
}
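
For context, a hedged sketch of the split this method relies on; the "<kind> <name>" full-name format is an assumption inferred from the getLeft()/getRight() usage above:

// Hypothetical input of the form "<kind> <name>".
Pair<KubernetesKind, String> pair = KubernetesManifest.fromFullResourceName("pod my-pod");
KubernetesKind kind = pair.getLeft();  // the parsed kind
String name = pair.getRight();         // "my-pod"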

From source file:com.act.lcms.db.analysis.ChemicalToMapOfMetlinIonsToIntensityTimeValues.java

/**
 * This function plots a combination of positive and negative control intensity-time values.
 * @param searchMzs A list of mass charge values
 * @param plottingPath The wells used for the analysis. This variable is mainly used for
 * @param peakDataPos The positive intensity-time values
 * @param peakDataNegs The negative controls' intensity-time values
 * @param plottingDirectory The directory where the plots are going to be placed
 * @return
 * @throws IOException
 */
public static Map<String, String> plotPositiveAndNegativeControlsForEachMZ(Set<Pair<String, Double>> searchMzs,
        String plottingPath, ChemicalToMapOfMetlinIonsToIntensityTimeValues peakDataPos,
        List<ChemicalToMapOfMetlinIonsToIntensityTimeValues> peakDataNegs, String plottingDirectory)
        throws IOException {

    Map<String, String> result = new HashMap<>();
    Map<String, Double> individualMaxIntensities = new HashMap<>();
    WriteAndPlotMS1Results plottingUtil = new WriteAndPlotMS1Results();

    for (Pair<String, Double> mz : searchMzs) {
        LinkedHashMap<String, List<XZ>> ms1s = new LinkedHashMap<>();
        Map<String, Double> metlinMasses = new HashMap<>();
        Double maxIntensity = 0.0d;

        String chemicalAndIonName = mz.getLeft();
        Double massChargeValue = mz.getRight();

        // Get positive ion results
        String positiveChemicalName = AnalysisHelper.constructChemicalAndScanTypeName(chemicalAndIonName,
                ScanData.KIND.POS_SAMPLE);
        List<XZ> ionValuesPos = peakDataPos.peakData.get(positiveChemicalName).get(chemicalAndIonName);
        ms1s.put(positiveChemicalName, ionValuesPos);
        Double localMaxIntensityPos = findPeakMaxIntensity(ionValuesPos);
        maxIntensity = Math.max(maxIntensity, localMaxIntensityPos);
        individualMaxIntensities.put(positiveChemicalName, localMaxIntensityPos);
        metlinMasses.put(positiveChemicalName, massChargeValue);

        // Get negative control results
        Integer negNameCounter = 0;
        for (ChemicalToMapOfMetlinIonsToIntensityTimeValues peakDataNeg : peakDataNegs) {
            String negativeChemicalName = AnalysisHelper.constructChemicalAndScanTypeName(chemicalAndIonName,
                    ScanData.KIND.NEG_CONTROL);
            String negativeChemicalNameId = negativeChemicalName + "_" + negNameCounter.toString();
            List<XZ> ionValuesNeg = peakDataNeg.peakData.get(negativeChemicalName).get(chemicalAndIonName);
            ms1s.put(negativeChemicalNameId, ionValuesNeg);
            Double localMaxIntensityNeg = findPeakMaxIntensity(ionValuesNeg);
            maxIntensity = Math.max(maxIntensity, localMaxIntensityNeg);
            individualMaxIntensities.put(negativeChemicalNameId, localMaxIntensityNeg);
            metlinMasses.put(negativeChemicalNameId, massChargeValue);
            negNameCounter++;
        }

        String relativePath = massChargeValue.toString() + "_" + plottingPath + "_" + chemicalAndIonName;
        File absolutePathFileWithoutExtension = new File(plottingDirectory, relativePath);
        String absolutePathWithoutExtension = absolutePathFileWithoutExtension.getAbsolutePath();
        String absolutePathWithExtension = absolutePathWithoutExtension + "." + FMT;

        // Check if the plotting file already exists. If it does, we should not overwrite it. Instead, we just change
        // the path name by appending a counter till the collision no longer exists.
        // TODO: Implement an elegant solution to this problem.
        File duplicateFile = new File(absolutePathWithExtension);
        Integer fileDuplicateCounter = 0;
        while (duplicateFile.exists() && !duplicateFile.isDirectory()) {
            LOGGER.warn("Duplicate file exists for %s, writing to another file",
                    duplicateFile.getAbsolutePath());
            fileDuplicateCounter++;
            relativePath = relativePath + "_" + fileDuplicateCounter.toString();
            absolutePathFileWithoutExtension = new File(plottingDirectory, relativePath);
            absolutePathWithoutExtension = absolutePathFileWithoutExtension.getAbsolutePath();
            absolutePathWithExtension = absolutePathWithoutExtension + "." + FMT;
            duplicateFile = new File(absolutePathWithExtension);
        }

        LOGGER.info("Wrote plot to %s", absolutePathWithoutExtension);

        plottingUtil.plotSpectra(ms1s, maxIntensity, individualMaxIntensities, metlinMasses,
                absolutePathWithoutExtension, FMT, false, false);

        result.put(mz.getLeft(), relativePath + "." + FMT);
    }

    return result;
}
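
A hedged invocation sketch: each searchMzs entry pairs a chemical-plus-ion label (left) with its mass charge value (right). All names and numbers are illustrative, and positiveData/negativeDataList are assumed to be prebuilt:

Set<Pair<String, Double>> searchMzs = new HashSet<>();
searchMzs.add(Pair.of("caffeine_M+H", 195.0877)); // left: chemical+ion name, right: m/z
Map<String, String> plotPaths = ChemicalToMapOfMetlinIonsToIntensityTimeValues
        .plotPositiveAndNegativeControlsForEachMZ(searchMzs, "plate1_A1", positiveData,
                negativeDataList, "/tmp/plots");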

From source file:ch.aonyx.broker.ib.api.util.AnnotationUtils.java

/**
 * Find a single {@link Annotation} of <code>annotationType</code> from the supplied {@link Class}, traversing its
 * interfaces and superclasses if no annotation can be found on the given class itself.
 * <p>
 * This method explicitly handles class-level annotations which are not declared as
 * {@link java.lang.annotation.Inherited inherited} <i>as well as annotations on interfaces</i>.
 * <p>
 * The algorithm operates as follows: it searches for an annotation on the given class and returns it if found.
 * Otherwise it searches all interfaces that the given class declares, returning the annotation from the first
 * matching candidate, if any. Otherwise it proceeds with introspection of the superclass of the given class,
 * checking the superclass itself; if no annotation is found there, it proceeds with the interfaces that the
 * superclass declares, recursing up through the entire superclass hierarchy until a match is found or the
 * hierarchy is exhausted.
 * 
 * @param clazz
 *            the class to look for annotations on
 * @param annotationType
 *            the annotation class to look for
 * @return A tuple {@link Pair} containing the annotation on the left hand side and the class on the right hand side
 *         or <code>null</code> if none found
 */
public static <A extends Annotation> Pair<A, Class<?>> findAnnotation(final Class<?> clazz,
        final Class<A> annotationType) {
    Validate.notNull(clazz, "Class must not be null");
    A annotation = clazz.getAnnotation(annotationType);
    if (annotation != null) {
        return new ImmutablePair<A, Class<?>>(annotation, clazz);
    }
    for (final Class<?> ifc : clazz.getInterfaces()) {
        final Pair<A, Class<?>> pair = findAnnotation(ifc, annotationType);
        if (pair != null) {
            annotation = pair.getLeft();
            if (annotation != null) {
                return new ImmutablePair<A, Class<?>>(annotation, ifc);
            }
        }
    }
    if (!Annotation.class.isAssignableFrom(clazz)) {
        for (final Annotation ann : clazz.getAnnotations()) {
            final Pair<A, Class<?>> pair = findAnnotation(ann.annotationType(), annotationType);
            if (pair != null) {
                annotation = pair.getLeft();
                if (annotation != null) {
                    return new ImmutablePair<A, Class<?>>(annotation, ann.annotationType());
                }
            }
        }
    }
    final Class<?> superClass = clazz.getSuperclass();
    if ((superClass == null) || (superClass == Object.class)) {
        return null;
    }
    return findAnnotation(superClass, annotationType);
}
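
A self-contained usage sketch (the @Deprecated lookup and the OldService class are illustrative, not from the source):

interface Legacy { }

@Deprecated
class OldService implements Legacy { }

// Somewhere in calling code:
Pair<Deprecated, Class<?>> found = AnnotationUtils.findAnnotation(OldService.class, Deprecated.class);
if (found != null) {
    Deprecated annotation = found.getLeft();   // the annotation instance
    Class<?> declaringType = found.getRight(); // the type on which it was found
}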

From source file:com.twitter.graphjet.bipartite.GraphConcurrentTestHelper.java

/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying bipartite graph, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency during arbitrary points of
 * inserting edges. Note that the exact read-write sequence here is non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param graph              is the underlying
 *                           {@link BipartiteGraph}
 * @param numReadersPerNode  is the number of reader threads to use per node
 * @param leftSize           is the number of left nodes
 * @param rightSize          is the number of right nodes
 * @param edgeProbability    is the probability of an edge between a left-right node pair
 * @param random             is the random number generator to use for generating a random graph
 */
public static <T extends BipartiteGraph & DynamicBipartiteGraph> void testRandomConcurrentReadWriteThreads(
        T graph, int numReadersPerNode, int leftSize, int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numLeftReaders = leftSize * numReadersPerNode;
    int numRightReaders = rightSize * numReadersPerNode;
    int totalNumReaders = numLeftReaders + numRightReaders;
    CountDownLatch readersDoneLatch = new CountDownLatch(totalNumReaders);
    // First, construct a random set of edges to insert in the graph
    Set<Pair<Long, Long>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<BipartiteGraphReader> leftReaders = Lists.newArrayListWithCapacity(numLeftReaders);
    List<BipartiteGraphReader> rightReaders = Lists.newArrayListWithCapacity(numRightReaders);
    Long2ObjectMap<LongSet> leftSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    Long2ObjectMap<LongSet> rightSideGraph = new Long2ObjectOpenHashMap<LongSet>(rightSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        LongSet nodeEdges = new LongOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                if (!rightSideGraph.containsKey(j)) {
                    rightSideGraph.put(j, new LongOpenHashSet(new long[] { i }));
                } else {
                    rightSideGraph.get(j).add(i);
                }
                edges.add(Pair.of((long) i, (long) j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            leftReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, true,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a bunch of rightReaders per node that'll read from the graph at random
    for (int i = 0; i < rightSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            rightReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, false,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Long, Long>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Long, Long> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(totalNumReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(totalNumReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new BipartiteGraphWriter(graph, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numLeftReaders; i++) {
        allThreads.add(Executors.callable(leftReaders.get(i), 1));
    }
    for (int i = 0; i < numRightReaders; i++) {
        allThreads.add(Executors.callable(rightReaders.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a latch was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    // first check the left side
    for (int i = 0; i < numLeftReaders; i++) {
        LongSet expectedLeftEdges = leftSideGraph.get(leftReaders.get(i).queryNode);
        assertTrue(leftReaders.get(i).getQueryNodeDegree() <= expectedLeftEdges.size());
        if (leftReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(leftReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : leftReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedLeftEdges.contains(edge));
            }
        }
    }

    // then the right side
    for (int i = 0; i < numRightReaders; i++) {
        LongSet expectedRightEdges = rightSideGraph.get(rightReaders.get(i).queryNode);
        assertTrue(rightReaders.get(i).getQueryNodeDegree() <= expectedRightEdges.size());
        if (rightReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(rightReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : rightReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedRightEdges.contains(edge));
            }
        }
    }
}

From source file:net.minecraftforge.fml.relauncher.libraries.LibraryManager.java

private static void cleanDirectory(File dir, ModList modlist, File... modDirs) {
    if (!dir.exists())
        return;

    FMLLog.log.debug("Cleaning up mods folder: {}", dir);
    for (File file : dir.listFiles(f -> f.isFile() && f.getName().endsWith(".jar"))) {
        Pair<Artifact, byte[]> ret = extractPacked(file, modlist, modDirs);
        if (ret != null) {
            Artifact artifact = ret.getLeft();
            Repository repo = modlist.getRepository() == null ? libraries_dir : modlist.getRepository();
            File moved = repo.archive(artifact, file, ret.getRight());
            processed.add(moved);
        }
    }

    try {
        if (modlist.changed())
            modlist.save();
    } catch (IOException e) {
        FMLLog.log.error(
                FMLLog.log.getMessageFactory().newMessage("Error updating modlist file {}", modlist.getName()),
                e);
    }
}

From source file:com.vmware.identity.openidconnect.server.LoginTest.java

private static void assertErrorResponseUsingPersonUserCert(String certHeader, Object certAttribute,
        Cookie cookie, String expectedError) throws Exception {
    Pair<ModelAndView, MockHttpServletResponse> result = doRequestUsingPersonUserCert(certHeader, certAttribute,
            cookie);
    ModelAndView modelView = result.getLeft();
    MockHttpServletResponse response = result.getRight();

    Assert.assertNull("modelView", modelView);
    Assert.assertNull("sessionCookie", response.getCookie(SESSION_COOKIE_NAME));
    Assert.assertEquals("status", 401, response.getStatus());
    Assert.assertNotNull("errorResponseHeader", response.getHeader("CastleError"));
    Assert.assertEquals("errorMessage", expectedError, response.getErrorMessage());
}

From source file:com.vmware.identity.openidconnect.server.LoginTest.java

private static void assertSuccessResponseUsingPersonUserCert(String certHeader, Object certAttribute)
        throws Exception {
    Pair<ModelAndView, MockHttpServletResponse> result = doRequestUsingPersonUserCert(certHeader, certAttribute,
            (Cookie) null);
    ModelAndView modelView = result.getLeft();
    MockHttpServletResponse response = result.getRight();

    Assert.assertNull("modelView", modelView);
    validateAuthnSuccessResponse(response, Flow.AUTHZ_CODE, Scope.OPENID, false /* redirectResponseMode */,
            true /* ajaxRequest */, STATE, NONCE);
}

From source file:com.vmware.identity.openidconnect.server.LoginTest.java

private static void assertSuccessResponse(String loginString, String authzHeader) throws Exception {
    Pair<ModelAndView, MockHttpServletResponse> result = doRequest(loginString, authzHeader,
            null /* sessionCookie */, idmClient());
    ModelAndView modelView = result.getLeft();
    MockHttpServletResponse response = result.getRight();
    Assert.assertNull("modelView", modelView);
    validateAuthnSuccessResponse(response, Flow.AUTHZ_CODE, Scope.OPENID, false, true, STATE, NONCE);
}