Example usage for com.google.common.primitives Longs toArray

List of usage examples for com.google.common.primitives Longs toArray

Introduction

On this page you can find example usages of com.google.common.primitives Longs toArray.

Prototype

public static long[] toArray(Collection<? extends Number> collection) 

Document

Returns an array containing each value of collection, converted to a long value in the manner of Number#longValue.
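
Before the real-world examples below, here is a minimal, hypothetical sketch (not taken from any of the projects listed) of the basic conversion. Any Collection<? extends Number> is accepted, and each element is narrowed via Number#longValue, so fractional values are truncated:

import com.google.common.primitives.Longs;

import java.util.Arrays;
import java.util.List;

public class LongsToArrayExample {
    public static void main(String[] args) {
        // Boxed values of different Number subtypes are all converted via Number#longValue().
        List<Number> values = Arrays.asList(1L, 2, 3.7d);
        long[] primitives = Longs.toArray(values);
        System.out.println(Arrays.toString(primitives)); // prints [1, 2, 3]
    }
}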

Usage

From source file:org.sosy_lab.solver.z3.Z3UnsafeFormulaManager.java
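
In this SoSy-Lab Z3 wrapper, toArray flattens the boxed list of replacement arguments into the primitive array of AST pointers that the native mk_app call expects.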

@Override
public Long replaceArgs(Long t, List<Long> newArgs) {
    Preconditions.checkState(get_app_num_args(z3context, t) == newArgs.size());
    long[] newParams = Longs.toArray(newArgs);
    // TODO check equality of sort of each oldArg and newArg
    long funcDecl = get_app_decl(z3context, t);
    return mk_app(z3context, funcDecl, newParams);
}

From source file:org.apache.tephra.util.TxUtils.java
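
Tephra's TxUtils calls toArray twice, converting the invalid-transaction collection and the in-progress key set into the primitive arrays required by the Transaction constructor.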

/**
 * Creates a "dummy" transaction based on the given txVisibilityState's state.  This is not a "real" transaction in
 * the sense that it has not been started, data should not be written with it, and it cannot be committed.  However,
 * this can still be useful for filtering data according to the txVisibilityState's state.  Instead of the actual
 * write pointer from the txVisibilityState, we use {@code Long.MAX_VALUE} to avoid mis-identifying any cells
 * as being written by this transaction (and therefore visible).
 */
public static Transaction createDummyTransaction(TransactionVisibilityState txVisibilityState) {
    return new Transaction(txVisibilityState.getReadPointer(), Long.MAX_VALUE,
            Longs.toArray(txVisibilityState.getInvalid()),
            Longs.toArray(txVisibilityState.getInProgress().keySet()),
            TxUtils.getFirstShortInProgress(txVisibilityState.getInProgress()), TransactionType.SHORT);
}

From source file:com.linkedin.drelephant.tez.heuristics.GenericDataSkewHeuristic.java
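
Dr. Elephant's Tez data-skew heuristic buffers per-task run times and input sizes in List<Long> accumulators, then converts each to a long[] for Statistics.findTwoGroups.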

public HeuristicResult apply(TezApplicationData data) {

    if (!data.getSucceeded()) {
        return null;
    }

    TezTaskData[] tasks = getTasks(data);

    //Gathering data for checking time skew
    List<Long> timeTaken = new ArrayList<Long>();

    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isSampled()) {
            timeTaken.add(tasks[i].getTotalRunTimeMs());
        }
    }

    long[][] groupsTime = Statistics.findTwoGroups(Longs.toArray(timeTaken));

    long timeAvg1 = Statistics.average(groupsTime[0]);
    long timeAvg2 = Statistics.average(groupsTime[1]);

    // Seconds are used when calculating deviation, as they give a clearer picture than milliseconds.
    long timeAvgSec1 = TimeUnit.MILLISECONDS.toSeconds(timeAvg1);
    long timeAvgSec2 = TimeUnit.MILLISECONDS.toSeconds(timeAvg2);

    long minTime = Math.min(timeAvgSec1, timeAvgSec2);
    long diffTime = Math.abs(timeAvgSec1 - timeAvgSec2);

    // Using the same deviation limits for time skew as for data skew. This can be changed in the future.
    Severity severityTime = getDeviationSeverity(minTime, diffTime);

    //This reduces severity if number of tasks is insignificant
    severityTime = Severity.min(severityTime, Severity.getSeverityAscending(groupsTime[0].length,
            numTasksLimits[0], numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));

    //Gather data
    List<Long> inputSizes = new ArrayList<Long>();

    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isSampled()) {

            long inputByte = 0;
            for (TezCounterData.CounterName counterName : _counterNames) {
                inputByte += tasks[i].getCounters().get(counterName);
            }

            inputSizes.add(inputByte);
        }
    }

    long[][] groups = Statistics.findTwoGroups(Longs.toArray(inputSizes));

    long avg1 = Statistics.average(groups[0]);
    long avg2 = Statistics.average(groups[1]);

    long min = Math.min(avg1, avg2);
    long diff = Math.abs(avg2 - avg1);

    Severity severityData = getDeviationSeverity(min, diff);

    //This reduces severity if the largest file sizes are insignificant
    severityData = Severity.min(severityData, getFilesSeverity(avg2));

    //This reduces severity if number of tasks is insignificant
    severityData = Severity.min(severityData, Severity.getSeverityAscending(groups[0].length, numTasksLimits[0],
            numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));

    Severity severity = Severity.max(severityData, severityTime);

    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
            _heuristicConfData.getHeuristicName(), severity,
            Utils.getHeuristicScore(severityData, tasks.length));

    result.addResultDetail("Data skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Data skew (Group A)",
            groups[0].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
    result.addResultDetail("Data skew (Group B)",
            groups[1].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");

    result.addResultDetail("Time skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Time skew (Group A)",
            groupsTime[0].length + " tasks @ " + convertTimeMs(timeAvg1) + " avg");
    result.addResultDetail("Time skew (Group B)",
            groupsTime[1].length + " tasks @ " + convertTimeMs(timeAvg2) + " avg");

    return result;
}

From source file:com.linkedin.drelephant.mapreduce.heuristics.GenericDataSkewHeuristic.java
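
The MapReduce data-skew heuristic follows the same pattern: sampled counter values are gathered into a List<Long> and converted once for the two-group analysis.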

@Override
public HeuristicResult apply(MapReduceApplicationData data) {

    if (!data.getSucceeded()) {
        return null;
    }

    MapReduceTaskData[] tasks = getTasks(data);

    //Gather data
    List<Long> inputBytes = new ArrayList<Long>();

    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isSampled()) {
            inputBytes.add(tasks[i].getCounters().get(_counterName));
        }
    }

    // Ratio of total tasks / sampled tasks
    double scale = ((double) tasks.length) / inputBytes.size();
    //Analyze data. TODO: This is a temp fix. findTwoGroups should support a list as input.
    long[][] groups = Statistics.findTwoGroups(Longs.toArray(inputBytes));

    long avg1 = Statistics.average(groups[0]);
    long avg2 = Statistics.average(groups[1]);

    long min = Math.min(avg1, avg2);
    long diff = Math.abs(avg2 - avg1);

    Severity severity = getDeviationSeverity(min, diff);

    //This reduces severity if the largest file sizes are insignificant
    severity = Severity.min(severity, getFilesSeverity(avg2));

    //This reduces severity if number of tasks is insignificant
    severity = Severity.min(severity, Severity.getSeverityAscending(groups[0].length, numTasksLimits[0],
            numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));

    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
            _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));

    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
    result.addResultDetail("Group A",
            groups[0].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
    result.addResultDetail("Group B",
            groups[1].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");

    return result;
}

From source file:com.linkedin.drelephant.mapreduce.heuristics.GenericSkewHeuristic.java
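
This combined heuristic checks both run-time skew and input-size skew, using toArray at each hand-off from a boxed list to the primitive array consumed by Statistics.findTwoGroups.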

@Override
public HeuristicResult apply(MapReduceApplicationData data) {

    if (!data.getSucceeded()) {
        return null;
    }

    MapReduceTaskData[] tasks = getTasks(data);

    //Gathering data for checking time skew
    List<Long> timeTaken = new ArrayList<Long>();

    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isTimeDataPresent()) {
            timeTaken.add(tasks[i].getTotalRunTimeMs());
        }
    }

    long[][] groupsTime = Statistics.findTwoGroups(Longs.toArray(timeTaken));

    long timeAvg1 = Statistics.average(groupsTime[0]);
    long timeAvg2 = Statistics.average(groupsTime[1]);

    // Seconds are used when calculating deviation, as they give a clearer picture than milliseconds.
    long timeAvgSec1 = TimeUnit.MILLISECONDS.toSeconds(timeAvg1);
    long timeAvgSec2 = TimeUnit.MILLISECONDS.toSeconds(timeAvg2);

    long minTime = Math.min(timeAvgSec1, timeAvgSec2);
    long diffTime = Math.abs(timeAvgSec1 - timeAvgSec2);

    // Using the same deviation limits for time skew as for data skew. This can be changed in the future.
    Severity severityTime = getDeviationSeverity(minTime, diffTime);

    //This reduces severity if number of tasks is insignificant
    severityTime = Severity.min(severityTime, Severity.getSeverityAscending(groupsTime[0].length,
            numTasksLimits[0], numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));

    //Gather data
    List<Long> inputBytes = new ArrayList<Long>();

    for (int i = 0; i < tasks.length; i++) {
        if (tasks[i].isCounterDataPresent()) {
            long inputByte = 0;
            for (MapReduceCounterData.CounterName counterName : _counterNames) {
                inputByte += tasks[i].getCounters().get(counterName);
            }
            inputBytes.add(inputByte);
        }
    }

    // Ratio of total tasks / sampled tasks
    double scale = ((double) tasks.length) / inputBytes.size();
    //Analyze data. TODO: This is a temp fix. findTwoGroups should support a list as input.
    long[][] groups = Statistics.findTwoGroups(Longs.toArray(inputBytes));

    long avg1 = Statistics.average(groups[0]);
    long avg2 = Statistics.average(groups[1]);

    long min = Math.min(avg1, avg2);
    long diff = Math.abs(avg2 - avg1);

    Severity severityData = getDeviationSeverity(min, diff);

    //This reduces severity if the largest file sizes are insignificant
    severityData = Severity.min(severityData, getFilesSeverity(avg2));

    //This reduces severity if number of tasks is insignificant
    severityData = Severity.min(severityData, Severity.getSeverityAscending(groups[0].length, numTasksLimits[0],
            numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));

    Severity severity = Severity.max(severityData, severityTime);

    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
            _heuristicConfData.getHeuristicName(), severity,
            Utils.getHeuristicScore(severityData, tasks.length));

    result.addResultDetail("Data skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Data skew (Group A)",
            groups[0].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
    result.addResultDetail("Data skew (Group B)",
            groups[1].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");

    result.addResultDetail("Time skew (Number of tasks)", Integer.toString(tasks.length));
    result.addResultDetail("Time skew (Group A)",
            groupsTime[0].length + " tasks @ " + convertTimeMs(timeAvg1) + " avg");
    result.addResultDetail("Time skew (Group B)",
            groupsTime[1].length + " tasks @ " + convertTimeMs(timeAvg2) + " avg");

    return result;
}

From source file:org.sosy_lab.solver.z3.Z3InterpolatingProver.java
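
The tree-interpolation routine conjoins each formula partition with the native mk_and, which takes a long[] of AST pointers; toArray bridges from the Set<Long> partitions and from the List<Long> of child nodes collected off the stack.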

@Override
public List<BooleanFormula> getTreeInterpolants(List<Set<Long>> partitionedFormulas, int[] startOfSubTree) {

    final long[] conjunctionFormulas = new long[partitionedFormulas.size()];

    // build conjunction of each partition
    for (int i = 0; i < partitionedFormulas.size(); i++) {
        Preconditions.checkState(!partitionedFormulas.get(i).isEmpty());
        long conjunction = mk_and(z3context, Longs.toArray(partitionedFormulas.get(i)));
        inc_ref(z3context, conjunction);
        conjunctionFormulas[i] = conjunction;
    }

    // build tree of interpolation-points
    final long[] interpolationFormulas = new long[partitionedFormulas.size()];
    final Deque<Pair<Integer, Long>> stack = new ArrayDeque<>(); // contains <subtree,interpolationPoint>

    int lastSubtree = -1; // subtree starts with 0. With -1<0 we start a new subtree.
    for (int i = 0; i < startOfSubTree.length; i++) {
        final int currentSubtree = startOfSubTree[i];
        final long conjunction;
        if (currentSubtree > lastSubtree) {
            // start of a new subtree -> first element has no children
            conjunction = conjunctionFormulas[i];

        } else { // if (currentSubtree <= lastSubtree) {
            // merge-point in tree, several children at a node -> pop from stack and conjunct
            final List<Long> children = new ArrayList<>();
            while (!stack.isEmpty() && currentSubtree <= stack.peekLast().getFirstNotNull()) {
                // adding at front is important for tree-structure!
                children.add(0, stack.pollLast().getSecond());
            }
            children.add(conjunctionFormulas[i]); // add the node itself
            conjunction = mk_and(z3context, Longs.toArray(children));
        }

        final long interpolationPoint;
        if (i == startOfSubTree.length - 1) {
            // the last node in the tree (=root) does not need the interpolation-point-flag
            interpolationPoint = conjunction;
            Preconditions.checkState(currentSubtree == 0, "subtree of root should start at 0.");
            Preconditions.checkState(stack.isEmpty(), "root should be the last element in the stack.");
        } else {
            interpolationPoint = mk_interpolant(z3context, conjunction);
        }

        inc_ref(z3context, interpolationPoint);
        interpolationFormulas[i] = interpolationPoint;
        stack.addLast(Pair.of(currentSubtree, interpolationPoint));
        lastSubtree = currentSubtree;
    }

    Preconditions.checkState(stack.peekLast().getFirst() == 0, "subtree of root should start at 0.");
    long root = stack.pollLast().getSecond();
    Preconditions.checkState(stack.isEmpty(), "root should have been the last element in the stack.");

    final PointerToLong model = new PointerToLong();
    final PointerToLong interpolant = new PointerToLong();
    int isSat = compute_interpolant(z3context, root, // last element is end of chain (root of tree)
            0, interpolant, model);

    Preconditions.checkState(isSat == Z3_LBOOL.Z3_L_FALSE.status,
            "interpolation not possible, because SAT-check returned status '%s'", isSat);

    // n partitions -> n-1 interpolants
    // the given tree interpolants are sorted in post-order,
    // so we only need to copy them
    final List<BooleanFormula> result = new ArrayList<>();
    for (int i = 0; i < partitionedFormulas.size() - 1; i++) {
        result.add(mgr.encapsulateBooleanFormula(ast_vector_get(z3context, interpolant.value, i)));
    }

    // cleanup
    for (long partition : conjunctionFormulas) {
        dec_ref(z3context, partition);
    }
    for (long partition : interpolationFormulas) {
        dec_ref(z3context, partition);
    }

    return result;
}

From source file:org.sosy_lab.cpachecker.util.predicates.z3.Z3InterpolatingProver.java
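
The sequential-interpolation variant performs the same conversion once per partition before calling into the native interpolation API.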

@Override
public List<BooleanFormula> getSeqInterpolants(List<Set<Long>> partitionedFormulas) {

    final long[] interpolationFormulas = new long[partitionedFormulas.size()];

    for (int i = 0; i < interpolationFormulas.length; i++) {
        long partition = mk_and(z3context, Longs.toArray(partitionedFormulas.get(i)));
        inc_ref(z3context, partition);
        interpolationFormulas[i] = partition;
    }

    // n groups -> n-1 interpolants
    final long[] itps = new long[interpolationFormulas.length - 1];
    Arrays.fill(itps, 1); // initialize with value != 0

    PointerToLong labels = new PointerToLong();
    PointerToLong model = new PointerToLong();

    // next lines are not needed due to a direct implementation in the C-code.
    //    long options = mk_params(z3context);
    //    inc_ref(z3context, options);
    //    int[] parents = new int[0]; // this line is not working
    long[] theory = new long[0]; // do we need a theory?

    smtLogger.logSeqInterpolation(interpolationFormulas);

    // get interpolant of groups
    int isSat = interpolateSeq(z3context, interpolationFormulas, itps, model, labels, 0, theory);

    assert isSat != Z3_LBOOL.Z3_L_TRUE.status;

    final List<BooleanFormula> result = new ArrayList<>();
    for (long itp : itps) {
        result.add(mgr.encapsulateBooleanFormula(itp));
    }

    // cleanup
    for (long partition : interpolationFormulas) {
        dec_ref(z3context, partition);
    }

    return result;
}

From source file:com.facebook.buck.core.build.distributed.synchronization.impl.RemoteBuildRuleSynchronizer.java
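
Buck accumulates a schedule of linear back-off delays in an ArrayList<Long> and returns it as the long[] its callers work with.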

private long[] genBackOffsMillis(long cacheSyncFirstBackoffMillis, long cacheSyncMaxTotalBackoffMillis) {
    // Backoffs disabled.
    if (cacheSyncFirstBackoffMillis == 0) {
        return new long[0];
    }

    ArrayList<Long> backOffs = new ArrayList<>();

    // Gen linear backoffs millis.
    long sumBackoffs = 0;
    long currentBackoff = cacheSyncFirstBackoffMillis;
    while (sumBackoffs + currentBackoff <= cacheSyncMaxTotalBackoffMillis) {
        backOffs.add(currentBackoff);
        sumBackoffs += currentBackoff;

        currentBackoff += cacheSyncFirstBackoffMillis;
    }
    if (sumBackoffs < cacheSyncMaxTotalBackoffMillis) {
        backOffs.add(cacheSyncMaxTotalBackoffMillis - sumBackoffs);
    }

    return Longs.toArray(backOffs);
}

From source file:org.sosy_lab.java_smt.solvers.z3.Z3InterpolatingProver.java
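
A later java-smt version of the tree-interpolation code shown above; here the conversion feeds Native.mkAnd, which additionally takes the argument count.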

@Override
public List<BooleanFormula> getTreeInterpolants(List<Set<Long>> partitionedFormulas, int[] startOfSubTree)
        throws InterruptedException, SolverException {
    Preconditions.checkState(!closed);
    final long[] conjunctionFormulas = new long[partitionedFormulas.size()];

    // build conjunction of each partition
    for (int i = 0; i < partitionedFormulas.size(); i++) {
        long conjunction = Native.mkAnd(z3context, partitionedFormulas.get(i).size(),
                Longs.toArray(partitionedFormulas.get(i)));
        Native.incRef(z3context, conjunction);
        conjunctionFormulas[i] = conjunction;
    }

    // build tree of interpolation-points
    final long[] interpolationFormulas = new long[partitionedFormulas.size()];
    final Deque<Z3TreeInterpolant> stack = new ArrayDeque<>();

    int lastSubtree = -1; // subtree starts with 0. With -1<0 we start a new subtree.
    for (int i = 0; i < startOfSubTree.length; i++) {
        final int currentSubtree = startOfSubTree[i];
        final long conjunction;
        if (currentSubtree > lastSubtree) {
            // start of a new subtree -> first element has no children
            conjunction = conjunctionFormulas[i];

        } else { // if (currentSubtree <= lastSubtree) {
            // merge-point in tree, several children at a node -> pop from stack and conjunct
            final List<Long> children = new ArrayList<>();
            while (!stack.isEmpty() && currentSubtree <= stack.peek().getRootOfTree()) {
                // adding at front is important for tree-structure!
                children.add(0, stack.pop().getInterpolationPoint());
            }
            children.add(conjunctionFormulas[i]); // add the node itself
            conjunction = Native.mkAnd(z3context, children.size(), Longs.toArray(children));
        }

        final long interpolationPoint;
        if (i == startOfSubTree.length - 1) {
            // the last node in the tree (=root) does not need the interpolation-point-flag
            interpolationPoint = conjunction;
            Preconditions.checkState(currentSubtree == 0, "subtree of root should start at 0.");
            Preconditions.checkState(stack.isEmpty(), "root should be the last element in the stack.");
        } else {
            interpolationPoint = Native.mkInterpolant(z3context, conjunction);
        }

        Native.incRef(z3context, interpolationPoint);
        interpolationFormulas[i] = interpolationPoint;
        stack.push(new Z3TreeInterpolant(currentSubtree, interpolationPoint));
        lastSubtree = currentSubtree;
    }

    Preconditions.checkState(stack.peek().getRootOfTree() == 0, "subtree of root should start at 0.");
    long root = stack.pop().getInterpolationPoint();
    Preconditions.checkState(stack.isEmpty(), "root should have been the last element in the stack.");

    final long proof = Native.solverGetProof(z3context, z3solver);
    Native.incRef(z3context, proof);

    long interpolationResult;
    try {
        interpolationResult = Native.getInterpolant(z3context, proof, //refutation of premises := proof
                root, // last element is end of chain (root of tree), pattern := interpolation tree
                Native.mkParams(z3context));
    } catch (Z3Exception e) {
        if (dumpFailedInterpolationQueries != null && !creator.shutdownNotifier.shouldShutdown()) {
            try (Writer dumpFile = MoreFiles.openOutputFile(dumpFailedInterpolationQueries.getFreshPath(),
                    StandardCharsets.UTF_8)) {
                dumpFile.write(Native.solverToString(z3context, z3solver));
                dumpFile.write("\n(compute-interpolant ");
                dumpFile.write(Native.astToString(z3context, root));
                dumpFile.write(")\n");
            } catch (IOException e2) {
                logger.logUserException(Level.WARNING, e2, "Could not dump failed interpolation query to file");
            }
        }
        if ("theory not supported by interpolation or bad proof".equals(e.getMessage())) {
            throw new SolverException(e.getMessage(), e);
        }
        throw creator.handleZ3Exception(e);
    }

    // n partitions -> n-1 interpolants
    // the given tree interpolants are sorted in post-order,
    // so we only need to copy them
    final List<BooleanFormula> result = new ArrayList<>();
    for (int i = 0; i < partitionedFormulas.size() - 1; i++) {
        result.add(creator.encapsulateBoolean(Native.astVectorGet(z3context, interpolationResult, i)));
    }

    // cleanup
    Native.decRef(z3context, proof);
    for (long partition : conjunctionFormulas) {
        Native.decRef(z3context, partition);
    }
    for (long partition : interpolationFormulas) {
        Native.decRef(z3context, partition);
    }

    checkInterpolantsForUnboundVariables(result); // Do this last after cleanup.

    return result;
}

From source file:com.microsoft.applicationinsights.internal.channel.common.SenderThreadsBackOffManager.java
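
Application Insights filters out non-positive back-off values and, when any valid entries remain, stores the validated list as a primitive array.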

/**
 * Initialize the 'backOffTimeoutsInMilliseconds' container, which should be done only once.
 * @param container The container that supplies the back-off timeouts in milliseconds.
 *                  Note that if the container returns null, an exception (NullPointerException) will be thrown.
 */
private synchronized void initializeBackOffTimeouts(BackOffTimesPolicy container) {
    if (backOffTimeoutsInMilliseconds != null) {
        return;
    }

    if (container == null) {
        backOffTimeoutsInMilliseconds = new ExponentialBackOffTimesPolicy().getBackOffTimeoutsInMillis();
        InternalLogger.INSTANCE.trace("No BackOffTimesContainer, using default values.");
        return;
    }

    long[] injectedBackOffTimeoutsInSeconds = container.getBackOffTimeoutsInMillis();
    ArrayList<Long> validBackOffTimeoutsInSeconds = new ArrayList<Long>();
    if (injectedBackOffTimeoutsInSeconds != null) {
        for (long backOffValue : injectedBackOffTimeoutsInSeconds) {
            if (backOffValue <= 0) {
                continue;
            }

            validBackOffTimeoutsInSeconds.add(backOffValue);
        }
    }

    if (validBackOffTimeoutsInSeconds.isEmpty()) {
        backOffTimeoutsInMilliseconds = new ExponentialBackOffTimesPolicy().getBackOffTimeoutsInMillis();
        InternalLogger.INSTANCE.trace("BackOff timeouts are not supplied or not valid, using default values.");
        return;
    }

    backOffTimeoutsInMilliseconds = Longs.toArray(validBackOffTimeoutsInSeconds);
}