Example usage for java.util ArrayList subList

List of usage examples for java.util ArrayList subList

Introduction

This page collects real-world usage examples for java.util.ArrayList.subList.

Prototype

public List<E> subList(int fromIndex, int toIndex) 

Document

Returns a view of the portion of this list between the specified fromIndex, inclusive, and toIndex, exclusive.
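
Because subList returns a view rather than a copy, writes through the sublist modify the backing list, and structural changes to the backing list invalidate the view. A minimal sketch of these semantics (class and variable names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SubListDemo {
    public static void main(String[] args) {
        List<String> letters = new ArrayList<>(Arrays.asList("a", "b", "c", "d", "e"));

        // fromIndex is inclusive, toIndex exclusive: elements at index 1 and 2
        List<String> middle = letters.subList(1, 3);
        System.out.println(middle); // [b, c]

        // The sublist is a view: writes go through to the backing list
        middle.set(0, "B");
        System.out.println(letters); // [a, B, c, d, e]

        // Endpoints outside the list's range throw IndexOutOfBoundsException:
        // letters.subList(2, 9);

        // Structurally modifying the backing list invalidates the view: after
        // letters.add("f"), using middle throws ConcurrentModificationException
    }
}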

Usage

From source file:hms.hwestra.interactionrebuttal.InteractionRebuttal.java

public void iterativelyBuildProxiesAndCorrelate(String expressionfile, String gte, String outputdir,
        String probefile, int permutations, String proxy, Integer numProbesMax, boolean justcorrelate)
        throws IOException {

    Set<String> samplesToUse = null;
    if (gte != null) {
        TextFile tfq = new TextFile(gte, TextFile.R);
        samplesToUse = tfq.readAsSet(1, TextFile.tab);
        tfq.close();
    }
    DoubleMatrixDataset<String, String> rawExpressionDataset = new DoubleMatrixDataset<String, String>(
            expressionfile, null, samplesToUse);
    int numTotalIter = 0;

    ArrayList<String> probes = null;
    if (probefile != null) {
        TextFile tf = new TextFile(probefile, TextFile.R);
        probes = tf.readAsArrayList();
        tf.close();
        numProbesMax = probes.size();
    } else {
        System.out.println("Selecting random probes");
        List<String> rows = rawExpressionDataset.rowObjects;
        probes = new ArrayList<String>(rows);
    }

    outputdir = Gpio.formatAsDirectory(outputdir);
    Gpio.createDir(outputdir);
    int iter = 5;
    System.out.println(probes.size() + " probes available");

    int remainder = numProbesMax % iter;
    int numProbesMaxIter = numProbesMax + (iter - remainder);

    if (!justcorrelate) {

        for (int num = 0; num < numProbesMaxIter + 1; num += iter) {
            int probesToSelect = num;
            if (num == 0) {
                probesToSelect = 1;
            }
            if (num > numProbesMax) {
                probesToSelect = numProbesMax;
            }
            System.out.println("Selecting: " + probesToSelect + " probes");
            for (int permutation = 0; permutation < permutations; permutation++) {
                Collections.shuffle(probes);
                List<String> subsample = probes.subList(0, probesToSelect);

                // create output dir
                String outputdirPerm = outputdir + probesToSelect + "-Probes/Permutation-" + permutation + "/";
                outputdirPerm = Gpio.formatAsDirectory(outputdirPerm);
                Gpio.createDir(outputdirPerm);
                String subset = outputdirPerm + "probes.txt";
                TextFile probeout = new TextFile(subset, TextFile.W);
                probeout.writeList(subsample);
                probeout.close();

                // run normalizer
                prepareDataForCelltypeSpecificEQTLMapping(rawExpressionDataset, expressionfile, outputdirPerm,
                        Double.NaN, subset, null, null, null, 4);
                // remove superfluous files
                // correlate with cell count
            }
            numTotalIter++;
        }
    }

    DoubleMatrixDataset<String, String> ds = new DoubleMatrixDataset<String, String>(proxy); // samples on the rows...
    ds.transposeDataset(); // samples on the columns
    for (int row = 0; row < ds.nrRows; row++) {
        String pheno = ds.rowObjects.get(row);

        double[] x = ds.rawData[row];
        System.out.println("x length: " + x.length);

        TextFile statsout = new TextFile(outputdir + pheno + ".txt", TextFile.W);
        statsout.writeln("Num\tMeanPearson\tsdPearson\tMeanSpearman\tsdSpearman");
        SpearmansCorrelation sp = new SpearmansCorrelation();
        for (int num = 0; num < numProbesMaxIter + 1; num += iter) {
            int probesToSelect = num;
            if (num == 0) {
                probesToSelect = 1;
            }
            if (num > numProbesMax) {
                probesToSelect = numProbesMax;
            }
            double[] allCorrelations = new double[permutations];
            double[] allCorrelationSpearman = new double[permutations];
            for (int permutation = 0; permutation < permutations; permutation++) {
                String inputdirPerm = outputdir + probesToSelect + "-Probes/Permutation-" + permutation
                        + "/CellTypeProxyFile.txt";
                DoubleMatrixDataset<String, String> ds2 = new DoubleMatrixDataset<String, String>(inputdirPerm);
                ds2.transposeDataset(); // samples on the columns
                double[] y = new double[x.length];
                System.out.println("y: " + y.length);
                double[] ytmp = ds2.rawData[0];
                //                    if (ytmp.length != x.length) {
                //                        System.err.println("Error: " + y.length);
                //                        System.exit(-1);
                //                    } else {
                for (int col = 0; col < ds.nrCols; col++) {
                    int otherCol = ds2.hashCols.get(ds.colObjects.get(col));
                    y[col] = ytmp[otherCol];
                }
                double corr = JSci.maths.ArrayMath.correlation(x, y);
                System.out.println(num + "\t" + permutation + "\t" + corr);
                double spearman = sp.correlation(x, y);
                allCorrelations[permutation] = corr;
                allCorrelationSpearman[permutation] = spearman;
                //                    }
            }
            // summarize correlations across permutations
            double meanP = JSci.maths.ArrayMath.mean(allCorrelations);
            double sdP = JSci.maths.ArrayMath.standardDeviation(allCorrelations);
            double meanSP = JSci.maths.ArrayMath.mean(allCorrelationSpearman);
            double sdSP = JSci.maths.ArrayMath.standardDeviation(allCorrelationSpearman);
            statsout.writeln(num + "\t" + meanP + "\t" + sdP + "\t" + meanSP + "\t" + sdSP);
        }

        statsout.close();

    }

}
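
The shuffle-then-subList idiom above (Collections.shuffle followed by probes.subList(0, probesToSelect)) draws a random sample without replacement. The idiom in isolation, as a hypothetical helper:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class Sampling {
    // Return n random, distinct elements of items (shuffles items in place).
    static <T> List<T> randomSample(List<T> items, int n) {
        Collections.shuffle(items);
        // Copy the prefix so the sample survives later shuffles of items
        return new ArrayList<>(items.subList(0, Math.min(n, items.size())));
    }
}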

From source file:com.android.contacts.ContactSaveService.java

/**
 * Splits "diff" into subsets based on "MAX_CONTACTS_PROVIDER_BATCH_SIZE", applies each of the
 * subsets, adds the returned array to "results".
 *
 * @return the size of the array, if not null; -1 when the array is null.
 */
private int applyDiffSubset(ArrayList<ContentProviderOperation> diff, int offset,
        ContentProviderResult[] results, ContentResolver resolver)
        throws RemoteException, OperationApplicationException {
    final int subsetCount = Math.min(diff.size() - offset, MAX_CONTACTS_PROVIDER_BATCH_SIZE);
    final ArrayList<ContentProviderOperation> subset = new ArrayList<>();
    subset.addAll(diff.subList(offset, offset + subsetCount));
    final ContentProviderResult[] subsetResult = resolver.applyBatch(ContactsContract.AUTHORITY, subset);
    if (subsetResult == null || (offset + subsetResult.length) > results.length) {
        return -1;
    }
    for (ContentProviderResult c : subsetResult) {
        results[offset++] = c;
    }
    return subsetResult.length;
}
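
The offset/Math.min arithmetic above is the standard way to carve a list into provider-sized batches with subList. A generic sketch of the same chunking pattern (the Consumer callback is illustrative):

import java.util.List;
import java.util.function.Consumer;

final class Batching {
    // Apply action to successive windows of at most batchSize elements.
    static <T> void inBatches(List<T> list, int batchSize, Consumer<List<T>> action) {
        for (int offset = 0; offset < list.size(); offset += batchSize) {
            int end = Math.min(offset + batchSize, list.size());
            action.accept(list.subList(offset, end)); // a view; copy if the callee mutates it
        }
    }
}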

From source file:org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy.java

/**
  * @param candidates pre-filtrate
  * @return filtered subset
  * -- Default minor compaction selection algorithm:
  * choose CompactSelection from candidates --
  * First exclude bulk-load files if indicated in configuration.
  * Start at the oldest file and stop when you find the first file that
  * meets compaction criteria:
  * (1) a recently-flushed, small file (i.e. <= minCompactSize)
  * OR
  * (2) within the compactRatio of sum(newer_files)
  * Given normal skew, any newer files will also meet these criteria
  * <p/>
  * Additional Note:
  * If fileSizes.size() >> maxFilesToCompact, we will recurse on
  * compact().  Consider the oldest files first to avoid a
  * situation where we always compact [end-threshold,end).  Then, the
  * last file becomes an aggregate of the previous compactions.
  *
  * normal skew:
  *
  *         older ----> newer (increasing seqID)
  *     _
  *    | |   _
  *    | |  | |   _
  *  --|-|- |-|- |-|---_-------_-------  minCompactSize
  *    | |  | |  | |  | |  _  | |
  *    | |  | |  | |  | | | | | |
  *    | |  | |  | |  | | | | | |
  */
ArrayList<StoreFile> applyCompactionPolicy(ArrayList<StoreFile> candidates, boolean mayUseOffPeak,
        boolean mayBeStuck) throws IOException {
    if (candidates.isEmpty()) {
        return candidates;
    }

    // we're doing a minor compaction, let's see what files are applicable
    int start = 0;
    double ratio = comConf.getCompactionRatio();
    if (mayUseOffPeak) {
        ratio = comConf.getCompactionRatioOffPeak();
        LOG.info("Running an off-peak compaction, selection ratio = " + ratio);
    }

    // get store file sizes for incremental compacting selection.
    final int countOfFiles = candidates.size();
    long[] fileSizes = new long[countOfFiles];
    long[] sumSize = new long[countOfFiles];
    for (int i = countOfFiles - 1; i >= 0; --i) {
        StoreFile file = candidates.get(i);
        fileSizes[i] = file.getReader().length();
        // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo
        int tooFar = i + comConf.getMaxFilesToCompact() - 1;
        sumSize[i] = fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0)
                - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0);
    }

    while (countOfFiles - start >= comConf.getMinFilesToCompact()
            && fileSizes[start] > Math.max(comConf.getMinCompactSize(), (long) (sumSize[start + 1] * ratio))) {
        ++start;
    }
    if (start < countOfFiles) {
        LOG.info("Default compaction algorithm has selected " + (countOfFiles - start) + " files from "
                + countOfFiles + " candidates");
    } else if (mayBeStuck) {
        // We may be stuck. Compact the latest files if we can.
        int filesToLeave = candidates.size() - comConf.getMinFilesToCompact();
        if (filesToLeave >= 0) {
            start = filesToLeave;
        }
    }
    candidates.subList(0, start).clear();
    return candidates;
}
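
The last statement is the most subList-specific part: because the sublist is a view, clearing it removes that index range from candidates itself, which is the idiomatic way to delete a range from an ArrayList. In isolation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RangeRemoval {
    public static void main(String[] args) {
        List<Integer> nums = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        nums.subList(0, 2).clear(); // removes indices 0 and 1 from nums
        System.out.println(nums);   // [3, 4, 5]
    }
}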

From source file:mondrian.olap.fun.FunUtil.java

/**
 * Partial sort an array by sorting it and returning the first {@code limit}
 * elements. Fastest approach if limit is a significant fraction of the
 * list.
 */
public static <T> List<T> stablePartialSortArray(final List<T> list, final Comparator<T> comp, int limit) {
    ArrayList<T> list2 = new ArrayList<T>(list);
    Collections.sort(list2, comp);
    return list2.subList(0, limit);
}
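
Note that the value returned here is a view backed by the full copy list2, so a caller that holds the result keeps every element of list2 reachable. When that matters, copying the prefix decouples them; a variation on the method above (not what Mondrian ships):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

final class PartialSort {
    // Like stablePartialSortArray, but the result is independent of the sorted copy.
    static <T> List<T> stablePartialSortCopy(List<T> list, Comparator<T> comp, int limit) {
        ArrayList<T> copy = new ArrayList<>(list);
        Collections.sort(copy, comp); // stable sort
        return new ArrayList<>(copy.subList(0, Math.min(limit, copy.size())));
    }
}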

From source file:com.netflix.spinnaker.front50.model.GcsStorageService.java

@Override
public <T extends Timestamped> Collection<T> listObjectVersions(ObjectType objectType, String objectKey,
        int maxResults) throws NotFoundException {
    String path = keyToPath(objectKey, objectType.group);
    ArrayList<T> result = new ArrayList<T>();
    try {
        // NOTE: gcs only returns things in forward chronological order
        // so to get maxResults, we need to download everything then
        // take the last maxResults, not .setMaxResults(new Long(maxResults)) here.
        Storage.Objects.List listObjects = obj_api.list(bucketName).setPrefix(path).setVersions(true);
        Objects[] objectsHolder = new Objects[1];
        do {
            Closure timeExecuteClosure = new Closure<String>(this, this) {
                public Object doCall() throws Exception {
                    objectsHolder[0] = timeExecute(listTimer, listObjects);
                    return Closure.DONE;
                }
            };
            doRetry(timeExecuteClosure, "list versions", objectType.group);

            List<StorageObject> items = objectsHolder[0].getItems();
            if (items != null) {
                for (StorageObject item : items) {
                    T have = deserialize(item, (Class<T>) objectType.clazz, false);
                    if (have != null) {
                        have.setLastModified(item.getUpdated().getValue());
                        result.add(have);
                    }
                }
            }
            listObjects.setPageToken(objectsHolder[0].getNextPageToken());
        } while (objectsHolder[0].getNextPageToken() != null);
    } catch (IOException e) {
        log.error("Could not fetch versions from Google Cloud Storage: {}", e.getMessage());
        return new ArrayList<>();
    }

    Comparator<T> comp = (T a, T b) -> {
        // reverse chronological
        return b.getLastModified().compareTo(a.getLastModified());
    };
    Collections.sort(result, comp);
    if (result.size() > maxResults) {
        return result.subList(0, maxResults);
    }
    return result;
}

From source file:com.hygenics.parser.ParseDispatcher.java

private void spl(ArrayList<String> json, boolean split) {
    if (json.size() > 0)
        log.info("Records to Add: " + json.size());

    if (split) {

        ForkJoinPool f2 = new ForkJoinPool(
                (Runtime.getRuntime().availableProcessors() + ((int) Math.ceil(procnum * sqlnum))));
        ArrayList<String> l;
        int size = (int) Math.ceil(json.size() / (double) qnum); // cast so the division rounds up instead of truncating
        for (int conn = 0; conn < qnum; conn++) {
            l = new ArrayList<String>();
            if (((conn + 1) * size) < json.size()) {
                l.addAll(json.subList((conn * size), ((conn + 1) * size)));

            } else {
                l.addAll(json.subList((conn * size), json.size())); // toIndex is exclusive; size() - 1 would drop the last record
                f2.execute(new SplitPost(template, l));

                break;
            }

            f2.execute(new SplitPost(template, l));
        }

        try {
            f2.awaitTermination(termtime, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }

        f2.shutdown();

        int incrementor = 0;

        while (f2.isShutdown() == false && f2.getActiveThreadCount() > 0 && f2.isQuiescent() == false) {
            incrementor++;
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            log.info("Shutting Down" + incrementor);
        }

        l = null;
        f2 = null;

    } else {
        for (String j : json) {

            boolean valid = false;

            try {
                Json.read(j);
                valid = true;
            } catch (Exception e) {
                log.info("ERROR: JSON NOT FORMATTED PROPERLY");
                System.out.println(j);
            }

            try {

                this.template.postSingleJson(j);
            } catch (Exception e) {
                log.info("Failed to Post");
                log.error(j);
                e.printStackTrace();
            }
        }
    }

}

From source file:com.hygenics.parser.ParseDispatcher.java

private void sendToDb(ArrayList<String> json, boolean split) {
    if (json.size() > 0)
        log.info("Records to Add: " + json.size());

    if (split) {

        ForkJoinPool f2 = new ForkJoinPool(
                (Runtime.getRuntime().availableProcessors() + ((int) Math.ceil(procnum * sqlnum))));
        ArrayList<String> l;
        int size = (int) Math.ceil(json.size() / (double) qnum); // cast so the division rounds up instead of truncating
        for (int conn = 0; conn < qnum; conn++) {
            l = new ArrayList<String>();
            if (((conn + 1) * size) < json.size()) {
                l.addAll(json.subList((conn * size), ((conn + 1) * size)));

            } else {
                l.addAll(json.subList((conn * size), json.size())); // toIndex is exclusive; size() - 1 would drop the last record
                f2.execute(new SplitPost(template, l));

                break;
            }

            f2.execute(new SplitPost(template, l));
        }

        try {
            f2.awaitTermination(termtime, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }

        f2.shutdown();

        int incrementor = 0;

        while (f2.isShutdown() == false && f2.getActiveThreadCount() > 0 && f2.isQuiescent() == false) {
            incrementor++;
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            log.info("Shutting Down" + incrementor);
        }

        l = null;
        f2 = null;

    } else {
        for (String j : json) {

            boolean valid = false;

            try {
                Json.read(j);
                valid = true;
            } catch (Exception e) {
                log.info("ERROR: JSON NOT FORMATTED PROPERLY");
                System.out.println(j);
            }

            try {

                this.template.postSingleJson(j);
            } catch (Exception e) {
                log.info("Failed to Post");
                log.error(j);
                e.printStackTrace();
            }
        }
    }

}

From source file:kr.ac.korea.dbserver.parser.SQLAnalyzer.java

@Override
public Aggregation visitGroupby_clause(SQLParser.Groupby_clauseContext ctx) {
    Aggregation clause = new Aggregation();

    // If grouping group is not empty
    if (ctx.grouping_element_list().grouping_element().get(0).empty_grouping_set() == null) {
        int elementSize = ctx.grouping_element_list().grouping_element().size();
        ArrayList<GroupElement> groups = new ArrayList<GroupElement>(elementSize + 1);
        ArrayList<Expr> ordinaryExprs = null;
        int groupSize = 1;
        groups.add(null);

        for (int i = 0; i < elementSize; i++) {
            SQLParser.Grouping_elementContext element = ctx.grouping_element_list().grouping_element().get(i);
            if (element.ordinary_grouping_set() != null) {
                if (ordinaryExprs == null) {
                    ordinaryExprs = new ArrayList<Expr>();
                }
                Collections.addAll(ordinaryExprs,
                        getRowValuePredicandsFromOrdinaryGroupingSet(element.ordinary_grouping_set()));
            } else if (element.rollup_list() != null) {
                groupSize++;
                groups.add(new GroupElement(GroupType.Rollup,
                        getRowValuePredicandsFromOrdinaryGroupingSetList(element.rollup_list().c)));
            } else if (element.cube_list() != null) {
                groupSize++;
                groups.add(new GroupElement(GroupType.Cube,
                        getRowValuePredicandsFromOrdinaryGroupingSetList(element.cube_list().c)));
            }
        }

        if (ordinaryExprs != null) {
            groups.set(0, new GroupElement(GroupType.OrdinaryGroup,
                    ordinaryExprs.toArray(new Expr[ordinaryExprs.size()])));
            clause.setGroups(groups.subList(0, groupSize).toArray(new GroupElement[groupSize]));
        } else if (groupSize > 1) {
            clause.setGroups(groups.subList(1, groupSize).toArray(new GroupElement[groupSize - 1]));
        }
    }

    return clause;
}
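
Both assignments to clause.setGroups use subList to turn only the populated prefix of an over-allocated list into an array, skipping the placeholder slot when it was never filled. The prefix-to-array pattern on its own:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PrefixToArray {
    public static void main(String[] args) {
        List<String> groups = new ArrayList<>(Arrays.asList("ordinary", "rollup", "cube", null));
        int used = 3; // only the first three slots were populated
        String[] prefix = groups.subList(0, used).toArray(new String[used]);
        System.out.println(Arrays.toString(prefix)); // [ordinary, rollup, cube]
    }
}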

From source file:org.apache.tajo.engine.parser.HiveQLAnalyzer.java

@Override
public Aggregation visitGroupByClause(HiveQLParser.GroupByClauseContext ctx) {
    Aggregation clause = new Aggregation();

    if (ctx.groupByExpression().size() > 0) {
        int elementSize = ctx.groupByExpression().size();
        ArrayList<Aggregation.GroupElement> groups = new ArrayList<Aggregation.GroupElement>(elementSize + 1);
        ArrayList<Expr> ordinaryExprs = new ArrayList<Expr>();
        int groupSize = 1;
        groups.add(null);

        for (int i = 0; i < ctx.groupByExpression().size(); i++) {
            Expr expr = visitGroupByExpression(ctx.groupByExpression(i));

            if (expr instanceof FunctionExpr) {
                FunctionExpr function = (FunctionExpr) expr;

                if (function.getSignature().equalsIgnoreCase("ROLLUP")) {
                    groupSize++;
                    groups.add(
                            new Aggregation.GroupElement(Aggregation.GroupType.Rollup, function.getParams()));
                } else if (function.getSignature().equalsIgnoreCase("CUBE")) {
                    groupSize++;
                    groups.add(new Aggregation.GroupElement(Aggregation.GroupType.Cube, function.getParams()));
                } else {
                    Collections.addAll(ordinaryExprs, function);
                }
            } else {
                Collections.addAll(ordinaryExprs, (ColumnReferenceExpr) expr);
            }
        }

        if (!ordinaryExprs.isEmpty()) { // the list is created above, so a null check would always pass
            groups.set(0, new Aggregation.GroupElement(Aggregation.GroupType.OrdinaryGroup,
                    ordinaryExprs.toArray(new Expr[ordinaryExprs.size()])));
            clause.setGroups(groups.subList(0, groupSize).toArray(new Aggregation.GroupElement[groupSize]));
        } else if (groupSize > 1) {
            clause.setGroups(groups.subList(1, groupSize).toArray(new Aggregation.GroupElement[groupSize - 1]));
        }
    }

    //TODO: grouping set expression
    return clause;
}