Example usage for java.util.ArrayList.subList

List of usage examples for java.util.ArrayList.subList

Introduction

On this page you can find example usages of java.util.ArrayList.subList.

Prototype

public List<E> subList(int fromIndex, int toIndex) 

Document

Returns a view of the portion of this list between the specified fromIndex, inclusive, and toIndex, exclusive.
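
Before the project examples below, here is a minimal, self-contained sketch (the class name SubListDemo is illustrative and not taken from any of the listed sources) showing the view semantics: toIndex is exclusive, and operations on the returned view, such as sorting or clearing it, write through to the backing ArrayList.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SubListDemo {
    public static void main(String[] args) {
        ArrayList<String> names = new ArrayList<>(Arrays.asList("delta", "alpha", "charlie", "bravo", "echo"));

        // Indices 1, 2 and 3; index 4 is excluded because toIndex is exclusive.
        List<String> middle = names.subList(1, 4);
        System.out.println(middle);   // [alpha, charlie, bravo]

        // The view is backed by the original list: sorting it reorders those elements in place.
        Collections.sort(middle);
        System.out.println(names);    // [delta, alpha, bravo, charlie, echo]

        // Clearing the view removes that range from the backing list.
        middle.clear();
        System.out.println(names);    // [delta, echo]
    }
}

Note that structurally modifying the backing list directly (rather than through the view) invalidates the view; subsequent use of it throws a ConcurrentModificationException.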

Usage

From source file:com.vaquerosisd.projectmanager.NewProject.java

private void setStatusSpinner() {
    String statusOptions = fO.readFile(statusFileName);
    ArrayList<String> statusOptionsArray = fO.convertToStringList(statusOptions);
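    // The subList view below spans the entire list, so this sorts statusOptionsArray in place.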
    Collections.sort(statusOptionsArray.subList(0, statusOptionsArray.size()));
    statusOptionsArray.add("Custom...");

    //Set the adapter for the status spinner 
    statusSpinnerAdapter = new ArrayAdapter<CharSequence>(this, android.R.layout.simple_spinner_item);
    statusSpinnerAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
    statusSpinnerAdapter.addAll(statusOptionsArray);
    statusSpinner.setAdapter(statusSpinnerAdapter);
}

From source file:com.vaquerosisd.projectmanager.NewProject.java

@Override
public void onDialogPositiveClick(DialogFragment dialog, String statusOption) {
    //Add new item
    fO.appendText(statusFileName, statusOption + "\n");
    String statusOptions = fO.readFile(statusFileName);
    ArrayList<String> statusOptionsArray = fO.convertToStringList(statusOptions);
    Collections.sort(statusOptionsArray.subList(0, statusOptionsArray.size()));
    statusOptionsArray.add("Custom...");
    statusSpinnerAdapter.clear();
    statusSpinnerAdapter.addAll(statusOptionsArray);
    statusSpinnerAdapter.notifyDataSetChanged();

    int statusPosition = statusSpinnerAdapter.getPosition(statusOption);
    statusSpinner.setSelection(statusPosition, true);
}

From source file:com.vaquerosisd.projectmanager.NewTask.java

private void setSpinner(String spinnerFileName, ArrayAdapter<CharSequence> spinnerAdapter,
        Spinner spinnerObject, boolean sort) {
    String spinnerItems = fO.readFile(spinnerFileName);
    ArrayList<String> spinnerItemsArray = fO.convertToStringList(spinnerItems);
    if (sort)
        Collections.sort(spinnerItemsArray.subList(0, spinnerItemsArray.size()));

    //Set the adapter for the status spinner 
    spinnerAdapter = new ArrayAdapter<CharSequence>(this, android.R.layout.simple_spinner_item);
    spinnerAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
    spinnerAdapter.addAll(spinnerItemsArray);
    spinnerObject.setAdapter(spinnerAdapter);
}

From source file:de.tudarmstadt.ukp.dkpro.core.dictionaryannotator.DictionaryAnnotator.java

@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
    Type type = getType(jcas.getCas(), annotationType);

    Feature f = null;
    if ((valueFeature != null) && (value != null)) {
        f = type.getFeatureByBaseName(valueFeature);
        if (f == null) {
            throw new IllegalArgumentException(
                    "Undeclared feature [" + valueFeature + "] in type [" + annotationType + "]");
        }
    }

    for (Sentence currSentence : select(jcas, Sentence.class)) {
        ArrayList<Token> tokens = new ArrayList<Token>(selectCovered(Token.class, currSentence));

        for (int i = 0; i < tokens.size(); i++) {
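            // subList's toIndex is exclusive, so this view runs from token i up to, but not including, the sentence's last token.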
            List<Token> tokensToSentenceEnd = tokens.subList(i, tokens.size() - 1);
            String[] sentenceToEnd = new String[tokens.size()];

            for (int j = 0; j < tokensToSentenceEnd.size(); j++) {
                sentenceToEnd[j] = tokensToSentenceEnd.get(j).getCoveredText();
            }

            String[] longestMatch = phrases.getLongestMatch(sentenceToEnd);

            if (longestMatch != null) {
                Token beginToken = tokens.get(i);
                Token endToken = tokens.get(i + longestMatch.length - 1);

                AnnotationFS newFound = jcas.getCas().createAnnotation(type, beginToken.getBegin(),
                        endToken.getEnd());

                if (f != null) {
                    newFound.setFeatureValueFromString(f, value);
                }

                jcas.getCas().addFsToIndexes(newFound);
            }
        }
    }
}

From source file:de.uhh.lt.lefex.Utils.DictionaryAnnotator.java

@Override
public void process(JCas jcas) throws AnalysisEngineProcessException {
    Type type = getType(jcas.getCas(), annotationType);

    Feature f = null;
    if ((valueFeature != null) && (value != null)) {
        f = type.getFeatureByBaseName(valueFeature);
        if (f == null) {
            throw new IllegalArgumentException(
                    "Undeclared feature [" + valueFeature + "] in type [" + annotationType + "]");
        }
    }

    for (Sentence currSentence : select(jcas, Sentence.class)) {
        ArrayList<Token> tokens = new ArrayList<Token>(selectCovered(Token.class, currSentence));

        for (int i = 0; i < tokens.size(); i++) {
            List<Token> tokensToSentenceEnd = tokens.subList(i, tokens.size() - 1);

            if (extendedMatch.toLowerCase().equals("true")) {
                String[] longestMatchTokens = findMatch(tokens, tokensToSentenceEnd, false, true);
                String[] longestMatchLemmas = findMatch(tokens, tokensToSentenceEnd, true, true);
                if (longestMatchTokens != null && longestMatchLemmas != null
                        && longestMatchTokens.length == longestMatchLemmas.length) {
                    annotateMatch(jcas, type, f, tokens, i, longestMatchLemmas);
                } else {
                    annotateMatch(jcas, type, f, tokens, i, longestMatchTokens);
                    annotateMatch(jcas, type, f, tokens, i, longestMatchLemmas);
                }
            } else {
                String[] longestMatch = findMatch(tokens, tokensToSentenceEnd, false, false);
                annotateMatch(jcas, type, f, tokens, i, longestMatch);
            }
        }
    }
}

From source file:org.apache.hadoop.hive.ql.plan.PlanUtils.java

/**
 * Create the reduce sink descriptor.
 *
 * @param keyCols
 *          The columns to be stored in the key
 * @param numKeys  number of distribution keys; usually equals the number of
 *        group-by keys.
 * @param valueCols
 *          The columns to be stored in the value
 * @param distinctColIndices
 *          column indices for distinct aggregates
 * @param outputKeyColumnNames
 *          The output key columns names
 * @param outputValueColumnNames
 *          The output value columns names
 * @param tag
 *          The tag for this reducesink
 * @param numPartitionFields
 *          The first numPartitionFields of keyCols will be partition columns.
 *          If numPartitionFields=-1, then partition randomly.
 * @param numReducers
 *          The number of reducers, set to -1 for automatic inference based on
 *          input data size.
 * @param writeType Whether this is an Acid write, and if so whether it is insert, update,
 *                  or delete.
 * @return The reduceSinkDesc object.
 */
public static ReduceSinkDesc getReduceSinkDesc(ArrayList<ExprNodeDesc> keyCols, int numKeys,
        ArrayList<ExprNodeDesc> valueCols, List<List<Integer>> distinctColIndices,
        List<String> outputKeyColumnNames, List<String> outputValueColumnNames, boolean includeKey, int tag,
        int numPartitionFields, int numReducers, AcidUtils.Operation writeType) throws SemanticException {

    ArrayList<ExprNodeDesc> partitionCols = new ArrayList<ExprNodeDesc>();
    if (numPartitionFields >= keyCols.size()) {
        partitionCols.addAll(keyCols);
    } else if (numPartitionFields >= 0) {
        partitionCols.addAll(keyCols.subList(0, numPartitionFields));
    } else {
        // numPartitionFields = -1 means random partitioning
        partitionCols.add(TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("rand"));
    }

    StringBuilder order = new StringBuilder();
    for (int i = 0; i < keyCols.size(); i++) {
        order.append("+");
    }
    return getReduceSinkDesc(keyCols, numKeys, valueCols, distinctColIndices, outputKeyColumnNames,
            outputValueColumnNames, includeKey, tag, partitionCols, order.toString(), numReducers, writeType);
}

From source file:org.hyperledger.fabric.sdk.ServiceDiscovery.java

private static List<SDEndorser> topNbyHeight(int required, List<SDEndorser> endorsers) {
    ArrayList<SDEndorser> ret = new ArrayList<>(endorsers);
    ret.sort(Comparator.comparingLong(SDEndorser::getLedgerHeight));
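    // After the ascending sort by ledger height, the tail view holds the 'required' highest peers;
    // Math.max guards against a negative fromIndex when fewer endorsers than 'required' exist.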
    return ret.subList(Math.max(ret.size() - required, 0), ret.size());
}

From source file:org.eurocarbdb.dataaccess.Eurocarb.java

/**
 *   Returns a {@link List} of given length of the most recently
 *   {@link Contributed} objects to the current data store, in order
 *   of most to least recent, or an empty list if there are no
 *   {@link Contributed} objects. Note that this method only
 *   returns additions -- modifications to older objects will not
 *   be included.
 *
 *   @see Contributor.getMyRecentContributions(int)
 */
public static List<Contributed> getRecentContributions(int max_results) {
    // hibernate cannot limit polymorphic queries in the database
    // we have to do it one class by one
    log.debug("looking up all Contributed objects");

    // get all contributed objects
    ArrayList<Contributed> changes = new ArrayList<Contributed>();
    changes.addAll(getRecentlyContributed(GlycanSequence.class, max_results));
    changes.addAll(getRecentlyContributed(Evidence.class, max_results));
    /**
     * FIXME: I do have a fix in mind for this, but I want to make sure I catch
     * everything that was going via BiologicalContext before I reimplement the 
     * Contributed interface on this class.
     */
    //changes.addAll( getRecentlyContributed( BiologicalContext.class, max_results));
    changes.addAll(getRecentlyContributed(Reference.class, max_results));

    // sort by date
    Collections.sort(changes, new Comparator<Contributed>() {
        public int compare(Contributed o1, Contributed o2) {
            return -o1.getDateEntered().compareTo(o2.getDateEntered());
        }

        public boolean equals(Object obj) {
            return this == obj;
        }
    });

    // get sublist
    if (changes.size() < max_results) {
        return changes;
    }

    return changes.subList(0, max_results);
}

From source file:org.apache.activemq.leveldb.test.IndexRebuildTest.java

@Test(timeout = 1000 * 60 * 10)
public void testRebuildIndex() throws Exception {

    File masterDir = new File("target/activemq-data/leveldb-rebuild");
    FileSupport.toRichFile(masterDir).recursiveDelete();

    final LevelDBStore store = new LevelDBStore();
    store.setDirectory(masterDir);
    store.setLogDirectory(masterDir);

    store.setLogSize(1024 * 10);
    store.start();
    stores.add(store);

    ArrayList<MessageId> inserts = new ArrayList<MessageId>();
    MessageStore ms = store.createQueueMessageStore(new ActiveMQQueue("TEST"));
    for (int i = 0; i < max; i++) {
        inserts.add(addMessage(ms, "m" + i).getMessageId());
    }
    int logFileCount = countLogFiles(store);
    assertTrue("more than one journal file", logFileCount > 1);

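    // Remove all but the last 'toLeave' messages; subList(0, size - toLeave) is a view of the leading entries.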
    for (MessageId id : inserts.subList(0, inserts.size() - toLeave)) {
        removeMessage(ms, id);
    }

    LevelDBStoreView view = new LevelDBStoreView(store);
    view.compact();

    int reducedLogFileCount = countLogFiles(store);
    assertTrue("log files deleted", logFileCount > reducedLogFileCount);

    store.stop();

    deleteTheIndex(store);

    assertEquals("log files remain", reducedLogFileCount, countLogFiles(store));

    // restart, recover and verify message read
    store.start();
    ms = store.createQueueMessageStore(new ActiveMQQueue("TEST"));

    assertEquals(toLeave + " messages remain", toLeave, getMessages(ms).size());
}

From source file:org.seqdoop.hadoop_bam.TestVCFOutputFormat.java

@Test
public void testVariantContextReadWrite() throws IOException, InterruptedException {
    // This is to check whether issue https://github.com/HadoopGenomics/Hadoop-BAM/issues/1 has been
    // resolved.
    VariantContextBuilder vctx_builder = new VariantContextBuilder();

    ArrayList<Allele> alleles = new ArrayList<Allele>();
    alleles.add(Allele.create("C", false));
    alleles.add(Allele.create("G", true));
    vctx_builder.alleles(alleles);

    ArrayList<Genotype> genotypes = new ArrayList<Genotype>();
    GenotypeBuilder builder = new GenotypeBuilder();
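    // alleles.subList(0, 1) is a single-element view holding only the first allele ("C").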
    genotypes.add(builder.alleles(alleles.subList(0, 1)).name("NA00001").GQ(48).DP(1).make());
    genotypes.add(builder.alleles(alleles.subList(0, 1)).name("NA00002").GQ(42).DP(2).make());
    genotypes.add(builder.alleles(alleles.subList(0, 1)).name("NA00003").GQ(39).DP(3).make());
    vctx_builder.genotypes(genotypes);

    HashSet<String> filters = new HashSet<String>();
    vctx_builder.filters(filters);

    HashMap<String, Object> attributes = new HashMap<String, Object>();
    attributes.put("NS", new Integer(4));
    vctx_builder.attributes(attributes);

    vctx_builder.loc("20", 2, 2);
    vctx_builder.log10PError(-8.0);

    VariantContext ctx = vctx_builder.make();
    VariantContextWithHeader ctxh = new VariantContextWithHeader(ctx, readHeader());
    writable.set(ctxh);

    DataOutputBuffer out = new DataOutputBuffer(1000);
    writable.write(out);

    byte[] data = out.getData();
    ByteArrayInputStream bis = new ByteArrayInputStream(data);

    writable = new VariantContextWritable();
    writable.readFields(new DataInputStream(bis));

    VariantContext vc = writable.get();
    Assert.assertArrayEquals("comparing Alleles", ctx.getAlleles().toArray(), vc.getAlleles().toArray());
    Assert.assertEquals("comparing Log10PError", ctx.getLog10PError(), vc.getLog10PError(), 0.01);
    Assert.assertArrayEquals("comparing Filters", ctx.getFilters().toArray(), vc.getFilters().toArray());
    Assert.assertEquals("comparing Attributes", ctx.getAttributes(), vc.getAttributes());

    // Now check the genotypes. Note: we need to make the header accessible before decoding the genotypes.
    GenotypesContext gc = vc.getGenotypes();
    assert (gc instanceof LazyVCFGenotypesContext);
    LazyVCFGenotypesContext.HeaderDataCache headerDataCache = new LazyVCFGenotypesContext.HeaderDataCache();
    headerDataCache.setHeader(readHeader());
    ((LazyVCFGenotypesContext) gc).getParser().setHeaderDataCache(headerDataCache);

    for (Genotype genotype : genotypes) {
        Assert.assertEquals("checking genotype name", genotype.getSampleName(),
                gc.get(genotypes.indexOf(genotype)).getSampleName());
        Assert.assertEquals("checking genotype quality", genotype.getGQ(),
                gc.get(genotypes.indexOf(genotype)).getGQ());
        Assert.assertEquals("checking genotype read depth", genotype.getDP(),
                gc.get(genotypes.indexOf(genotype)).getDP());
    }
}