Example usage for java.util Vector subList

List of usage examples for java.util Vector subList

Introduction

On this page you can find example usages of java.util.Vector.subList.

Prototype

public synchronized List<E> subList(int fromIndex, int toIndex) 

Source Link

Document

Returns a view of the portion of this List between fromIndex, inclusive, and toIndex, exclusive.

Usage

From source file:MainClass.java

/**
 * Demonstrates Vector.subList: prints the elements of the index range
 * [1, 2) of a three-element vector, i.e. only "B".
 *
 * @param args command-line arguments (unused)
 */
public static void main(String args[]) {
    Vector<String> v1 = new Vector<String>();
    v1.add("A");
    v1.add("B");
    v1.add("C");

    // subList returns a view of the range [fromIndex, toIndex).
    List<String> l = v1.subList(1, 2);

    for (String element : l) {
        System.out.println(element);
    }
}

From source file:Main.java

/**
 * Demonstrates Vector.subList with a generic vector: prints the elements
 * of the index range [1, 2), i.e. only "B".
 *
 * @param args command-line arguments (unused)
 */
public static void main(String args[]) {
    Vector<String> v1 = new Vector<String>();
    v1.add("A");
    v1.add("B");
    v1.add("C");

    // Keep the element type: subList of a Vector<String> is a List<String>.
    List<String> l = v1.subList(1, 2);

    for (String element : l) {
        System.out.println(element);
    }
}

From source file:Main.java

/**
 * Demonstrates that Vector.subList returns a view backed by the vector:
 * removing an element through the sub-list also removes it from the
 * underlying vector.  Prints "[]" followed by "[A, C]".
 *
 * @param args command-line arguments (unused)
 */
public static void main(String args[]) {
    Vector<String> v1 = new Vector<String>();
    v1.add("A");
    v1.add("B");
    v1.add("C");

    // View of index range [1, 2) -> contains only "B".
    List<String> l = v1.subList(1, 2);

    // Removing through the view writes through to the backing vector.
    l.remove(0);

    System.out.println(l);   // []
    System.out.println(v1);  // [A, C]
}

From source file:Main.java

/**
 * Demonstrates Vector.subList on a vector of integers: prints the view of
 * index range [2, 6), i.e. [2, 1, 6, 7].
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    Vector<Integer> vec = new Vector<Integer>(8);

    vec.add(4);
    vec.add(3);
    vec.add(2);
    vec.add(1);
    vec.add(6);
    vec.add(7);
    vec.add(9);
    vec.add(5);

    // No need to pre-allocate a separate list (the original allocated a
    // throwaway ArrayList(10)); subList returns a view directly.
    List<Integer> sublist = vec.subList(2, 6);

    System.out.println(sublist);
}

From source file:edu.uga.cs.fluxbuster.clustering.hierarchicalclustering.DistanceMatrix.java

/**
 * Populate distance matrix from a list of matrix values specified in row
 * major order./*from   w  ww .ja v a  2  s . c om*/
 * 
 * @param vals
 *            the matrix values
 */
private void populateDistanceMatrix(Vector<Float> vals) {
    int matrixdim = (int) Math.ceil(Math.sqrt(2 * vals.size()));

    int length = matrixdim - 1;
    int start = 0;
    for (int i = 0; i < matrixdim - 1; i++) {
        Vector<Float> row = new Vector<Float>();
        row.addAll(vals.subList(start, start + length));
        distMatrix.add(row);
        start += length;
        length--;
    }
}

From source file:net.sf.jabref.exporter.layout.LayoutEntry.java

/**
 * Constructs a composite layout entry from a block of parsed entries.
 * <p>
 * The first and last entries are expected to carry the same field name
 * (the block's start/end markers); a warning is logged if they differ.
 * The interior entries are scanned for nested field/group blocks: each
 * matched block is collected and turned into a child {@code LayoutEntry}
 * via this same constructor, while entries outside any block become
 * single-entry children.  Only one nesting level is supported; a
 * mismatched end marker logs a warning.
 *
 * @param parsedEntries the parsed entries, with the enclosing start and
 *                      end markers at the first and last positions
 * @param classPrefix_  prefix handed down unchanged to child entries
 * @param layoutType    the layout type to assign to this entry
 */
public LayoutEntry(Vector<StringInt> parsedEntries, final String classPrefix_, int layoutType) {
    classPrefix = classPrefix_;
    Vector<StringInt> blockEntries = null;
    Vector<LayoutEntry> tmpEntries = new Vector<>();
    LayoutEntry le;
    String blockStart = parsedEntries.get(0).s;
    String blockEnd = parsedEntries.get(parsedEntries.size() - 1).s;

    if (!blockStart.equals(blockEnd)) {
        LOGGER.warn("Field start and end entry must be equal.");
    }

    type = layoutType;
    text = blockEnd;
    // Scan only the interior entries (excluding the enclosing markers).
    for (StringInt parsedEntry : parsedEntries.subList(1, parsedEntries.size() - 1)) {
        if ((parsedEntry.i == LayoutHelper.IS_LAYOUT_TEXT) || (parsedEntry.i == LayoutHelper.IS_SIMPLE_FIELD)) {
            // Do nothing
        } else if ((parsedEntry.i == LayoutHelper.IS_FIELD_START)
                || (parsedEntry.i == LayoutHelper.IS_GROUP_START)) {
            // A nested block begins: start collecting its entries.
            blockEntries = new Vector<>();
            blockStart = parsedEntry.s;
        } else if ((parsedEntry.i == LayoutHelper.IS_FIELD_END)
                || (parsedEntry.i == LayoutHelper.IS_GROUP_END)) {
            if (blockStart.equals(parsedEntry.s)) {
                // End marker matches the open block: close it and build a
                // child entry for the collected block.
                blockEntries.add(parsedEntry);
                if (parsedEntry.i == LayoutHelper.IS_GROUP_END) {
                    le = new LayoutEntry(blockEntries, classPrefix, LayoutHelper.IS_GROUP_START);
                } else {
                    le = new LayoutEntry(blockEntries, classPrefix, LayoutHelper.IS_FIELD_START);
                }
                tmpEntries.add(le);
                blockEntries = null;
            } else {
                LOGGER.warn("Nested field entries are not implemented !!!");
            }
        } else if (parsedEntry.i == LayoutHelper.IS_OPTION_FIELD) {
            // Do nothing
        }

        // NOTE(review): when a block was just closed above, blockEntries is
        // null again, so the closing end marker is ALSO added here as a
        // standalone child — confirm this double-add is intentional.
        if (blockEntries == null) {
            tmpEntries.add(new LayoutEntry(parsedEntry, classPrefix));
        } else {
            blockEntries.add(parsedEntry);
        }
    }

    layoutEntries = new LayoutEntry[tmpEntries.size()];

    for (int i = 0; i < tmpEntries.size(); i++) {
        layoutEntries[i] = tmpEntries.get(i);

        // Note if one of the entries has an invalid formatter:
        if (layoutEntries[i].isInvalidFormatter()) {
            if (invalidFormatter == null) {
                invalidFormatter = new ArrayList<>(1);
            }
            invalidFormatter.addAll(layoutEntries[i].getInvalidFormatters());
        }

    }

}

From source file:org.apache.hadoop.raid.Encoder.java

/**
 * The interface to use to generate a parity file.
 * This method can be called multiple times with the same Encoder object,
 * thus allowing reuse of the buffers allocated by the Encoder object.
 *
 * @param jobConf the job configuration.
 * @param fs The filesystem containing the source file.
 * @param parityFs The filesystem to hold the parity file.
 * @param parityFile The parity file to be generated.
 * @param parityRepl target replication factor for the final parity file.
 * @param numStripes number of stripes covered by the parity file.
 * @param blockSize expected block size of the parity file.
 * @param reporter progress reporter.
 * @param sReader reader over the source stripes to encode.
 * @param ec checkpointed state of this encoding attempt (isEncoded /
 *           isRenamed / isConcated flags, see below).
 * @return true on success, false if encoding should be retried later.
 * @throws InterruptedException 
 */
public boolean encodeFile(Configuration jobConf, FileSystem fs, FileSystem parityFs, Path parityFile,
        short parityRepl, long numStripes, long blockSize, Progressable reporter, StripeReader sReader,
        EncodingCandidate ec) throws IOException, InterruptedException {
    DistributedFileSystem dfs = DFSUtil.convertToDFS(parityFs);
    Path srcFile = ec.srcStat.getPath();
    long expectedParityFileSize = numStripes * blockSize * codec.parityLength;
    long expectedPartialParityBlocks = (sReader.stripeEndIdx - sReader.stripeStartIdx) * codec.parityLength;
    long expectedPartialParityFileSize = expectedPartialParityBlocks * blockSize;

    // Create a tmp file to which we will write first.
    String jobID = RaidNode.getJobID(jobConf);
    Path tmpDir = new Path(codec.tmpParityDirectory, jobID);
    if (!parityFs.mkdirs(tmpDir)) {
        throw new IOException("Could not create tmp dir " + tmpDir);
    }

    // Partial-parity working dir is keyed by source path hash, mtime and
    // encoding unit/id so concurrent or retried attempts land in one place.
    String partialParityName = "partial_" + MD5Hash.digest(srcFile.toUri().getPath()) + "_"
            + ec.srcStat.getModificationTime() + "_" + ec.encodingUnit + "_" + ec.encodingId;
    Path partialParityDir = new Path(tmpDir, partialParityName);
    Path tmpPartialParityDir = new Path(partialParityDir, "tmp");
    Path finalPartialParityDir = new Path(partialParityDir, "final");
    if (!parityFs.mkdirs(partialParityDir)) {
        throw new IOException("Could not create partial parity directory " + partialParityDir);
    }
    // If we write a parity for a large directory, 
    // Use 3 replicas to guarantee the durability by default
    short tmpRepl = (short) conf.getInt(RaidNode.RAID_PARITY_INITIAL_REPL_KEY,
            RaidNode.DEFAULT_RAID_PARITY_INITIAL_REPL);

    Path finalTmpParity = null;
    /**
     * To support retriable encoding, we use three checkpoints to represent
     * the last success state. 
     * 1. isEncoded: Set to true when partial partiy is generated
     * 2. isRenamed: Set to true when all partial parity are generated and
     *               tmpPartialParityDir is moved to finalPartialParityDir
     * 3. isConcated: Set to true when partial parities are concatenated into
     *                a final parity. 
     */
    if (!ec.isConcated) {
        if (!ec.isEncoded) {
            if (!parityFs.mkdirs(tmpPartialParityDir)) {
                throw new IOException("Could not create " + tmpPartialParityDir);
            }
            Path partialTmpParity = new Path(tmpPartialParityDir, Long.toString(sReader.getCurrentStripeIdx()));
            LOG.info("Encoding partial parity " + partialTmpParity);
            if (!encodeTmpParityFile(jobConf, sReader, dfs, partialTmpParity, parityFile, tmpRepl, blockSize,
                    expectedPartialParityBlocks, expectedPartialParityFileSize, reporter)) {
                return false;
            }
            LOG.info("Encoded partial parity " + partialTmpParity);
        }
        ec.isEncoded = true;
        long expectedNum = (long) Math.ceil(numStripes * 1.0 / ec.encodingUnit);
        if (!ec.isRenamed) {
            // Wait until every partial parity exists before publishing.
            if (!finishAllPartialEncoding(parityFs, tmpPartialParityDir, expectedNum)) {
                return false;
            }
            InjectionHandler.processEventIO(InjectionEvent.RAID_ENCODING_FAILURE_RENAME_FILE);
            // Move the directory to final
            if (!dfs.rename(tmpPartialParityDir, finalPartialParityDir)) {
                LOG.info("Fail to rename " + tmpPartialParityDir + " to " + finalPartialParityDir);
                return false;
            }
            LOG.info("Renamed " + tmpPartialParityDir + " to " + finalPartialParityDir);
            ec.isRenamed = true;
        }
        FileStatus[] stats = parityFs.listStatus(finalPartialParityDir);
        // Verify partial parities are correct
        Vector<Path> partialPaths = getPartialPaths((int) ec.encodingUnit, (int) expectedNum, stats, codec,
                numStripes);
        finalTmpParity = partialPaths.get(0);
        InjectionHandler.processEventIO(InjectionEvent.RAID_ENCODING_FAILURE_CONCAT_FILE);
        if (partialPaths.size() > 1) {
            // Concatenate all partial parities after the first onto it.
            Path[] restPaths = partialPaths.subList(1, partialPaths.size())
                    .toArray(new Path[partialPaths.size() - 1]);
            try {
                // Concat requires source and target files are in the same directory
                dfs.concat(finalTmpParity, restPaths, true);
                LOG.info("Concated " + partialPaths.size() + " files into " + finalTmpParity);

            } catch (IOException ioe) {
                // Maybe other tasks already finish concating. 
                LOG.info("Fail to concat " + partialPaths.size() + " files into " + finalTmpParity, ioe);
                throw ioe;
            }
        }
        ec.isConcated = true;
    } else {
        // Concat already happened in a previous attempt: the single
        // concatenated parity must be the only file left in final dir.
        FileStatus[] stats = parityFs.listStatus(finalPartialParityDir);
        if (stats == null || stats.length == 0) {
            return false;
        }
        if (stats.length > 1) {
            throw new IOException("We shouldn't have more than 1 files under" + finalPartialParityDir);
        }
        finalTmpParity = stats[0].getPath();
    }
    // Sanity-check the assembled parity before publishing it.
    FileStatus tmpStat = parityFs.getFileStatus(finalTmpParity);
    if (tmpStat.getBlockSize() != blockSize) {
        throw new IOException("Expected parity block size " + blockSize + " does not match actual "
                + tmpStat.getBlockSize() + " in path " + finalTmpParity);
    }
    if (tmpStat.getLen() != expectedParityFileSize) {
        throw new IOException("Expected parity size " + expectedParityFileSize + " does not match actual "
                + tmpStat.getLen() + " in path " + finalTmpParity);
    }
    if (ec.srcStripes == null && stripeStore != null) {
        InjectionHandler.processEventIO(InjectionEvent.RAID_ENCODING_FAILURE_GET_SRC_STRIPES);
        ec.srcStripes = getSrcStripes(jobConf, dfs, srcFile, codec, numStripes, sReader, reporter);
        if (ec.srcStripes == null) {
            LOG.error("Cannot get srcStripes for " + srcFile);
            return false;
        }
    }

    // delete destination if exists
    if (dfs.exists(parityFile)) {
        dfs.delete(parityFile, false);
    }
    dfs.mkdirs(parityFile.getParent());
    if (!dfs.rename(finalTmpParity, parityFile)) {
        String msg = "Unable to rename file " + finalTmpParity + " to " + parityFile;
        throw new IOException(msg);
    }
    LOG.info("Wrote parity file " + parityFile);

    if (stripeStore != null) {
        this.writeToStripeStore(ec.srcStripes, dfs, fs, srcFile, parityFs, parityFile, expectedParityFileSize,
                reporter, finalTmpParity);
    }
    if (tmpRepl != parityRepl) {
        // Drop from the durable initial replication to the requested one.
        dfs.setReplication(parityFile, parityRepl);
        LOG.info("Reduce replication of " + parityFile + " to " + parityRepl);
    }
    dfs.delete(partialParityDir, true);
    return true;
}

From source file:org.chiba.tools.schemabuilder.AbstractSchemaFormBuilder.java

/**
 * Adds one xforms item (label + value) per choice value to the given
 * choices element, in sorted order.
 * <p>
 * NOTE(review): {@code choiceValues.subList(0, size())} is a view of the
 * whole vector, so the {@link Collections#sort} below sorts the caller's
 * vector in place — confirm callers tolerate this side effect.
 * <p>
 * TODO: Should really put the default value (if any) at the top of the list.
 *
 * @param xForm          document used as the element factory
 * @param choicesElement element the generated items are appended to
 * @param choiceValues   the values to offer; each element is cast to
 *                       String (unchecked)
 */
protected void addChoicesForSelectControl(Document xForm, Element choicesElement, Vector choiceValues) {
    // Sort the enum values (via a full-range subList view), then emit
    // one <item> per value.
    List sortedList = choiceValues.subList(0, choiceValues.size());
    Collections.sort(sortedList);

    Iterator iterator = sortedList.iterator();

    while (iterator.hasNext()) {
        String textValue = (String) iterator.next();
        Element item = xForm.createElementNS(XFORMS_NS, getXFormsNSPrefix() + "item");
        this.setXFormsId(item);
        choicesElement.appendChild(item);

        // Human-readable caption for the choice.
        Element captionElement = xForm.createElementNS(XFORMS_NS, getXFormsNSPrefix() + "label");
        this.setXFormsId(captionElement);
        item.appendChild(captionElement);
        captionElement.appendChild(xForm.createTextNode(createCaption(textValue)));

        // Raw value submitted when the choice is selected.
        Element value = xForm.createElementNS(XFORMS_NS, getXFormsNSPrefix() + "value");
        this.setXFormsId(value);
        item.appendChild(value);
        value.appendChild(xForm.createTextNode(textValue));
    }
}

From source file:org.sakaiproject.util.BaseDbDoubleStorage.java

/**
 * Get resources filtered by date and count and drafts, in descending (latest first) order
 * 
 * @param container
 *        the containing entity whose reference scopes the query.
 * @param afterDate
 *        if null, no date limit, else limited to only messages after this date.
 * @param limitedToLatest
 *        if 0, no count limit, else limited to only the latest this number of messages.
 * @param draftsForId
 *        how to handle drafts: null means no drafts, "*" means all, otherwise drafts only if created by this userId.
 * @param pubViewOnly
 *        if true, include only messages marked pubview, else include any.
 * @return A list of Message objects that meet the criteria; may be empty
 */
public List getResources(final Entity container, Time afterDate, int limitedToLatest, String draftsForId,
        boolean pubViewOnly) {
    // if we are limiting, and are filtering out drafts or doing pubview, and don't have draft/owner/pubview support, filter here after
    boolean canLimit = true;
    boolean filterAfter = false;
    if ((limitedToLatest > 0) && ((((m_resourceTableDraftField == null) || (m_resourceTableOwnerField == null))
            && (!"*".equals(draftsForId))) || ((m_resourceTablePubViewField == null) && pubViewOnly))) {
        canLimit = false;
        filterAfter = true;
    }

    StringBuilder buf = new StringBuilder();
    // numFields counts the '?' placeholders accumulated below; it must
    // stay in sync with the fields[] array built after the SQL text.
    int numFields = 1;

    // start the outer statement, later finished with a limiting clause
    if ((limitedToLatest > 0) && canLimit) {
        if ("oracle".equals(m_sql.getVendor())) {
            buf.append("select XML from (");
            buf.append("select XML from " + m_resourceTableName);
        } else if ("mysql".equals(m_sql.getVendor())) {
            buf.append("select messages.XML from (");
            buf.append("select XML from " + m_resourceTableName);
        } else
        // if ("hsqldb".equals(m_sql.getVendor()))
        {
            // according to SQL2000 specification (used by HSQLDB) the limit clause appears first
            buf.append("select limit 0 " + limitedToLatest + " XML from " + m_resourceTableName);
        }
    } else {
        buf.append("select XML from " + m_resourceTableName);
    }

    buf.append(" where (" + m_resourceTableContainerIdField + " = ?");

    if ((m_resourceTableOrderField != null) && (afterDate != null)) {
        buf.append(" and " + m_resourceTableOrderField + " > ?");
        numFields++;
    }

    // deal with drafts if we can
    if ((m_resourceTableDraftField != null) && (m_resourceTableOwnerField != null)) {
        // if draftsForId is null, we don't want any drafts
        if (draftsForId == null) {
            buf.append(" and " + m_resourceTableDraftField + " = '0'");
        }
        // else a "*" means we take all drafts
        else if (!"*".equals(draftsForId)) {
            // we want only drafts if the owner field matches
            buf.append(" and ( " + m_resourceTableDraftField + " = '0' or " + m_resourceTableOwnerField
                    + " = ? )");
            numFields++;
        }
    }

    // pubview
    if ((m_resourceTablePubViewField != null) && pubViewOnly) {
        buf.append(" and " + m_resourceTablePubViewField + " = '1'");
    }

    // close the where
    buf.append(")");

    if (m_resourceTableOrderField != null) {
        buf.append(" order by " + m_resourceTableOrderField + " desc");
    }

    // finish the vendor-specific limit clause opened above
    boolean useLimitField = false;
    if ((limitedToLatest > 0) && canLimit) {
        if ("oracle".equals(m_sql.getVendor())) {
            buf.append(" ) where rownum <= ?");
            numFields++;
            useLimitField = true;
        } else if ("mysql".equals(m_sql.getVendor())) {
            buf.append(" ) AS messages LIMIT " + limitedToLatest);
            useLimitField = false;
        } else
        // if ("hsqldb".equals(m_sql.getVendor()))
        {
            // the limit clause appears elsewhere in HSQLDB SQL statements, not here.
        }
    }

    // build up the fields (same order as the '?' placeholders above)
    Object fields[] = new Object[numFields];
    fields[0] = container.getReference();
    int pos = 1;
    if ((m_resourceTableOrderField != null) && (afterDate != null)) {
        fields[pos++] = afterDate;
    }
    if ((m_resourceTableDraftField != null) && (m_resourceTableOwnerField != null) && (draftsForId != null)
            && (!"*".equals(draftsForId))) {
        fields[pos++] = draftsForId;
    }
    if (useLimitField) {
        fields[pos++] = Integer.valueOf(limitedToLatest);
    }

    List all = m_sql.dbRead(buf.toString(), fields, new SqlReader() {
        public Object readSqlResultRecord(ResultSet result) {
            try {
                // get the xml and parse into a Resource
                String xml = result.getString(1);
                Entity entry = readResource(container, xml);
                return entry;
            } catch (SQLException ignore) {
                return null;
            }
        }
    });

    // after filter for draft / pubview and limit
    if (filterAfter) {
        Vector v = new Vector();

        // deal with drafts / pubview
        for (Iterator i = all.iterator(); i.hasNext();) {
            Entity r = (Entity) i.next();
            Entity candidate = null;
            if (m_user.isDraft(r)) {
                // if some drafts
                if ((draftsForId != null) && (m_user.getOwnerId(r).equals(draftsForId))) {
                    candidate = r;
                }
            } else {
                candidate = r;
            }

            // if we have a candidate to add, and we need pub view only
            if ((candidate != null) && pubViewOnly) {
                // if this is not pub view, skip it
                if ((candidate.getProperties().getProperty(ResourceProperties.PROP_PUBVIEW) == null)) {
                    candidate = null;
                }
            }

            if (candidate != null) {
                v.add(candidate);
            }
        }

        // pick what we need
        if (limitedToLatest < v.size()) {
            all = v.subList(0, limitedToLatest);
        } else {
            all = v;
        }
    }

    return all;
}

From source file:org.sakaiproject.util.BaseXmlFileStorage.java

/**
 * Get resources filtered by date and count and drafts, in descending (latest first) order
 * 
 * @param container
 *        The container id.
 * @param afterDate
 *        if null, no date limit, else limited to only messages after this date.
 * @param limitedToLatest
 *        if 0, no count limit, else limited to only the latest this number of messages.
 * @param draftsForId
 *        how to handle drafts: null means no drafts, "*" means all, otherwise drafts only if created by this userId.
 * @param pubViewOnly
 *        if true, include only messages marked pubview, else include any.
 * @return A list of Message objects that meet the criteria; may be empty
 */
public List getResources(String container, Time afterDate, int limitedToLatest, String draftsForId,
        boolean pubViewOnly) {
    // treat a null container id as the empty id
    if (container == null)
        container = "";
    Container c = ((Container) m_store.get(container));
    if (c == null)
        return new Vector();
    if (c.contained.size() == 0)
        return new Vector();

    List all = new Vector();
    all.addAll(c.contained.values());

    // sort latest date first
    Collections.sort(all, new Comparator() {
        public int compare(Object o1, Object o2) {
            // if the same object
            if (o1 == o2)
                return 0;

            // assume they are Resource
            Entity r1 = (Entity) o1;
            Entity r2 = (Entity) o2;

            // get each one's date
            Time t1 = m_user.getDate(r1);
            Time t2 = m_user.getDate(r2);

            // compare based on date
            int compare = t2.compareTo(t1);

            return compare;
        }
    });

    // early out - if no filtering needed
    if ((limitedToLatest == 0) && (afterDate == null) && ("*".equals(draftsForId)) && !pubViewOnly) {
        return all;
    }

    Vector selected = new Vector();

    // deal with drafts / date / pubview
    for (Iterator i = all.iterator(); i.hasNext();) {
        Entity r = (Entity) i.next();
        Entity candidate = null;
        if (m_user.isDraft(r)) {
            // if some drafts
            if ((draftsForId != null) && (m_user.getOwnerId(r).equals(draftsForId))) {
                candidate = r;
            }
        } else {
            candidate = r;
        }

        // deal with date if it passes the draft criteria
        if ((candidate != null) && (afterDate != null)) {
            if (m_user.getDate(candidate).before(afterDate)) {
                candidate = null;
            }
        }

        // if we want pub view only
        if ((candidate != null) && pubViewOnly) {
            if (candidate.getProperties().getProperty(ResourceProperties.PROP_PUBVIEW) == null) {
                candidate = null;
            }
        }

        // add it if it passes all criteria
        if (candidate != null) {
            selected.add(candidate);
        }
    }

    // pick what we need (subList is a view of selected, which is local,
    // so returning it is safe here)
    if ((limitedToLatest > 0) && (limitedToLatest < selected.size())) {
        all = selected.subList(0, limitedToLatest);
    } else {
        all = selected;
    }

    return all;
}