Example usage for java.util.Stack Stack()

Introduction

On this page you can find usage examples for the java.util.Stack constructor Stack().

Prototype

public Stack() 

Document

Creates an empty Stack.
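
Before the real-world examples, a minimal, self-contained sketch of the constructor in isolation (the class and variable names here are illustrative, not taken from the examples below):

import java.util.Stack;

public class StackDemo {
    public static void main(String[] args) {
        // Create an empty stack of strings
        Stack<String> stack = new Stack<String>();
        stack.push("first");
        stack.push("second");
        // Elements come back in LIFO order: "second", then "first"
        while (!stack.isEmpty()) {
            System.out.println(stack.pop());
        }
    }
}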

Usage

From source file:edu.umn.msi.tropix.persistence.service.impl.TropixObjectServiceImpl.java

public void addToSharedFolder(final Iterable<String> inputObjectIds, final String inputFolderId,
        final boolean recursive) {
    final Stack<String> objectIds = new Stack<String>();
    final Stack<String> virtualFolderIds = new Stack<String>();

    for (final String inputObjectId : inputObjectIds) {
        objectIds.add(inputObjectId);
        virtualFolderIds.add(inputFolderId);
    }

    while (!objectIds.isEmpty()) {
        final String objectId = objectIds.pop();
        final String folderId = virtualFolderIds.pop();
        final TropixObject object = getTropixObjectDao().loadTropixObject(objectId);
        if (!(object instanceof Folder)) {
            getTropixObjectDao().addToVirtualFolder(folderId, objectId);
            TreeUtils.applyPermissionChange(getTropixObjectDao().loadTropixObject(objectId),
                    new CopyVirtualPermissions(folderId));
        } else {
            final Folder sourceFolder = (Folder) object;
            final VirtualFolder destinationFolder = new VirtualFolder();
            destinationFolder.setName(sourceFolder.getName());
            destinationFolder.setDescription(sourceFolder.getDescription());
            // System.out.println(String.format("Destination is %s", folderId));
            final String destinationId = createNewChildVirtualFolder(folderId, destinationFolder).getId();
            for (final TropixObject child : sourceFolder.getContents()) {
                objectIds.add(child.getId());
                virtualFolderIds.add(destinationId);
            }
        }
    }
}
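
The example above replaces recursion with two parallel stacks: one holds the object to visit, the other the virtual folder it should land in, and both are pushed and popped in lockstep. A minimal sketch of that pattern, with a hypothetical Node type standing in for TropixObject:

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

// Hypothetical tree node, used only to illustrate the pattern
class Node {
    String id;
    List<Node> children = new ArrayList<Node>();
}

public class ParallelStackWalk {
    // The two stacks are always pushed and popped together, so each node
    // stays paired with the context it should be processed under.
    static void walk(Node root, String rootContext) {
        Stack<Node> nodes = new Stack<Node>();
        Stack<String> contexts = new Stack<String>();
        nodes.push(root);
        contexts.push(rootContext);
        while (!nodes.isEmpty()) {
            Node node = nodes.pop();
            String context = contexts.pop();
            System.out.println(node.id + " under " + context);
            for (Node child : node.children) {
                nodes.push(child);
                contexts.push(node.id); // children inherit this node as their context
            }
        }
    }
}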

From source file:edu.umn.cs.spatialHadoop.indexing.RTree.java

/**
 * Searches the RTree starting from the given start position. This is either
 * a node number or the offset of an element. If it is a node number, the
 * search covers the subtree rooted at that node. If it is an offset, only
 * the object found there is searched.
 * It is assumed that openQuery() has been called before this function and
 * that endQuery() will be called afterwards.
 * @param query_shape the shape that defines the query range
 * @param output the collector that receives matching records, or null to
 *   count matches only
 * @param start where to start searching
 * @param end where to end searching. Only used when start is an offset of
 *   an object.
 * @return the number of records found
 * @throws IOException
 */
protected int search(Shape query_shape, ResultCollector<T> output, int start, int end) throws IOException {
    Rectangle query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;

    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }

    // Holds one data line from tree data
    Text line = new Text2();

    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();

        if (searchNumber < nodeCount) {
            // Searching a node
            int nodeID = searchNumber;
            if (query_mbr.isIntersected(nodes[nodeID])) {
                boolean is_leaf = nodeID >= nonLeafNodeCount;
                if (is_leaf) {
                    // Check all objects under this node
                    int start_offset = this.dataOffset[nodeID];
                    int end_offset = this.dataOffset[nodeID + 1];
                    toBeSearched.add(start_offset);
                    toBeSearched.add(end_offset);
                } else {
                    // Add all child nodes
                    for (int iChild = 0; iChild < this.degree; iChild++) {
                        toBeSearched.add(nodeID * this.degree + iChild + 1);
                    }
                }
            }
        } else {
            // searchNumber is the end offset of data search. Start offset is next
            // in stack
            int end_offset = searchNumber;
            int start_offset = toBeSearched.pop();
            // All data offsets are relative to tree start (typically 4)
            this.data.seek(start_offset + this.treeStartOffset);
            // Should not close the line reader because we do not want to close
            // the underlying data stream now. In case future searches are done
            @SuppressWarnings("resource")
            LineReader lineReader = new LineReader(data);
            while (start_offset < end_offset) {
                start_offset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}
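
A detail worth calling out: the method keeps two kinds of work items on a single Stack<Integer>. Values below nodeCount are node ids; anything else is a data offset, and offsets travel in (start, end) pairs with end pushed last so it is popped first. A reduced sketch of that tagging trick (the threshold and the expansion rule are illustrative):

import java.util.Stack;

public class TaggedWorkStack {
    static final int NODE_COUNT = 100; // values below this are node ids

    // Illustrative only: mirrors how the search above mixes node ids and
    // data offsets on one stack, with offsets pushed as (start, end) pairs.
    static void process(int start, int end) {
        Stack<Integer> work = new Stack<Integer>();
        work.push(start);
        if (start >= NODE_COUNT) {
            work.push(end); // start is an offset, so pair it with its end
        }
        while (!work.isEmpty()) {
            int item = work.pop();
            if (item < NODE_COUNT) {
                // item is a node id: expand it into an offset pair;
                // end is pushed last so it is popped first
                work.push(NODE_COUNT + item * 10);     // start offset
                work.push(NODE_COUNT + item * 10 + 5); // end offset
            } else {
                int endOffset = item;         // popped first
                int startOffset = work.pop(); // its paired start offset
                System.out.println("scan [" + startOffset + ", " + endOffset + ")");
            }
        }
    }
}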

From source file:de.tudarmstadt.ukp.dkpro.lexsemresource.graph.EntityGraphJGraphT.java

/**
 * Computes the depth of the graph, i.e. the maximum path length starting with the root node (if
 * a single root exists).
 *
 * @return The depth of the hierarchy.
 * @throws UnsupportedOperationException
 * @throws LexicalSemanticResourceException
 */
private double computeDepth() throws LexicalSemanticResourceException {
    List<Entity> roots = new Stack<Entity>();
    roots.addAll(getRoots());
    if (roots.size() == 0) {
        logger.error("There is no root for this lexical semantic resource.");
        return Double.NaN;
    } else if (roots.size() > 1) {
        logger.warn("There are " + roots.size() + " roots for this lexical semantic resource.");
        logger.info("Trying to get root from underlying lexical semantic resource.");

        Entity root = lexSemRes.getRoot();
        if (root == null) {
            EntityGraph lcc = getLargestConnectedComponent();
            int nrOfLccNodes = lcc.getNumberOfNodes();
            int nrOfGraphNodes = this.getNumberOfNodes();

            double ratio = (double) nrOfLccNodes / (double) nrOfGraphNodes;

            logger.info("Falling back to the depth of the LCC.");

            if (ratio < 0.7) {
                logger.warn("The largest connected component contains only " + ratio * 100
                        + "% of all nodes. Depth might not be meaningful.");
            }

            return lcc.getDepth();
        } else {
            roots.clear(); // we know the real root, so remove the others
            roots.add(root);
        }
    }

    Entity root = roots.get(0);
    BigInteger bigMaxPathLength = BigInteger.valueOf(0);
    BigInteger[] returnValues = computeShortestPathLengths(root, BigInteger.ZERO, bigMaxPathLength,
            new HashSet<Entity>());
    bigMaxPathLength = returnValues[1];
    return bigMaxPathLength.doubleValue();

}

From source file:com.haulmont.cuba.web.WebWindowManager.java

protected WindowBreadCrumbs createWindowBreadCrumbs(Window window) {
    WebAppWorkArea appWorkArea = getConfiguredWorkArea(createWorkAreaContext(window));
    WindowBreadCrumbs windowBreadCrumbs = new WindowBreadCrumbs(appWorkArea);

    boolean showBreadCrumbs = webConfig.getShowBreadCrumbs() || Mode.SINGLE == appWorkArea.getMode();
    windowBreadCrumbs.setVisible(showBreadCrumbs);

    stacks.put(windowBreadCrumbs, new Stack<>());
    return windowBreadCrumbs;
}
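
Here the freshly created stack seeds a per-breadcrumbs window history inside a map, one independent LIFO history per key. A small sketch of that map-of-stacks idiom (class and method names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Stack;

public class HistoryMap {
    // One independent LIFO history per key
    private final Map<String, Stack<String>> stacks = new HashMap<>();

    void open(String area, String window) {
        stacks.computeIfAbsent(area, k -> new Stack<>()).push(window);
    }

    // Returns the most recently opened window for the area, or null
    String back(String area) {
        Stack<String> history = stacks.get(area);
        return (history == null || history.isEmpty()) ? null : history.pop();
    }
}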

From source file:com.nextep.designer.sqlgen.ui.editors.sql.SQLCompletionProcessor.java

/**
 * This method parses the SQL statement defined at the current start offset. The method will
 * retrieve any SQL statement which encapsulates the start offset, parse it and return the result
 * of this parse for completion proposals.
 *
 * @param viewer viewer of the document to parse
 * @param start start offset
 * @return a {@link DMLParseResult} which contains information about the parse of the found SQL
 *         statement, or <code>null</code> if no SQL statement has been found from the given
 *         start offset.
 */
private DMLParseResult parseSQL(ITextViewer viewer, int start) {
    // Retrieving the corresponding statement start
    IDocument doc = new Document();
    doc.set(viewer.getDocument().get() + " "); //$NON-NLS-1$

    FindReplaceDocumentAdapter finder = new FindReplaceDocumentAdapter(doc);
    try {
        IRegion lastSemicolonRegion = finder.find(start - 1, ";", false, false, false, false); //$NON-NLS-1$
        if (lastSemicolonRegion == null) {
            lastSemicolonRegion = new Region(0, 1);
        }
        IRegion selectRegion = finder.find(lastSemicolonRegion.getOffset(), "SELECT|INSERT|UPDATE|DELETE", true, //$NON-NLS-1$
                false, false, true);

        IRegion endSemicolonRegion = finder.find(start == doc.getLength() ? start - 1 : start, ";", true, false, //$NON-NLS-1$
                false, false);
        if (endSemicolonRegion == null) {
            endSemicolonRegion = new Region(doc.getLength() - 1, 0);
        }
        if (selectRegion == null || lastSemicolonRegion == null || endSemicolonRegion == null) {
            return null;
        }
        // The select must be found after the first semicolon, else it is not the
        // same SQL statement
        if (selectRegion.getOffset() >= lastSemicolonRegion.getOffset()
                && endSemicolonRegion.getOffset() >= selectRegion.getOffset()) {
            DMLScanner scanner = new DMLScanner(parser);
            scanner.setRange(doc, selectRegion.getOffset(),
                    endSemicolonRegion.getOffset() - selectRegion.getOffset());
            IToken token = scanner.nextToken();
            DMLParseResult result = new DMLParseResult();
            Stack<DMLParseResult> stack = new Stack<DMLParseResult>();
            Map<Segment, DMLParseResult> results = new HashMap<Segment, DMLParseResult>();
            while (!token.isEOF()) {
                // Counting parentheses
                if (token == DMLScanner.LEFTPAR_TOKEN) {
                    result.parCount++;
                } else if (token == DMLScanner.RIGHTPAR_TOKEN) {
                    result.parCount--;
                }

                if (token == DMLScanner.SELECT_TOKEN) { // && (result.tableSegStart>0 ||
                    // result.whereSegStart>0)) {
                    stack.push(result);
                    result = new DMLParseResult();
                    result.stackStart = scanner.getTokenOffset();
                } else if (token == DMLScanner.RIGHTPAR_TOKEN && result.parCount < 0) { // &&
                    // stack.size()>0)
                    // {
                    results.put(new Segment(result.stackStart, scanner.getTokenOffset() - result.stackStart),
                            result);
                    result = stack.pop();
                } else if (token == DMLScanner.INSERT_TOKEN) {
                    result.ignoreInto = false;
                } else if (token == DMLScanner.FROM_TOKEN || token == DMLScanner.UPDATE_TOKEN
                        || (token == DMLScanner.INTO_TOKEN && !result.ignoreInto)) {
                    result.ignoreInto = true;
                    // We have a table segment start
                    result.tableSegStart = scanner.getTokenOffset();
                    result.tableStartToken = token;
                } else if (token == DMLScanner.WORD_TOKEN && result.tableSegStart > 0) {
                    // We are in a table segment so we instantiate appropriate table references
                    // and aliases
                    // in the parse result
                    if (result.lastAlias == null) {
                        // This is a new table definition, we add it
                        result.lastAlias = new TableAlias(
                                doc.get(scanner.getTokenOffset(), scanner.getTokenLength()).toUpperCase());
                        result.lastAlias.setTable(tablesMap.get(result.lastAlias.getTableName()));
                        result.addFromTable(result.lastAlias);
                    } else if (result.lastAlias.getTableAlias() == null) {
                        // This is an alias of a defined table
                        final String alias = doc.get(scanner.getTokenOffset(), scanner.getTokenLength());
                        final List<String> reservedWords = parser.getTypedTokens().get(ISQLParser.DML);
                        if (!reservedWords.contains(alias.toUpperCase())) {
                            result.lastAlias.setAlias(alias);
                        } else {
                            result.lastAlias = null;
                        }
                    }
                } else if (token == DMLScanner.COMMA_TOKEN) {
                    // On a comma, we reset any table reference
                    result.lastAlias = null;
                } else if (token == DMLScanner.DML_TOKEN) {
                    result.lastAlias = null;
                    if (result.tableSegStart != -1) {
                        int tableSegEnd = scanner.getTokenOffset();
                        result.addTableSegment(
                                new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                        result.tableSegStart = -1;
                    }
                } else if (result.tableSegStart != -1
                        && ((result.tableStartToken == DMLScanner.FROM_TOKEN && token == DMLScanner.WHERE_TOKEN)
                                || (result.tableStartToken == DMLScanner.UPDATE_TOKEN
                                        && token == DMLScanner.SET_TOKEN)
                                || (result.tableStartToken == DMLScanner.INTO_TOKEN
                                        && token == DMLScanner.LEFTPAR_TOKEN))) {
                    // We have matched a table segment end, so we close the segment
                    // and we add it to the parse result's table segments
                    int tableSegEnd = scanner.getTokenOffset();
                    result.addTableSegment(
                            new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                    result.tableSegStart = -1;
                    if (token == DMLScanner.WHERE_TOKEN) {
                        result.whereSegStart = scanner.getTokenOffset() + scanner.getTokenLength();
                    }
                }
                token = scanner.nextToken();
            }
            // If the table segment is still opened, we close it at the end of the SQL statement
            if (result.tableSegStart > -1) {
                int tableSegEnd = endSemicolonRegion.getOffset();
                result.addTableSegment(
                        new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart + 1));
            }
            // Locating the appropriate result
            for (Segment s : results.keySet()) {
                if (s.getOffset() <= start && s.getOffset() + s.getLength() > start) {
                    return results.get(s);
                }
            }
            return result;
        }
    } catch (BadLocationException e) {
        LOGGER.debug("Problems while retrieving SQL statement");
    }
    return null;
}
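
The notable stack usage here is suspend and resume: when a nested SELECT opens, the current DMLParseResult is pushed and a fresh one takes over; when the matching closing parenthesis arrives, the enclosing result is popped back. A stripped-down sketch of that pattern, with a hypothetical State type in place of DMLParseResult:

import java.util.Stack;

public class NestedParser {
    // Hypothetical per-scope parse state
    static class State {
        int depth;
        StringBuilder text = new StringBuilder();
    }

    static State parse(String input) {
        Stack<State> suspended = new Stack<State>();
        State current = new State();
        for (char ch : input.toCharArray()) {
            if (ch == '(') {
                // entering a nested scope: suspend the current state
                suspended.push(current);
                current = new State();
                current.depth = suspended.size();
            } else if (ch == ')' && !suspended.isEmpty()) {
                // leaving the scope: resume the enclosing state
                current = suspended.pop();
            } else {
                current.text.append(ch);
            }
        }
        return current; // the outermost state
    }
}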

From source file:com.amazonaws.services.s3.transfer.TransferManager.java

/**
 * Downloads all objects in the virtual directory designated by the
 * keyPrefix given to the destination directory given. All virtual
 * subdirectories will be downloaded recursively.
 *
 * @param bucketName
 *            The bucket containing the virtual directory
 * @param keyPrefix
 *            The key prefix for the virtual directory, or null for the
 *            entire bucket. All subdirectories will be downloaded
 *            recursively.
 * @param destinationDirectory
 *            The directory to place downloaded files. Subdirectories will
 *            be created as necessary.
 */
public MultipleFileDownload downloadDirectory(String bucketName, String keyPrefix, File destinationDirectory) {
    if (keyPrefix == null)
        keyPrefix = "";
    List<S3ObjectSummary> objectSummaries = new LinkedList<S3ObjectSummary>();
    Stack<String> commonPrefixes = new Stack<String>();
    commonPrefixes.add(keyPrefix);
    long totalSize = 0;
    // Recurse all virtual subdirectories to get a list of object summaries.
    // This is a depth-first search.
    do {
        String prefix = commonPrefixes.pop();
        ObjectListing listObjectsResponse = null;

        do {
            if (listObjectsResponse == null) {
                ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                        .withDelimiter(DEFAULT_DELIMITER).withPrefix(prefix);
                listObjectsResponse = s3.listObjects(listObjectsRequest);
            } else {
                listObjectsResponse = s3.listNextBatchOfObjects(listObjectsResponse);
            }

            for (S3ObjectSummary s : listObjectsResponse.getObjectSummaries()) {
                // Skip any files that are also virtual directories, since
                // we can't save both a directory and a file of the same
                // name.
                if (!s.getKey().equals(prefix)
                        && !listObjectsResponse.getCommonPrefixes().contains(s.getKey() + DEFAULT_DELIMITER)) {
                    objectSummaries.add(s);
                    totalSize += s.getSize();
                } else {
                    log.debug("Skipping download for object " + s.getKey()
                            + " since it is also a virtual directory");
                }
            }

            commonPrefixes.addAll(listObjectsResponse.getCommonPrefixes());
        } while (listObjectsResponse.isTruncated());
    } while (!commonPrefixes.isEmpty());

    /* This is the hook for adding additional progress listeners */
    ProgressListenerChain additionalListeners = new ProgressListenerChain();

    TransferProgress transferProgress = new TransferProgress();
    transferProgress.setTotalBytesToTransfer(totalSize);
    /*
     * Bind additional progress listeners to this
     * MultipleFileTransferProgressUpdatingListener to receive
     * ByteTransferred events from each single-file download implementation.
     */
    ProgressListener listener = new MultipleFileTransferProgressUpdatingListener(transferProgress,
            additionalListeners);

    List<DownloadImpl> downloads = new ArrayList<DownloadImpl>();

    String description = "Downloading from " + bucketName + "/" + keyPrefix;
    final MultipleFileDownloadImpl multipleFileDownload = new MultipleFileDownloadImpl(description,
            transferProgress, additionalListeners, keyPrefix, bucketName, downloads);
    multipleFileDownload.setMonitor(new MultipleFileTransferMonitor(multipleFileDownload, downloads));

    final CountDownLatch latch = new CountDownLatch(1);
    MultipleFileTransferStateChangeListener transferListener = new MultipleFileTransferStateChangeListener(
            latch, multipleFileDownload);

    for (S3ObjectSummary summary : objectSummaries) {
        // TODO: non-standard delimiters
        File f = new File(destinationDirectory, summary.getKey());
        File parentFile = f.getParentFile();
        if (!parentFile.exists() && !parentFile.mkdirs()) {
            throw new RuntimeException("Couldn't create parent directories for " + f.getAbsolutePath());
        }

        // All the single-file downloads share the same
        // MultipleFileTransferProgressUpdatingListener and
        // MultipleFileTransferStateChangeListener
        downloads.add((DownloadImpl) doDownload(
                new GetObjectRequest(summary.getBucketName(), summary.getKey())
                        .<GetObjectRequest>withGeneralProgressListener(listener),
                f, transferListener, null, false));
    }

    if (downloads.isEmpty()) {
        multipleFileDownload.setState(TransferState.Completed);
        return multipleFileDownload;
    }

    // Notify all state changes waiting for the downloads to all be queued
    // to wake up and continue.
    latch.countDown();
    return multipleFileDownload;
}
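
The stack of common prefixes turns the paginated listing into an iterative depth-first search over the virtual directory tree. A reduced sketch of just the traversal loop, with the S3 calls replaced by a hypothetical listChildren stub:

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

public class PrefixDfs {
    // Hypothetical stub: returns the immediate sub-prefixes of a prefix
    static List<String> listChildren(String prefix) {
        return new ArrayList<String>(); // stub for illustration
    }

    static void visitAll(String rootPrefix) {
        Stack<String> prefixes = new Stack<String>();
        prefixes.push(rootPrefix);
        do {
            String prefix = prefixes.pop();
            System.out.println("listing " + prefix);
            // Pushing the children makes this a depth-first traversal
            prefixes.addAll(listChildren(prefix));
        } while (!prefixes.isEmpty());
    }
}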

From source file:geogebra.kernel.Kernel.java

private void storeTemporaryRoundingInfoInList() {
    if (useSignificantFiguresList == null) {
        useSignificantFiguresList = new Stack<Boolean>();
        noOfSignificantFiguresList = new Stack<Integer>();
        noOfDecimalPlacesList = new Stack<Integer>();
    }

    useSignificantFiguresList.push(Boolean.valueOf(useSignificantFigures));
    noOfSignificantFiguresList.push(Integer.valueOf(sf.getSigDigits()));
    noOfDecimalPlacesList.push(Integer.valueOf(nf.getMaximumFractionDigits()));
}
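
The three stacks act as save slots for the current rounding settings, so a later restore can pop the values back in LIFO order. A compact sketch of that save/restore idiom, using a single illustrative setting:

import java.util.Stack;

public class RoundingState {
    private int decimalPlaces = 2;
    private final Stack<Integer> saved = new Stack<Integer>();

    // Save the current setting before temporarily overriding it
    void pushSetting(int temporary) {
        saved.push(decimalPlaces);
        decimalPlaces = temporary;
    }

    // Restore the most recently saved setting
    void popSetting() {
        if (!saved.isEmpty()) {
            decimalPlaces = saved.pop();
        }
    }
}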

From source file:com.amazonaws.mobileconnectors.s3.transfermanager.TransferManager.java

/**
 * Downloads all objects in the virtual directory designated by the
 * keyPrefix given to the destination directory given. All virtual
 * subdirectories will be downloaded recursively.
 *
 * @param bucketName The bucket containing the virtual directory
 * @param keyPrefix The key prefix for the virtual directory, or null for
 *            the entire bucket. All subdirectories will be downloaded
 *            recursively.
 * @param destinationDirectory The directory to place downloaded files.
 *            Subdirectories will be created as necessary.
 */
public MultipleFileDownload downloadDirectory(String bucketName, String keyPrefix, File destinationDirectory) {

    if (keyPrefix == null) {
        keyPrefix = "";
    }

    final List<S3ObjectSummary> objectSummaries = new LinkedList<S3ObjectSummary>();
    final Stack<String> commonPrefixes = new Stack<String>();
    commonPrefixes.add(keyPrefix);
    long totalSize = 0;

    // Recurse all virtual subdirectories to get a list of object summaries.
    // This is a depth-first search.
    do {
        final String prefix = commonPrefixes.pop();
        ObjectListing listObjectsResponse = null;

        do {
            if (listObjectsResponse == null) {
                final ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
                        .withBucketName(bucketName).withDelimiter(DEFAULT_DELIMITER).withPrefix(prefix);
                listObjectsResponse = s3.listObjects(listObjectsRequest);
            } else {
                listObjectsResponse = s3.listNextBatchOfObjects(listObjectsResponse);
            }

            for (final S3ObjectSummary s : listObjectsResponse.getObjectSummaries()) {
                // Skip any files that are also virtual directories, since
                // we can't save both a directory and a file of the same
                // name.
                if (!s.getKey().equals(prefix)
                        && !listObjectsResponse.getCommonPrefixes().contains(s.getKey() + DEFAULT_DELIMITER)) {
                    objectSummaries.add(s);
                    totalSize += s.getSize();
                } else {
                    log.debug("Skipping download for object " + s.getKey()
                            + " since it is also a virtual directory");
                }
            }

            commonPrefixes.addAll(listObjectsResponse.getCommonPrefixes());
        } while (listObjectsResponse.isTruncated());
    } while (!commonPrefixes.isEmpty());

    /* This is the hook for adding additional progress listeners */
    final ProgressListenerChain additionalListeners = new ProgressListenerChain();

    final TransferProgress transferProgress = new TransferProgress();
    transferProgress.setTotalBytesToTransfer(totalSize);
    /*
     * Bind additional progress listeners to this
     * MultipleFileTransferProgressUpdatingListener to receive
     * ByteTransferred events from each single-file download implementation.
     */
    final ProgressListener listener = new MultipleFileTransferProgressUpdatingListener(transferProgress,
            additionalListeners);

    final List<DownloadImpl> downloads = new ArrayList<DownloadImpl>();

    final String description = "Downloading from " + bucketName + "/" + keyPrefix;
    final MultipleFileDownloadImpl multipleFileDownload = new MultipleFileDownloadImpl(description,
            transferProgress, additionalListeners, keyPrefix, bucketName, downloads);
    multipleFileDownload.setMonitor(new MultipleFileTransferMonitor(multipleFileDownload, downloads));

    final CountDownLatch latch = new CountDownLatch(1);
    final MultipleFileTransferStateChangeListener transferListener = new MultipleFileTransferStateChangeListener(
            latch, multipleFileDownload);

    for (final S3ObjectSummary summary : objectSummaries) {
        // TODO: non-standard delimiters
        final File f = new File(destinationDirectory, summary.getKey());
        final File parentFile = f.getParentFile();
        if (!parentFile.exists() && !parentFile.mkdirs()) {
            throw new RuntimeException("Couldn't create parent directories for " + f.getAbsolutePath());
        }

        // All the single-file downloads share the same
        // MultipleFileTransferProgressUpdatingListener and
        // MultipleFileTransferStateChangeListener
        downloads.add((DownloadImpl) doDownload(new GetObjectRequest(summary.getBucketName(), summary.getKey())
                .withGeneralProgressListener(listener), f, transferListener, null, false));
    }

    if (downloads.isEmpty()) {
        multipleFileDownload.setState(TransferState.Completed);
        return multipleFileDownload;
    }

    // Notify all state changes waiting for the downloads to all be queued
    // to wake up and continue.
    latch.countDown();

    return multipleFileDownload;
}

From source file:fi.ni.IFC_ClassModel.java

/**
 * Parses a single IFC line statement.
 * 
 * @param line
 *            the line
 */
private void parse_IFC_LineStatement(String line) {
    IFC_X3_VO ifcvo = new IFC_X3_VO();
    int state = 0;
    StringBuffer sb = new StringBuffer();
    int cl_count = 0;
    LinkedList<Object> current = ifcvo.getList();
    Stack<LinkedList<Object>> list_stack = new Stack<LinkedList<Object>>();
    for (int i = 0; i < line.length(); i++) {
        char ch = line.charAt(i);
        switch (state) {
        case 0:
            if (ch == '=') {
                ifcvo.setLine_num(toLong(sb.toString()));
                sb.setLength(0);
                state++;
                continue;
            } else if (Character.isDigit(ch))
                sb.append(ch);
            break;
        case 1: // (
            if (ch == '(') {
                ifcvo.setName(sb.toString());
                sb.setLength(0);
                state++;
                continue;
            } else if (ch == ';') {
                ifcvo.setName(sb.toString());
                sb.setLength(0);
                state = Integer.MAX_VALUE;
            } else if (!Character.isWhitespace(ch))
                sb.append(ch);
            break;
        case 2: // (... line started and doing (...
            if (ch == '\'') {
                state++;
            }
            if (ch == '(') {
                list_stack.push(current);
                LinkedList<Object> tmp = new LinkedList<Object>();
                if (sb.toString().trim().length() > 0)
                    current.add(sb.toString().trim());
                sb.setLength(0);
                current.add(tmp); // add the nested list into the current list
                current = tmp;
                cl_count++;
                // sb.append(ch);
            } else if (ch == ')') {
                if (cl_count == 0) {
                    if (sb.toString().trim().length() > 0)
                        current.add(sb.toString().trim());
                    sb.setLength(0);
                    state = Integer.MAX_VALUE; // line is done
                    continue;
                } else {
                    if (sb.toString().trim().length() > 0)
                        current.add(sb.toString().trim());
                    sb.setLength(0);
                    cl_count--;
                    current = list_stack.pop();
                }
            } else if (ch == ',') {
                if (sb.toString().trim().length() > 0)
                    current.add(sb.toString().trim());
                current.add(Character.valueOf(ch));

                sb.setLength(0);
            } else {
                sb.append(ch);

            }
            break;
        case 3: // (...
            if (ch == '\'') {
                state--;
            } else {
                sb.append(ch);

            }
            break;
        default:
            // Do nothing
        }
    }
    linemap.put(ifcvo.line_num, ifcvo);
}
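
The list_stack drives the nesting: on '(' the current list is suspended and a fresh child list (already attached to its parent) becomes current; on ')' the parent is popped back. A compact, self-contained sketch of the same technique on a simpler grammar:

import java.util.LinkedList;
import java.util.Stack;

public class NestedListBuilder {
    // Parses e.g. "a,(b,(c),d)" into nested LinkedLists of tokens
    static LinkedList<Object> parse(String input) {
        LinkedList<Object> root = new LinkedList<Object>();
        LinkedList<Object> current = root;
        Stack<LinkedList<Object>> stack = new Stack<LinkedList<Object>>();
        StringBuilder sb = new StringBuilder();
        for (char ch : input.toCharArray()) {
            if (ch == '(') {
                stack.push(current);           // suspend the parent list
                LinkedList<Object> child = new LinkedList<Object>();
                current.add(child);            // attach child to its parent
                current = child;
            } else if (ch == ')') {
                if (sb.length() > 0) { current.add(sb.toString()); sb.setLength(0); }
                current = stack.pop();         // resume the parent list
            } else if (ch == ',') {
                if (sb.length() > 0) { current.add(sb.toString()); sb.setLength(0); }
            } else {
                sb.append(ch);
            }
        }
        if (sb.length() > 0) { current.add(sb.toString()); }
        return root;
    }
}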

From source file:com.datatorrent.stram.plan.physical.PhysicalPlan.java

private void redoPartitions(PMapping currentMapping, String note) {
    Partitioner<Operator> partitioner = getPartitioner(currentMapping);
    if (partitioner == null) {
        LOG.warn("No partitioner for {}", currentMapping.logicalOperator);
        return;
    }

    RepartitionContext mainPC = new RepartitionContext(partitioner, currentMapping, 0);
    if (mainPC.newPartitions.isEmpty()) {
        LOG.warn("Empty partition list after repartition: {}", currentMapping.logicalOperator);
        return;
    }

    int memoryPerPartition = currentMapping.logicalOperator.getValue(OperatorContext.MEMORY_MB);
    for (Map.Entry<OutputPortMeta, StreamMeta> stream : currentMapping.logicalOperator.getOutputStreams()
            .entrySet()) {
        if (stream.getValue().getLocality() != Locality.THREAD_LOCAL
                && stream.getValue().getLocality() != Locality.CONTAINER_LOCAL) {
            memoryPerPartition += stream.getKey().getValue(PortContext.BUFFER_MEMORY_MB);
        }
    }
    for (OperatorMeta pp : currentMapping.parallelPartitions) {
        for (Map.Entry<OutputPortMeta, StreamMeta> stream : pp.getOutputStreams().entrySet()) {
            if (stream.getValue().getLocality() != Locality.THREAD_LOCAL
                    && stream.getValue().getLocality() != Locality.CONTAINER_LOCAL) {
                memoryPerPartition += stream.getKey().getValue(PortContext.BUFFER_MEMORY_MB);
            }
        }
        memoryPerPartition += pp.getValue(OperatorContext.MEMORY_MB);
    }
    int requiredMemoryMB = (mainPC.newPartitions.size() - mainPC.currentPartitions.size()) * memoryPerPartition;
    if (requiredMemoryMB > availableMemoryMB) {
        LOG.warn("Insufficient headroom for repartitioning: available {}m required {}m", availableMemoryMB,
                requiredMemoryMB);
        return;
    }

    List<Partition<Operator>> addedPartitions = new ArrayList<Partition<Operator>>();
    // determine modifications of partition set, identify affected operator instance(s)
    for (Partition<Operator> newPartition : mainPC.newPartitions) {
        PTOperator op = mainPC.currentPartitionMap.remove(newPartition);
        if (op == null) {
            addedPartitions.add(newPartition);
        } else {
            // check whether mapping was changed
            for (DefaultPartition<Operator> pi : mainPC.currentPartitions) {
                if (pi == newPartition && pi.isModified()) {
                    // existing partition changed (operator or partition keys)
                    // remove/add to update subscribers and state
                    mainPC.currentPartitionMap.put(newPartition, op);
                    addedPartitions.add(newPartition);
                }
            }
        }
    }

    // remaining entries represent deprecated partitions
    this.undeployOpers.addAll(mainPC.currentPartitionMap.values());
    // downstream dependencies require redeploy, resolve prior to modifying plan
    Set<PTOperator> deps = this.getDependents(mainPC.currentPartitionMap.values());
    this.undeployOpers.addAll(deps);
    // dependencies need redeploy, except operators excluded in remove
    this.deployOpers.addAll(deps);

    // process parallel partitions before removing operators from the plan
    LinkedHashMap<PMapping, RepartitionContext> partitionContexts = Maps.newLinkedHashMap();
    Stack<OperatorMeta> parallelPartitions = new Stack<LogicalPlan.OperatorMeta>();
    parallelPartitions.addAll(currentMapping.parallelPartitions);
    pendingLoop: while (!parallelPartitions.isEmpty()) {
        OperatorMeta ppMeta = parallelPartitions.pop();
        for (StreamMeta s : ppMeta.getInputStreams().values()) {
            if (currentMapping.parallelPartitions.contains(s.getSource().getOperatorMeta())
                    && parallelPartitions.contains(s.getSource().getOperatorMeta())) {
                parallelPartitions.push(ppMeta);
                parallelPartitions.remove(s.getSource().getOperatorMeta());
                parallelPartitions.push(s.getSource().getOperatorMeta());
                continue pendingLoop;
            }
        }
        LOG.debug("Processing parallel partition {}", ppMeta);

        PMapping ppm = this.logicalToPTOperator.get(ppMeta);
        Partitioner<Operator> ppp = getPartitioner(ppm);
        if (ppp == null) {
            partitionContexts.put(ppm, null);
        } else {
            RepartitionContext pc = new RepartitionContext(ppp, ppm, mainPC.newPartitions.size());
            if (pc.newPartitions == null) {
                throw new IllegalStateException(
                        "Partitioner returns null for parallel partition " + ppm.logicalOperator);
            }
            partitionContexts.put(ppm, pc);
        }
    }

    // plan updates start here, after all changes were identified
    // remove obsolete operators first, any freed resources
    // can subsequently be used for new/modified partitions
    List<PTOperator> copyPartitions = Lists.newArrayList(currentMapping.partitions);
    // remove deprecated partitions from plan
    for (PTOperator p : mainPC.currentPartitionMap.values()) {
        copyPartitions.remove(p);
        removePartition(p, currentMapping);
        mainPC.operatorIdToPartition.remove(p.getId());
    }
    currentMapping.partitions = copyPartitions;

    // add new operators
    for (Partition<Operator> newPartition : addedPartitions) {
        PTOperator p = addPTOperator(currentMapping, newPartition, mainPC.minCheckpoint);
        mainPC.operatorIdToPartition.put(p.getId(), newPartition);
    }

    // process parallel partition changes
    for (Map.Entry<PMapping, RepartitionContext> e : partitionContexts.entrySet()) {
        if (e.getValue() == null) {
            // no partitioner, add required operators
            for (int i = 0; i < addedPartitions.size(); i++) {
                LOG.debug("Automatically adding to parallel partition {}", e.getKey());
                // set activation windowId to conform to upstream checkpoints
                addPTOperator(e.getKey(), null, mainPC.minCheckpoint);
            }
        } else {
            RepartitionContext pc = e.getValue();
            // track previous parallel partition mapping
            Map<Partition<Operator>, Partition<Operator>> prevMapping = Maps.newHashMap();
            for (int i = 0; i < mainPC.currentPartitions.size(); i++) {
                prevMapping.put(pc.currentPartitions.get(i), mainPC.currentPartitions.get(i));
            }
            // determine which new partitions match upstream, remaining to be treated as new operator
            Map<Partition<Operator>, Partition<Operator>> newMapping = Maps.newHashMap();
            Iterator<Partition<Operator>> itMain = mainPC.newPartitions.iterator();
            Iterator<Partition<Operator>> itParallel = pc.newPartitions.iterator();
            while (itMain.hasNext() && itParallel.hasNext()) {
                newMapping.put(itParallel.next(), itMain.next());
            }

            for (Partition<Operator> newPartition : pc.newPartitions) {
                PTOperator op = pc.currentPartitionMap.remove(newPartition);
                if (op == null) {
                    pc.addedPartitions.add(newPartition);
                } else if (prevMapping.get(newPartition) != newMapping.get(newPartition)) {
                    // upstream partitions don't match, remove/add to replace with new operator
                    pc.currentPartitionMap.put(newPartition, op);
                    pc.addedPartitions.add(newPartition);
                } else {
                    // check whether mapping was changed - based on DefaultPartition implementation
                    for (DefaultPartition<Operator> pi : pc.currentPartitions) {
                        if (pi == newPartition && pi.isModified()) {
                            // existing partition changed (operator or partition keys)
                            // remove/add to update subscribers and state
                            mainPC.currentPartitionMap.put(newPartition, op);
                            pc.addedPartitions.add(newPartition);
                        }
                    }
                }
            }

            if (!pc.currentPartitionMap.isEmpty()) {
                // remove obsolete partitions
                List<PTOperator> cowPartitions = Lists.newArrayList(e.getKey().partitions);
                for (PTOperator p : pc.currentPartitionMap.values()) {
                    cowPartitions.remove(p);
                    removePartition(p, e.getKey());
                    pc.operatorIdToPartition.remove(p.getId());
                }
                e.getKey().partitions = cowPartitions;
            }
            // add new partitions
            for (Partition<Operator> newPartition : pc.addedPartitions) {
                PTOperator oper = addPTOperator(e.getKey(), newPartition, mainPC.minCheckpoint);
                pc.operatorIdToPartition.put(oper.getId(), newPartition);
            }

            getPartitioner(e.getKey()).partitioned(pc.operatorIdToPartition);
        }
    }

    updateStreamMappings(currentMapping);
    for (PMapping pp : partitionContexts.keySet()) {
        updateStreamMappings(pp);
    }

    deployChanges();

    if (mainPC.currentPartitions.size() != mainPC.newPartitions.size()) {
        StramEvent ev = new StramEvent.PartitionEvent(currentMapping.logicalOperator.getName(),
                mainPC.currentPartitions.size(), mainPC.newPartitions.size());
        ev.setReason(note);
        this.ctx.recordEventAsync(ev);
    }

    partitioner.partitioned(mainPC.operatorIdToPartition);
}
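
The pendingLoop above uses the stack to impose a processing order: when a popped partition still has an unprocessed upstream dependency, both are re-pushed with the dependency on top, so upstream mappings are always handled first. A minimal sketch of that stack-based dependency ordering (the dependency map is hypothetical, and assumed acyclic):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Stack;

public class DependencyOrder {
    public static void main(String[] args) {
        // Hypothetical, acyclic dependency map: a key depends on its values
        Map<String, List<String>> deps = new HashMap<String, List<String>>();
        deps.put("b", Arrays.asList("a"));
        deps.put("c", Arrays.asList("b"));

        Stack<String> pending = new Stack<String>();
        pending.addAll(Arrays.asList("a", "b", "c")); // "c" ends up on top
        pendingLoop: while (!pending.isEmpty()) {
            String item = pending.pop();
            List<String> needed = deps.get(item);
            if (needed != null) {
                for (String dep : needed) {
                    if (pending.contains(dep)) {
                        // re-push the item, then move its dependency on top
                        pending.push(item);
                        pending.remove(dep);
                        pending.push(dep);
                        continue pendingLoop;
                    }
                }
            }
            System.out.println("process " + item); // prints a, b, c
        }
    }
}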