Example usage for java.util Deque pop

List of usage examples for java.util Deque pop

Introduction

This page collects example usages of java.util Deque pop from open-source projects.

Prototype

E pop();

Document

Pops an element from the stack represented by this deque.
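
As a minimal illustration (not taken from the projects below), pop() removes and returns the head of the deque, and throws NoSuchElementException when the deque is empty:

import java.util.ArrayDeque;
import java.util.Deque;

public class DequePopExample {
    public static void main(String[] args) {
        Deque<String> stack = new ArrayDeque<>();
        stack.push("first");
        stack.push("second");

        // pop() removes and returns the most recently pushed element
        System.out.println(stack.pop()); // prints "second"
        System.out.println(stack.pop()); // prints "first"

        // popping an empty deque throws java.util.NoSuchElementException
        // stack.pop();
    }
}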

Usage

From source file:org.apache.hadoop.hive.ql.parse.GenTezUtils.java

public static void removeUnionOperators(Configuration conf, GenTezProcContext context, BaseWork work)
        throws SemanticException {

    List<Operator<?>> roots = new ArrayList<Operator<?>>();
    roots.addAll(work.getAllRootOperators());
    if (work.getDummyOps() != null) {
        roots.addAll(work.getDummyOps());
    }
    roots.addAll(context.eventOperatorSet);

    // need to clone the plan.
    List<Operator<?>> newRoots = Utilities.cloneOperatorTree(conf, roots);

    // we're cloning the operator plan but we're retaining the original work. That means
    // that root operators have to be replaced with the cloned ops. The replacement map
    // tells you what that mapping is.
    BiMap<Operator<?>, Operator<?>> replacementMap = HashBiMap.create();

    // there's some special handling for dummyOps required. Mapjoins won't be properly
    // initialized if their dummy parents aren't initialized. Since we cloned the plan
    // we need to replace the dummy operators in the work with the cloned ones.
    List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();

    Iterator<Operator<?>> it = newRoots.iterator();
    for (Operator<?> orig : roots) {
        Set<FileSinkOperator> fsOpSet = OperatorUtils.findOperators(orig, FileSinkOperator.class);
        for (FileSinkOperator fsOp : fsOpSet) {
            context.fileSinkSet.remove(fsOp);
        }

        Operator<?> newRoot = it.next();

        replacementMap.put(orig, newRoot);

        if (newRoot instanceof HashTableDummyOperator) {
            // dummy ops need to be updated to the cloned ones.
            dummyOps.add((HashTableDummyOperator) newRoot);
            it.remove();
        } else if (newRoot instanceof AppMasterEventOperator) {
            // event operators point to table scan operators. When cloning these we
            // need to restore the original scan.
            if (newRoot.getConf() instanceof DynamicPruningEventDesc) {
                TableScanOperator ts = ((DynamicPruningEventDesc) orig.getConf()).getTableScan();
                if (ts == null) {
                    throw new AssertionError("No table scan associated with dynamic event pruning. " + orig);
                }
                ((DynamicPruningEventDesc) newRoot.getConf()).setTableScan(ts);
            }
            it.remove();
        } else {
            if (newRoot instanceof TableScanOperator) {
                if (context.tsToEventMap.containsKey(orig)) {
                    // we need to update event operators with the cloned table scan
                    for (AppMasterEventOperator event : context.tsToEventMap.get(orig)) {
                        ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
                    }
                }
            }
            context.rootToWorkMap.remove(orig);
            context.rootToWorkMap.put(newRoot, work);
        }
    }

    // now we remove all the unions. we throw away any branch that's not reachable from
    // the current set of roots. The reason is that those branches will be handled in
    // different tasks.
    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
    operators.addAll(newRoots);

    Set<Operator<?>> seen = new HashSet<Operator<?>>();

    while (!operators.isEmpty()) {
        Operator<?> current = operators.pop();
        seen.add(current);

        if (current instanceof FileSinkOperator) {
            FileSinkOperator fileSink = (FileSinkOperator) current;

            // remember it for additional processing later
            context.fileSinkSet.add(fileSink);

            FileSinkDesc desc = fileSink.getConf();
            Path path = desc.getDirName();
            List<FileSinkDesc> linked;

            if (!context.linkedFileSinks.containsKey(path)) {
                linked = new ArrayList<FileSinkDesc>();
                context.linkedFileSinks.put(path, linked);
            }
            linked = context.linkedFileSinks.get(path);
            linked.add(desc);

            desc.setDirName(new Path(path, "" + linked.size()));
            desc.setLinkedFileSink(true);
            desc.setParentDir(path);
            desc.setLinkedFileSinkDesc(linked);
        }

        if (current instanceof AppMasterEventOperator) {
            // remember for additional processing later
            context.eventOperatorSet.add((AppMasterEventOperator) current);

            // mark the original as abandoned. Don't need it anymore.
            context.abandonedEventOperatorSet
                    .add((AppMasterEventOperator) replacementMap.inverse().get(current));
        }

        if (current instanceof UnionOperator) {
            Operator<?> parent = null;
            int count = 0;

            for (Operator<?> op : current.getParentOperators()) {
                if (seen.contains(op)) {
                    ++count;
                    parent = op;
                }
            }

            // we should have been able to reach the union from only one side.
            assert count <= 1;

            if (parent == null) {
                // root operator is union (can happen in reducers)
                replacementMap.put(current, current.getChildOperators().get(0));
            } else {
                parent.removeChildAndAdoptItsChildren(current);
            }
        }

        if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
            current.setChildOperators(null);
        } else {
            operators.addAll(current.getChildOperators());
        }
    }
    work.setDummyOps(dummyOps);
    work.replaceRoots(replacementMap);
}
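
The pop() call above drives a work-list traversal: the cloned roots are loaded into a Deque, each pop() takes the next pending operator, and its children are appended for later processing. Below is a minimal, self-contained sketch of that pattern with a hypothetical Node type (no Hive classes); unlike the Hive code it also skips nodes that have already been seen:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class WorkListTraversal {
    // hypothetical tree/DAG node standing in for Operator<?>
    static class Node {
        final List<Node> children;
        Node(List<Node> children) { this.children = children; }
    }

    // visit every node reachable from the given roots
    static Set<Node> traverse(List<Node> roots) {
        Deque<Node> operators = new ArrayDeque<>(roots);
        Set<Node> seen = new HashSet<>();
        while (!operators.isEmpty()) {
            Node current = operators.pop();     // take the next pending node
            if (!seen.add(current)) {
                continue;                       // already processed
            }
            operators.addAll(current.children); // schedule children for later pops
        }
        return seen;
    }
}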

From source file:org.apache.hadoop.hive.ql.parse.mr3.GenMR3Utils.java

public void removeUnionOperators(Configuration conf, GenMR3ProcContext context, BaseWork work)
        throws SemanticException {

    List<Operator<?>> roots = new ArrayList<Operator<?>>();
    roots.addAll(work.getAllRootOperators());
    if (work.getDummyOps() != null) {
        roots.addAll(work.getDummyOps());
    }
    roots.addAll(context.eventOperatorSet);

    // need to clone the plan.
    List<Operator<?>> newRoots = Utilities.cloneOperatorTree(conf, roots);

    // we're cloning the operator plan but we're retaining the original work. That means
    // that root operators have to be replaced with the cloned ops. The replacement map
    // tells you what that mapping is.
    BiMap<Operator<?>, Operator<?>> replacementMap = HashBiMap.create();

    // there's some special handling for dummyOps required. Mapjoins won't be properly
    // initialized if their dummy parents aren't initialized. Since we cloned the plan
    // we need to replace the dummy operators in the work with the cloned ones.
    List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();

    Iterator<Operator<?>> it = newRoots.iterator();
    for (Operator<?> orig : roots) {
        Operator<?> newRoot = it.next();

        replacementMap.put(orig, newRoot);

        if (newRoot instanceof HashTableDummyOperator) {
            // dummy ops need to be updated to the cloned ones.
            dummyOps.add((HashTableDummyOperator) newRoot);
            it.remove();
        } else if (newRoot instanceof AppMasterEventOperator) {
            // event operators point to table scan operators. When cloning these we
            // need to restore the original scan.
            if (newRoot.getConf() instanceof DynamicPruningEventDesc) {
                TableScanOperator ts = ((DynamicPruningEventDesc) orig.getConf()).getTableScan();
                if (ts == null) {
                    throw new AssertionError("No table scan associated with dynamic event pruning. " + orig);
                }
                ((DynamicPruningEventDesc) newRoot.getConf()).setTableScan(ts);
            }
            it.remove();
        } else {
            if (newRoot instanceof TableScanOperator) {
                if (context.tsToEventMap.containsKey(orig)) {
                    // we need to update event operators with the cloned table scan
                    for (AppMasterEventOperator event : context.tsToEventMap.get(orig)) {
                        ((DynamicPruningEventDesc) event.getConf()).setTableScan((TableScanOperator) newRoot);
                    }
                }
            }
            context.rootToWorkMap.remove(orig);
            context.rootToWorkMap.put(newRoot, work);
        }
    }

    // now we remove all the unions. we throw away any branch that's not reachable from
    // the current set of roots. The reason is that those branches will be handled in
    // different tasks.
    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
    operators.addAll(newRoots);

    Set<Operator<?>> seen = new HashSet<Operator<?>>();

    while (!operators.isEmpty()) {
        Operator<?> current = operators.pop();
        seen.add(current);

        if (current instanceof FileSinkOperator) {
            FileSinkOperator fileSink = (FileSinkOperator) current;

            // remember it for additional processing later
            context.fileSinkSet.add(fileSink);

            FileSinkDesc desc = fileSink.getConf();
            Path path = desc.getDirName();
            List<FileSinkDesc> linked;

            if (!context.linkedFileSinks.containsKey(path)) {
                linked = new ArrayList<FileSinkDesc>();
                context.linkedFileSinks.put(path, linked);
            }
            linked = context.linkedFileSinks.get(path);
            linked.add(desc);

            desc.setDirName(new Path(path, "" + linked.size()));
            desc.setLinkedFileSinkDesc(linked);
        }

        if (current instanceof AppMasterEventOperator) {
            // remember for additional processing later
            context.eventOperatorSet.add((AppMasterEventOperator) current);

            // mark the original as abandoned. Don't need it anymore.
            context.abandonedEventOperatorSet
                    .add((AppMasterEventOperator) replacementMap.inverse().get(current));
        }

        if (current instanceof UnionOperator) {
            Operator<?> parent = null;
            int count = 0;

            for (Operator<?> op : current.getParentOperators()) {
                if (seen.contains(op)) {
                    ++count;
                    parent = op;
                }
            }

            // we should have been able to reach the union from only one side.
            assert count <= 1;

            if (parent == null) {
                // root operator is union (can happen in reducers)
                replacementMap.put(current, current.getChildOperators().get(0));
            } else {
                parent.removeChildAndAdoptItsChildren(current);
            }
        }

        if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
            current.setChildOperators(null);
        } else {
            operators.addAll(current.getChildOperators());
        }
    }
    work.setDummyOps(dummyOps);
    work.replaceRoots(replacementMap);
}

From source file:org.apache.hadoop.hive.ql.parse.PTFTranslator.java

private void translatePTFChain() throws SemanticException {

    Deque<PTFInputSpec> ptfChain = new ArrayDeque<PTFInvocationSpec.PTFInputSpec>();
    PTFInputSpec currentSpec = ptfInvocation.getFunction();
    while (currentSpec != null) {
        ptfChain.push(currentSpec);
        currentSpec = currentSpec.getInput();
    }

    int inputNum = 0;
    PTFInputDef currentDef = null;
    while (!ptfChain.isEmpty()) {
        currentSpec = ptfChain.pop();

        if (currentSpec instanceof PTFQueryInputSpec) {
            currentDef = translate((PTFQueryInputSpec) currentSpec, inputNum);
        } else {
            currentDef = translate((PartitionedTableFunctionSpec) currentSpec, currentDef, inputNum);
        }
        inputNum++;
    }
    ptfDesc.setFuncDef((PartitionedTableFunctionDef) currentDef);
}
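
Here the Deque reverses a singly linked chain: each PTFInputSpec is pushed while following getInput(), so the subsequent pop() loop visits the chain from the innermost input outward. A small sketch of that reversal pattern, using a hypothetical Spec type:

import java.util.ArrayDeque;
import java.util.Deque;

class ChainReversal {
    // hypothetical link in a chain, analogous to PTFInputSpec and getInput()
    static class Spec {
        final String name;
        final Spec input;   // next link toward the source of the chain
        Spec(String name, Spec input) { this.name = name; this.input = input; }
    }

    // push head-to-tail, then pop tail-to-head
    static void processFromSource(Spec outermost) {
        Deque<Spec> chain = new ArrayDeque<>();
        for (Spec s = outermost; s != null; s = s.input) {
            chain.push(s);
        }
        while (!chain.isEmpty()) {
            System.out.println("translating " + chain.pop().name); // innermost first
        }
    }

    public static void main(String[] args) {
        Spec source = new Spec("source", null);
        Spec outer = new Spec("outer", source);
        processFromSource(outer); // prints "translating source" then "translating outer"
    }
}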

From source file:org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.java

public void processPositionAlias(ASTNode ast) throws SemanticException {
    boolean isBothByPos = HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS);
    boolean isGbyByPos = isBothByPos || HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_POSITION_ALIAS);
    boolean isObyByPos = isBothByPos || HiveConf.getBoolVar(conf, ConfVars.HIVE_ORDERBY_POSITION_ALIAS);

    Deque<ASTNode> stack = new ArrayDeque<ASTNode>();
    stack.push(ast);

    while (!stack.isEmpty()) {
        ASTNode next = stack.pop();

        if (next.getChildCount() == 0) {
            continue;
        }

        boolean isAllCol;
        ASTNode selectNode = null;
        ASTNode groupbyNode = null;
        ASTNode orderbyNode = null;

        // get node type
        int child_count = next.getChildCount();
        for (int child_pos = 0; child_pos < child_count; ++child_pos) {
            ASTNode node = (ASTNode) next.getChild(child_pos);
            int type = node.getToken().getType();
            if (type == HiveParser.TOK_SELECT) {
                selectNode = node;
            } else if (type == HiveParser.TOK_GROUPBY) {
                groupbyNode = node;
            } else if (type == HiveParser.TOK_ORDERBY) {
                orderbyNode = node;
            }
        }

        if (selectNode != null) {
            int selectExpCnt = selectNode.getChildCount();

            // replace each of the position alias in GROUPBY with the actual column name
            if (groupbyNode != null) {
                for (int child_pos = 0; child_pos < groupbyNode.getChildCount(); ++child_pos) {
                    ASTNode node = (ASTNode) groupbyNode.getChild(child_pos);
                    if (node.getToken().getType() == HiveParser.Number) {
                        if (isGbyByPos) {
                            int pos = Integer.parseInt(node.getText());
                            if (pos > 0 && pos <= selectExpCnt) {
                                groupbyNode.setChild(child_pos, selectNode.getChild(pos - 1).getChild(0));
                            } else {
                                throw new SemanticException(ErrorMsg.INVALID_POSITION_ALIAS_IN_GROUPBY
                                        .getMsg("Position alias: " + pos + " does not exist\n"
                                                + "The Select List is indexed from 1 to " + selectExpCnt));
                            }
                        } else {
                            warn("Using constant number  " + node.getText()
                                    + " in group by. If you try to use position alias when hive.groupby.position.alias is false, the position alias will be ignored.");
                        }
                    }
                }
            }

            // orderby position will be processed in genPlan
        }

        for (int i = next.getChildren().size() - 1; i >= 0; i--) {
            stack.push((ASTNode) next.getChildren().get(i));
        }
    }
}
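
processPositionAlias uses the Deque as an explicit stack for an iterative AST walk; children are pushed in reverse index order at the end of the loop so that pop() returns them left-to-right. A small sketch of that preorder pattern with a hypothetical Tree type:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

class PreorderWalk {
    // hypothetical node standing in for ASTNode
    static class Tree {
        final String label;
        final List<Tree> children;
        Tree(String label, Tree... children) { this.label = label; this.children = Arrays.asList(children); }
    }

    static void walk(Tree root) {
        Deque<Tree> stack = new ArrayDeque<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            Tree next = stack.pop();
            System.out.println(next.label);
            // push right-to-left so pop() visits the children left-to-right
            for (int i = next.children.size() - 1; i >= 0; i--) {
                stack.push(next.children.get(i));
            }
        }
    }

    public static void main(String[] args) {
        walk(new Tree("query", new Tree("select"), new Tree("groupby"))); // query, select, groupby
    }
}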

From source file:org.apache.hadoop.hive.ql.parse.spark.GenSparkUtils.java

public void removeUnionOperators(Configuration conf, GenSparkProcContext context, BaseWork work)
        throws SemanticException {

    List<Operator<?>> roots = new ArrayList<Operator<?>>();

    // For MapWork, getAllRootOperators is not suitable, since it checks
    // getPathToAliases, and will return null if this is empty. Here we are
    // replacing getAliasToWork, so should use that information instead.
    if (work instanceof MapWork) {
        roots.addAll(((MapWork) work).getAliasToWork().values());
    } else {
        roots.addAll(work.getAllRootOperators());
    }
    if (work.getDummyOps() != null) {
        roots.addAll(work.getDummyOps());
    }

    // need to clone the plan.
    List<Operator<?>> newRoots = Utilities.cloneOperatorTree(conf, roots);

    // Build a map to map the original FileSinkOperator and the cloned FileSinkOperators
    // This map is used for set the stats flag for the cloned FileSinkOperators in later process
    Iterator<Operator<?>> newRootsIt = newRoots.iterator();
    for (Operator<?> root : roots) {
        Operator<?> newRoot = newRootsIt.next();
        List<Operator<?>> newOpQueue = new LinkedList<Operator<?>>();
        collectOperators(newRoot, newOpQueue);
        List<Operator<?>> opQueue = new LinkedList<Operator<?>>();
        collectOperators(root, opQueue);
        Iterator<Operator<?>> newOpQueueIt = newOpQueue.iterator();
        for (Operator<?> op : opQueue) {
            Operator<?> newOp = newOpQueueIt.next();

            // We need to update rootToWorkMap in case the op is a key, since even
            // though we clone the op tree, we're still using the same MapWork/ReduceWork.
            if (context.rootToWorkMap.containsKey(op)) {
                context.rootToWorkMap.put(newOp, context.rootToWorkMap.get(op));
            }
            // Don't remove the old entry - in SparkPartitionPruningSink it still
            // refers to the old TS, and we need to lookup it later in
            // processPartitionPruningSink.

            if (op instanceof FileSinkOperator) {
                List<FileSinkOperator> fileSinkList = context.fileSinkMap.get(op);
                if (fileSinkList == null) {
                    fileSinkList = new LinkedList<FileSinkOperator>();
                }
                fileSinkList.add((FileSinkOperator) newOp);
                context.fileSinkMap.put((FileSinkOperator) op, fileSinkList);
            } else if (op instanceof SparkPartitionPruningSinkOperator) {
                SparkPartitionPruningSinkOperator oldPruningSink = (SparkPartitionPruningSinkOperator) op;
                SparkPartitionPruningSinkOperator newPruningSink = (SparkPartitionPruningSinkOperator) newOp;
                newPruningSink.getConf().setTableScan(oldPruningSink.getConf().getTableScan());
                context.pruningSinkSet.add(newPruningSink);
                context.pruningSinkSet.remove(oldPruningSink);
            }
        }
    }

    // we're cloning the operator plan but we're retaining the original work. That means
    // that root operators have to be replaced with the cloned ops. The replacement map
    // tells you what that mapping is.
    Map<Operator<?>, Operator<?>> replacementMap = new HashMap<Operator<?>, Operator<?>>();

    // there's some special handling for dummyOps required. Mapjoins won't be properly
    // initialized if their dummy parents aren't initialized. Since we cloned the plan
    // we need to replace the dummy operators in the work with the cloned ones.
    List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();

    Iterator<Operator<?>> it = newRoots.iterator();
    for (Operator<?> orig : roots) {
        Operator<?> newRoot = it.next();
        if (newRoot instanceof HashTableDummyOperator) {
            dummyOps.add((HashTableDummyOperator) newRoot);
            it.remove();
        } else {
            replacementMap.put(orig, newRoot);
        }
    }

    // now we remove all the unions. we throw away any branch that's not reachable from
    // the current set of roots. The reason is that those branches will be handled in
    // different tasks.
    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
    operators.addAll(newRoots);

    Set<Operator<?>> seen = new HashSet<Operator<?>>();

    while (!operators.isEmpty()) {
        Operator<?> current = operators.pop();
        seen.add(current);

        if (current instanceof UnionOperator) {
            Operator<?> parent = null;
            int count = 0;

            for (Operator<?> op : current.getParentOperators()) {
                if (seen.contains(op)) {
                    ++count;
                    parent = op;
                }
            }

            // we should have been able to reach the union from only one side.
            Preconditions.checkArgument(count <= 1,
                    "AssertionError: expected count to be <= 1, but was " + count);

            if (parent == null) {
                // root operator is union (can happen in reducers)
                replacementMap.put(current, current.getChildOperators().get(0));
            } else {
                parent.removeChildAndAdoptItsChildren(current);
            }
        }

        if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
            current.setChildOperators(null);
        } else {
            operators.addAll(current.getChildOperators());
        }
    }
    work.setDummyOps(dummyOps);
    work.replaceRoots(replacementMap);
}
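
Compared with the Tez and MR3 variants above, this Spark version checks the single-parent invariant with Guava's Preconditions.checkArgument rather than a bare assert, so the check also fires when JVM assertions are disabled at runtime.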

From source file:org.apache.hadoop.hive.ql.QTestUtil.java

/**
 * Given the current configurations (e.g., hadoop version and execution mode), return
 * the correct file name to compare with the current test run output.
 * @param outDir The directory where the reference log files are stored.
 * @param testName The test file name (terminated by ".out").
 * @return The file name appended with the configuration values if it exists.
 */
public String outPath(String outDir, String testName) {
    String ret = (new File(outDir, testName)).getPath();
    // List of configurations. Currently the list consists of hadoop version and execution mode only
    List<String> configs = new ArrayList<String>();
    configs.add(this.hadoopVer);

    Deque<String> stack = new LinkedList<String>();
    StringBuilder sb = new StringBuilder();
    sb.append(testName);
    stack.push(sb.toString());

    // example file names are input1.q.out_0.20.0_minimr or input2.q.out_0.17
    for (String s : configs) {
        sb.append('_');
        sb.append(s);
        stack.push(sb.toString());
    }
    while (stack.size() > 0) {
        String fileName = stack.pop();
        File f = new File(outDir, fileName);
        if (f.exists()) {
            ret = f.getPath();
            break;
        }
    }
    return ret;
}
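
outPath builds candidate file names from least to most specific and pushes each one, so the pop() loop tries the most specific name first and falls back to the plain test name. A minimal sketch of that lookup pattern (a hypothetical helper, not part of QTestUtil):

import java.io.File;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

class MostSpecificLookup {
    // try "base_a_b", then "base_a", then "base"; return the first existing file
    static String resolve(String dir, String base, List<String> suffixes) {
        Deque<String> stack = new ArrayDeque<>();
        StringBuilder sb = new StringBuilder(base);
        stack.push(sb.toString());                 // least specific candidate, popped last
        for (String s : suffixes) {
            sb.append('_').append(s);
            stack.push(sb.toString());             // more specific candidates, popped first
        }
        while (!stack.isEmpty()) {
            File f = new File(dir, stack.pop());
            if (f.exists()) {
                return f.getPath();
            }
        }
        return new File(dir, base).getPath();      // default when no candidate exists
    }
}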

From source file:org.apache.hadoop.hive.ql.QTestUtil2.java

/**
 * Given the current configurations (e.g., hadoop version and execution
 * mode), return the correct file name to compare with the current test run
 * output.
 * 
 * @param outDir
 *            The directory where the reference log files are stored.
 * @param testName
 *            The test file name (terminated by ".out").
 * @return The file name appended with the configuration values if it
 *         exists.
 */
public String outPath(String outDir, String testName) {
    String ret = (new File(outDir, testName)).getPath();
    // List of configurations. Currently the list consists of hadoop version
    // and execution mode only
    List<String> configs = new ArrayList<String>();
    configs.add(this.hadoopVer);

    Deque<String> stack = new LinkedList<String>();
    StringBuilder sb = new StringBuilder();
    sb.append(testName);
    stack.push(sb.toString());

    // example file names are input1.q.out_0.20.0_minimr or
    // input2.q.out_0.17
    for (String s : configs) {
        sb.append('_');
        sb.append(s);
        stack.push(sb.toString());
    }
    while (stack.size() > 0) {
        String fileName = stack.pop();
        File f = new File(outDir, fileName);
        if (f.exists()) {
            ret = f.getPath();
            break;
        }
    }
    return ret;
}

From source file:org.apache.metron.common.stellar.StellarCompiler.java

private Token<?> popDeque(Deque<Token<?>> tokenDeque) {
    if (tokenDeque.isEmpty()) {
        throw new ParseException("Unable to pop an empty stack");
    }
    return tokenDeque.pop();
}
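
The guard is needed because Deque.pop() throws NoSuchElementException on an empty deque; wrapping it lets the compiler surface a domain-specific ParseException instead.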

From source file:org.apache.nifi.processors.standard.util.FTPUtils.java

/**
 * Handles the logic required to change to the given directory RELATIVE TO THE CURRENT DIRECTORY which can include creating new directories needed.
 *
 * This will first attempt to change to the full path of the given directory outright. If that fails, then it will attempt to change from the top of the tree of the given directory all the way
 * down to the final leaf node of the given directory.
 *
 * @param client - the ftp client with an already active connection
 * @param dirPath - the path to change or create directories to
 * @param createDirs - if true will attempt to create any missing directories
 * @param processor - used solely for targeting logging output.
 * @throws IOException if any access problem occurs
 */
public static void changeWorkingDirectory(final FTPClient client, final String dirPath,
        final boolean createDirs, final Processor processor) throws IOException {
    final String currentWorkingDirectory = client.printWorkingDirectory();
    final File dir = new File(dirPath);
    logger.debug(processor + " attempting to change directory from " + currentWorkingDirectory + " to "
            + dir.getPath());
    boolean dirExists = false;
    final String forwardPaths = dir.getPath().replaceAll(Matcher.quoteReplacement("\\"),
            Matcher.quoteReplacement("/"));
    //always use forward paths for long string attempt
    try {
        dirExists = client.changeWorkingDirectory(forwardPaths);
        if (dirExists) {
            logger.debug(processor + " changed working directory to '" + forwardPaths + "' from '"
                    + currentWorkingDirectory + "'");
        } else {
            logger.debug(processor + " could not change directory to '" + forwardPaths + "' from '"
                    + currentWorkingDirectory + "' so trying the hard way.");
        }
    } catch (final IOException ioe) {
        logger.debug(processor + " could not change directory to '" + forwardPaths + "' from '"
                + currentWorkingDirectory + "' so trying the hard way.");
    }
    if (!dirExists) { //couldn't navigate directly...begin hard work
        final Deque<String> stack = new LinkedList<>();
        File fakeFile = new File(dir.getPath());
        do {
            stack.push(fakeFile.getName());
        } while ((fakeFile = fakeFile.getParentFile()) != null);

        String dirName = null;
        while ((dirName = stack.peek()) != null) {
            stack.pop();
            //find out if exists, if not make it if configured to do so or throw exception
            dirName = ("".equals(dirName.trim())) ? "/" : dirName;
            boolean exists = false;
            try {
                exists = client.changeWorkingDirectory(dirName);
            } catch (final IOException ioe) {
                exists = false;
            }
            if (!exists && createDirs) {
                logger.debug(processor + " creating new directory and changing to it " + dirName);
                client.makeDirectory(dirName);
                if (!(client.makeDirectory(dirName) || client.changeWorkingDirectory(dirName))) {
                    throw new IOException(
                            processor + " could not create and change to newly created directory " + dirName);
                } else {
                    logger.debug(processor + " successfully changed working directory to " + dirName);
                }
            } else if (!exists) {
                throw new IOException(processor + " could not change directory to '" + dirName + "' from '"
                        + currentWorkingDirectory + "'");
            }
        }
    }
}
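
When the direct change of directory fails, the method splits the target path into a Deque by pushing names from leaf to root, so the pop() loop walks the directories root-first. The same component-ordering trick in isolation, without the FTP client (a sketch, not the NiFi API):

import java.io.File;
import java.util.ArrayDeque;
import java.util.Deque;

class RootFirstPath {
    // print the components of a path root-first by pushing leaf-to-root and popping
    static void printRootFirst(String dirPath) {
        Deque<String> stack = new ArrayDeque<>();
        File fakeFile = new File(dirPath);
        do {
            stack.push(fakeFile.getName());   // leaf goes in first, root last
        } while ((fakeFile = fakeFile.getParentFile()) != null);

        while (!stack.isEmpty()) {
            String dirName = stack.pop();     // root comes out first
            System.out.println("".equals(dirName.trim()) ? "/" : dirName);
        }
    }

    public static void main(String[] args) {
        printRootFirst("/tmp/a/b"); // prints "/", "tmp", "a", "b"
    }
}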

From source file:org.apache.nifi.processors.standard.util.SFTPUtils.java

public static void changeWorkingDirectory(final ChannelSftp sftp, final String dirPath,
        final boolean createDirs, final Processor proc) throws IOException {
    final Deque<String> stack = new LinkedList<>();
    File dir = new File(dirPath);
    String currentWorkingDirectory = null;
    boolean dirExists = false;
    final String forwardPaths = dir.getPath().replaceAll(Matcher.quoteReplacement("\\"),
            Matcher.quoteReplacement("/"));
    try {
        currentWorkingDirectory = sftp.pwd();
        logger.debug(proc + " attempting to change directory from " + currentWorkingDirectory + " to "
                + dir.getPath());
        //always use forward paths for long string attempt
        sftp.cd(forwardPaths);
        dirExists = true;
        logger.debug(proc + " changed working directory to '" + forwardPaths + "' from '"
                + currentWorkingDirectory + "'");
    } catch (final SftpException sftpe) {
        logger.debug(proc + " could not change directory to '" + forwardPaths + "' from '"
                + currentWorkingDirectory + "' so trying the hard way.");
    }
    if (dirExists) {
        return;
    }
    if (!createDirs) {
        throw new IOException("Unable to change to requested working directory \'" + forwardPaths
                + "\' but not configured to create dirs.");
    }

    do {
        stack.push(dir.getName());
    } while ((dir = dir.getParentFile()) != null);

    String dirName = null;
    while ((dirName = stack.peek()) != null) {
        stack.pop();
        //find out if exists, if not make it if configured to do so or throw exception
        dirName = ("".equals(dirName.trim())) ? "/" : dirName;
        try {
            sftp.cd(dirName);
        } catch (final SftpException sftpe) {
            logger.debug(proc + " creating new directory and changing to it " + dirName);
            try {
                sftp.mkdir(dirName);
                sftp.cd(dirName);
            } catch (final SftpException e) {
                throw new IOException(proc + " could not make/change directory to [" + dirName + "] ["
                        + e.getLocalizedMessage() + "]", e);
            }
        }
    }
}