Example usage for java.util.concurrent CompletionService submit

Introduction

This page shows example usage of java.util.concurrent CompletionService submit.

Prototype

Future<V> submit(Callable<V> task);

Document

Submits a value-returning task for execution and returns a Future representing the pending results of the task.
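A minimal scatter/gather sketch of this method (the class and tasks below are illustrative, not taken from the sources that follow): value-returning tasks are submitted to the service, and take() hands back their Futures in completion order rather than submission order.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<Integer> ecs = new ExecutorCompletionService<Integer>(pool);

        // scatter: submit value-returning tasks
        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            ecs.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return n * n;
                }
            });
        }

        // gather: take() blocks until the next task completes,
        // returning results in completion order, not submission order
        int sum = 0;
        for (int i = 0; i < taskCount; i++) {
            sum += ecs.take().get();
        }
        pool.shutdown();
        System.out.println("sum of squares: " + sum);
    }
}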

Usage

From source file:ml.shifu.shifu.core.dtrain.dt.DTWorker.java

@Override
public DTWorkerParams doCompute(WorkerContext<DTMasterParams, DTWorkerParams> context) {
    if (context.isFirstIteration()) {
        return new DTWorkerParams();
    }

    DTMasterParams lastMasterResult = context.getLastMasterResult();
    final List<TreeNode> trees = lastMasterResult.getTrees();
    final Map<Integer, TreeNode> todoNodes = lastMasterResult.getTodoNodes();
    if (todoNodes == null) {
        return new DTWorkerParams();
    }

    LOG.info("Start to work: todoNodes size is {}", todoNodes.size());

    Map<Integer, NodeStats> statistics = initTodoNodeStats(todoNodes);

    double trainError = 0d, validationError = 0d;
    double weightedTrainCount = 0d, weightedValidationCount = 0d;
    // renew random seed
    if (this.isGBDT && !this.gbdtSampleWithReplacement && lastMasterResult.isSwitchToNextTree()) {
        this.baggingRandomMap = new HashMap<Integer, Random>();
    }

    long start = System.nanoTime();
    for (Data data : this.trainingData) {
        if (this.isRF) {
            for (TreeNode treeNode : trees) {
                if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                    continue;
                }

                Node predictNode = predictNodeIndex(treeNode.getNode(), data, true);
                if (predictNode.getPredict() != null) {
                    // only update when not at the root node; at that point the tree node has no predict statistics yet
                    float weight = data.subsampleWeights[treeNode.getTreeId()];
                    if (Float.compare(weight, 0f) == 0) {
                        // oob data, no need to do weighting
                        validationError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedValidationCount += data.significance;
                    } else {
                        trainError += weight * data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedTrainCount += weight * data.significance;
                    }
                }
            }
        }

        if (this.isGBDT) {
            if (this.isContinuousEnabled && lastMasterResult.isContinuousRunningStart()) {
                recoverGBTData(context, data.output, data.predict, data, false);
                trainError += data.significance * loss.computeError(data.predict, data.label);
                weightedTrainCount += data.significance;
            } else {
                if (isNeedRecoverGBDTPredict) {
                    if (this.recoverTrees == null) {
                        this.recoverTrees = recoverCurrentTrees();
                    }
                    // recover gbdt data for fail over
                    recoverGBTData(context, data.output, data.predict, data, true);
                }
                int currTreeIndex = trees.size() - 1;

                if (lastMasterResult.isSwitchToNextTree()) {
                    if (currTreeIndex >= 1) {
                        Node node = trees.get(currTreeIndex - 1).getNode();
                        Node predictNode = predictNodeIndex(node, data, false);
                        if (predictNode.getPredict() != null) {
                            double predict = predictNode.getPredict().getPredict();
                            // first tree logic: master must treat it as the first tree even while the second
                            // tree's ROOT node is being sent
                            if (context.getLastMasterResult().isFirstTree()) {
                                data.predict = (float) predict;
                            } else {
                                // random drop
                                boolean drop = (this.dropOutRate > 0.0
                                        && dropOutRandom.nextDouble() < this.dropOutRate);
                                if (!drop) {
                                    data.predict += (float) (this.learningRate * predict);
                                }
                            }
                            data.output = -1f * loss.computeGradient(data.predict, data.label);
                        }
                        // if not sampling with replacement in gbdt, renew bagging sample rate in next tree
                        if (!this.gbdtSampleWithReplacement) {
                            Random random = null;
                            int classValue = (int) (data.label + 0.01f);
                            if (this.isStratifiedSampling) {
                                random = baggingRandomMap.get(classValue);
                                if (random == null) {
                                    random = DTrainUtils.generateRandomBySampleSeed(
                                            modelConfig.getTrain().getBaggingSampleSeed(),
                                            CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                                    baggingRandomMap.put(classValue, random);
                                }
                            } else {
                                random = baggingRandomMap.get(0);
                                if (random == null) {
                                    random = DTrainUtils.generateRandomBySampleSeed(
                                            modelConfig.getTrain().getBaggingSampleSeed(),
                                            CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                                    baggingRandomMap.put(0, random);
                                }
                            }
                            if (random.nextDouble() <= modelConfig.getTrain().getBaggingSampleRate()) {
                                data.subsampleWeights[currTreeIndex % data.subsampleWeights.length] = 1f;
                            } else {
                                data.subsampleWeights[currTreeIndex % data.subsampleWeights.length] = 0f;
                            }
                        }
                    }
                }

                if (context.getLastMasterResult().isFirstTree() && !lastMasterResult.isSwitchToNextTree()) {
                    Node currTree = trees.get(currTreeIndex).getNode();
                    Node predictNode = predictNodeIndex(currTree, data, true);
                    if (predictNode.getPredict() != null) {
                        trainError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedTrainCount += data.significance;
                    }
                } else {
                    trainError += data.significance * loss.computeError(data.predict, data.label);
                    weightedTrainCount += data.significance;
                }
            }
        }
    }
    LOG.debug("Compute train error time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));

    if (validationData != null) {
        start = System.nanoTime();
        for (Data data : this.validationData) {
            if (this.isRF) {
                for (TreeNode treeNode : trees) {
                    if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                        continue;
                    }
                    Node predictNode = predictNodeIndex(treeNode.getNode(), data, true);
                    if (predictNode.getPredict() != null) {
                        // only update when not at the root node; at that point the tree node has no predict statistics yet
                        validationError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedValidationCount += data.significance;
                    }
                }
            }

            if (this.isGBDT) {
                if (this.isContinuousEnabled && lastMasterResult.isContinuousRunningStart()) {
                    recoverGBTData(context, data.output, data.predict, data, false);
                    validationError += data.significance * loss.computeError(data.predict, data.label);
                    weightedValidationCount += data.significance;
                } else {
                    if (isNeedRecoverGBDTPredict) {
                        if (this.recoverTrees == null) {
                            this.recoverTrees = recoverCurrentTrees();
                        }
                        // recover gbdt data for fail over
                        recoverGBTData(context, data.output, data.predict, data, true);
                    }
                    int currTreeIndex = trees.size() - 1;
                    if (lastMasterResult.isSwitchToNextTree()) {
                        if (currTreeIndex >= 1) {
                            Node node = trees.get(currTreeIndex - 1).getNode();
                            Node predictNode = predictNodeIndex(node, data, false);
                            if (predictNode.getPredict() != null) {
                                double predict = predictNode.getPredict().getPredict();
                                if (context.getLastMasterResult().isFirstTree()) {
                                    data.predict = (float) predict;
                                } else {
                                    data.predict += (float) (this.learningRate * predict);
                                }
                                data.output = -1f * loss.computeGradient(data.predict, data.label);
                            }
                        }
                    }
                    if (context.getLastMasterResult().isFirstTree() && !lastMasterResult.isSwitchToNextTree()) {
                        Node predictNode = predictNodeIndex(trees.get(currTreeIndex).getNode(), data, true);
                        if (predictNode.getPredict() != null) {
                            validationError += data.significance * loss
                                    .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                            weightedValidationCount += data.significance;
                        }
                    } else {
                        validationError += data.significance * loss.computeError(data.predict, data.label);
                        weightedValidationCount += data.significance;
                    }
                }
            }
        }
        LOG.debug("Compute val error time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    }

    if (this.isGBDT) {
        // reset trees to null to save memory
        this.recoverTrees = null;
        if (this.isNeedRecoverGBDTPredict) {
            // no need to recover again
            this.isNeedRecoverGBDTPredict = false;
        }
    }

    start = System.nanoTime();
    CompletionService<Map<Integer, NodeStats>> completionService = new ExecutorCompletionService<Map<Integer, NodeStats>>(
            this.threadPool);

    int realThreadCount = 0;
    LOG.debug("while todo size {}", todoNodes.size());

    int realRecords = this.trainingData.size();
    int realThreads = this.workerThreadCount > realRecords ? realRecords : this.workerThreadCount;

    int[] trainLows = new int[realThreads];
    int[] trainHighs = new int[realThreads];

    int stepCount = realRecords / realThreads;
    if (realRecords % realThreads != 0) {
        // enlarge the step so the remainder is spread across threads, avoiding a last thread with up to 2*stepCount-1 records
        stepCount += (realRecords % realThreads) / stepCount;
    }
    for (int i = 0; i < realThreads; i++) {
        trainLows[i] = i * stepCount;
        if (i != realThreads - 1) {
            trainHighs[i] = trainLows[i] + stepCount - 1;
        } else {
            trainHighs[i] = realRecords - 1;
        }
    }

    for (int i = 0; i < realThreads; i++) {
        final Map<Integer, TreeNode> localTodoNodes = new HashMap<Integer, TreeNode>(todoNodes);
        final Map<Integer, NodeStats> localStatistics = initTodoNodeStats(todoNodes);

        final int startIndex = trainLows[i];
        final int endIndex = trainHighs[i];
        LOG.info("Thread {} todo size {} stats size {} start index {} end index {}", i, localTodoNodes.size(),
                localStatistics.size(), startIndex, endIndex);

        if (localTodoNodes.size() == 0) {
            continue;
        }
        realThreadCount += 1;
        completionService.submit(new Callable<Map<Integer, NodeStats>>() {
            @Override
            public Map<Integer, NodeStats> call() throws Exception {
                long start = System.nanoTime();
                List<Integer> nodeIndexes = new ArrayList<Integer>(trees.size());
                for (int j = startIndex; j <= endIndex; j++) {
                    Data data = DTWorker.this.trainingData.get(j);
                    nodeIndexes.clear();
                    if (DTWorker.this.isRF) {
                        for (TreeNode treeNode : trees) {
                            if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                                nodeIndexes.add(Node.INVALID_INDEX);
                            } else {
                                Node predictNode = predictNodeIndex(treeNode.getNode(), data, false);
                                nodeIndexes.add(predictNode.getId());
                            }
                        }
                    }

                    if (DTWorker.this.isGBDT) {
                        int currTreeIndex = trees.size() - 1;
                        Node predictNode = predictNodeIndex(trees.get(currTreeIndex).getNode(), data, false);
                        // update node index
                        nodeIndexes.add(predictNode.getId());
                    }
                    for (Map.Entry<Integer, TreeNode> entry : localTodoNodes.entrySet()) {
                        // only do statistics on effective data
                        Node todoNode = entry.getValue().getNode();
                        int treeId = entry.getValue().getTreeId();
                        int currPredictIndex = 0;
                        if (DTWorker.this.isRF) {
                            currPredictIndex = nodeIndexes.get(entry.getValue().getTreeId());
                        }
                        if (DTWorker.this.isGBDT) {
                            currPredictIndex = nodeIndexes.get(0);
                        }

                        if (todoNode.getId() == currPredictIndex) {
                            List<Integer> features = entry.getValue().getFeatures();
                            if (features.isEmpty()) {
                                features = getAllValidFeatures();
                            }
                            for (Integer columnNum : features) {
                                double[] featureStatistic = localStatistics.get(entry.getKey())
                                        .getFeatureStatistics().get(columnNum);
                                float weight = data.subsampleWeights[treeId % data.subsampleWeights.length];
                                if (Float.compare(weight, 0f) != 0) {
                                    // only compute when weight is not 0
                                    short binIndex = data.inputs[DTWorker.this.inputIndexMap.get(columnNum)];
                                    DTWorker.this.impurity.featureUpdate(featureStatistic, binIndex,
                                            data.output, data.significance, weight);
                                }
                            }
                        }
                    }
                }
                LOG.debug("Thread computing stats time is {}ms in thread {}",
                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start),
                        Thread.currentThread().getName());
                return localStatistics;
            }
        });
    }

    int rCnt = 0;
    while (rCnt < realThreadCount) {
        try {
            Map<Integer, NodeStats> currNodeStatsmap = completionService.take().get();
            if (rCnt == 0) {
                statistics = currNodeStatsmap;
            } else {
                for (Entry<Integer, NodeStats> entry : statistics.entrySet()) {
                    NodeStats resultNodeStats = entry.getValue();
                    mergeNodeStats(resultNodeStats, currNodeStatsmap.get(entry.getKey()));
                }
            }
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        rCnt += 1;
    }
    LOG.debug("Compute stats time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));

    LOG.info(
            "worker count is {}, error is {}, and stats size is {}. weightedTrainCount {}, weightedValidationCount {}, trainError {}, validationError {}",
            count, trainError, statistics.size(), weightedTrainCount, weightedValidationCount, trainError,
            validationError);
    return new DTWorkerParams(weightedTrainCount, weightedValidationCount, trainError, validationError,
            statistics);
}

From source file:org.apache.hadoop.hbase.client.transactional.TransactionManager.java

/**
 * Prepare to commit a transaction.
 *
 * @param transactionState
 * @return commitStatusCode (see {@link TransactionalRegionInterface})
 * @throws IOException
 * @throws CommitUnsuccessfulException
 */
public int prepareCommit(final TransactionState transactionState)
        throws CommitUnsuccessfulException, IOException {
    if (LOG.isTraceEnabled())
        LOG.trace("Enter prepareCommit, txid: " + transactionState.getTransactionId());

    if (batchRegionServer && (TRANSACTION_ALGORITHM == AlgorithmType.MVCC)) {
        boolean allReadOnly = true;
        int loopCount = 0;
        if (transactionState.islocalTransaction()) {
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());
        // need one CompletionService per request for thread safety; the pool of threads can be shared
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);

        try {
            ServerName servername;
            List<TransactionRegionLocation> regionList;
            Map<ServerName, List<TransactionRegionLocation>> locations = new HashMap<ServerName, List<TransactionRegionLocation>>();
            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                servername = location.getServerName();
                if (!locations.containsKey(servername)) {
                    regionList = new ArrayList<TransactionRegionLocation>();
                    locations.put(servername, regionList);
                } else {
                    regionList = locations.get(servername);
                }
                regionList.add(location);
            }

            for (final Map.Entry<ServerName, List<TransactionRegionLocation>> entry : locations.entrySet()) {
                loopCount++;
                compPool.submit(new TransactionManagerCallable(transactionState,
                        entry.getValue().iterator().next(), connection) {
                    public Integer call() throws CommitUnsuccessfulException, IOException {
                        return doPrepareX(entry.getValue(), transactionState.getTransactionId());
                    }
                });
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }

        // loop to retrieve replies
        int commitError = 0;
        try {
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }
            loopCount = 0;
            if (transactionState.getRegionsRetryCount() > 0) {
                for (TransactionRegionLocation location : transactionState.getRetryRegions()) {
                    loopCount++;
                    compPool.submit(new TransactionManagerCallable(transactionState, location, connection) {
                        public Integer call() throws CommitUnsuccessfulException, IOException {

                            return doPrepareX(location.getRegionInfo().getRegionName(),
                                    transactionState.getTransactionId(), location);
                        }
                    });
                }
                transactionState.clearRetryRegions();
            }
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;

        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    } else {
        boolean allReadOnly = true;
        int loopCount = 0;
        ServerName servername;
        List<TransactionRegionLocation> regionList;
        Map<ServerName, List<TransactionRegionLocation>> locations = null;

        if (transactionState.islocalTransaction()) {
            //System.out.println("prepare islocal");
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());

        // need one CompletionService per request for thread safety; the pool of threads can be shared
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);
        try {
            if (batchRSMetricsFlag)
                locations = new HashMap<ServerName, List<TransactionRegionLocation>>();

            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                if (batchRSMetricsFlag) {
                    servername = location.getServerName();
                    if (!locations.containsKey(servername)) {
                        regionList = new ArrayList<TransactionRegionLocation>();
                        locations.put(servername, regionList);
                    } else {
                        regionList = locations.get(servername);
                    }
                    regionList.add(location);
                }

                loopCount++;
                final TransactionRegionLocation myLocation = location;
                final byte[] regionName = location.getRegionInfo().getRegionName();

                compPool.submit(new TransactionManagerCallable(transactionState, location, connection) {
                    public Integer call() throws IOException, CommitUnsuccessfulException {
                        return doPrepareX(regionName, transactionState.getTransactionId(), myLocation);
                    }
                });
            }

            if (batchRSMetricsFlag) {
                this.regions += transactionState.getParticipatingRegions().size();
                this.regionServers += locations.size();
                String rsToRegion = locations.size() + " RS / "
                        + transactionState.getParticipatingRegions().size() + " Regions";
                if (batchRSMetrics.get(rsToRegion) == null) {
                    batchRSMetrics.put(rsToRegion, 1L);
                } else {
                    batchRSMetrics.put(rsToRegion, batchRSMetrics.get(rsToRegion) + 1);
                }
                if (metricsCount >= 10000) {
                    metricsCount = 0;
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------------- BatchRS metrics ----------------------");
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total Region calls: " + this.regions);
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total RegionServer calls: " + this.regionServers);
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------- Total number of calls by ratio: ------------");
                    for (Map.Entry<String, Long> entry : batchRSMetrics.entrySet()) {
                        if (LOG.isInfoEnabled())
                            LOG.info(entry.getKey() + ": " + entry.getValue());
                    }
                    if (LOG.isInfoEnabled())
                        LOG.info("-------------------------------------------------------------");
                }
                metricsCount++;
            }

        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during submit to pool): " + e);
            throw new CommitUnsuccessfulException(e);
        }

        // loop to retrieve replies
        int commitError = 0;
        try {
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                int canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }
        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during completion processing): " + e);
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;

        //Before replying prepare success, check for DDL transaction.
        //If prepare already has errors (commitError != 0), an abort is automatically
        //triggered by the TM, which takes care of the DDL abort.
        //If prepare is successful up to this point, the DDL operation needs to check whether any
        //drop table requests were recorded as part of phase 0. If any drop table
        //requests are recorded, those tables need to be disabled as part of prepare.
        if (transactionState.hasDDLTx()) {
            //if tables were created, then nothing else needs to be done.
            //if tables were recorded as dropped, then they need to be disabled.
            //Disabled tables will ultimately be deleted in the commit phase.
            ArrayList<String> createList = new ArrayList<String>(); //This list is ignored.
            ArrayList<String> dropList = new ArrayList<String>();
            ArrayList<String> truncateList = new ArrayList<String>();
            StringBuilder state = new StringBuilder();
            try {
                tmDDL.getRow(transactionState.getTransactionId(), state, createList, dropList, truncateList);
            } catch (Exception e) {
                LOG.error("exception in doPrepare getRow: " + e);
                if (LOG.isTraceEnabled())
                    LOG.trace("exception in doPrepare getRow: txID: " + transactionState.getTransactionId());
                state.append("INVALID"); //to avoid processing further down this path.
                commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
            }

            //Return if error at this point.
            if (commitError != 0)
                return commitError;

            if (state.toString().equals("VALID") && dropList.size() > 0) {
                Iterator<String> di = dropList.iterator();
                while (di.hasNext()) {
                    try {
                        //physical drop of table from hbase.
                        disableTable(transactionState, di.next());
                    } catch (Exception e) {
                        if (LOG.isTraceEnabled())
                            LOG.trace("exception in doPrepare disableTable: txID: "
                                    + transactionState.getTransactionId());
                        LOG.error("exception in doCommit, Step : DeleteTable: " + e);

                        //Any error at this point should mark prepareCommit as unsuccessful.
                        //Retry logic can be added only if it is retryable error: TODO.
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                        break;
                    }
                }
            }
        }

        if (commitError != 0)
            return commitError;

        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    }
}

From source file:org.ugent.caagt.genestacker.search.bb.BranchAndBound.java

@Override
public ParetoFrontier runSearch(long runtimeLimit, int numThreads) throws GenestackerException {

    // create list to store previously generated schemes
    previousSchemes = new ArrayList<>();
    // create set to store previously generated scheme alternatives
    previousSchemeAlternatives = new HashSet<>();
    // create queue for schemes to be considered
    schemeQueue = new LinkedList<>();

    // reset ids
    SeedLotNode.resetIDs();
    PlantNode.resetIDs();
    CrossingNode.resetIDs();
    CrossingSchemeAlternatives.resetIDs();

    // create thread pool and completion service for scheme extension

    // inform user about number of cross workers used (verbose)
    logger.info(VERBOSE, "Number of threads used for extending partial schemes: {}", numThreads);
    ExecutorService extPool = Executors.newFixedThreadPool(numThreads);
    CompletionService<List<CrossingSchemeAlternatives>> extCompletionService = new ExecutorCompletionService<>(
            extPool);

    // initialize solution manager
    BranchAndBoundSolutionManager solutionManager = new BranchAndBoundSolutionManager(dominatesRelation,
            ideotype, popSizeTools, maxNumSeedsPerCrossing, constraints, heuristics, seedLotFilters,
            homozygousIdeotypeParents);
    // set initial Pareto frontier, if any
    if (initialFrontier != null) {
        solutionManager.setFrontier(initialFrontier);
    }

    // apply initial plant filter, if any
    if (initialPlantFilter != null) {

        // verbose
        logger.info(VERBOSE, "Filtering initial plants ...");

        initialPlants = initialPlantFilter.filter(initialPlants);

        //verbose
        logger.info(VERBOSE, "Retained {} initial plants (see below)", initialPlants.size());
        for (Plant p : initialPlants) {
            logger.info(VERBOSE, "\n{}", p);
        }

    }

    // create initial partial schemes from initial plants
    List<CrossingSchemeAlternatives> initialParentSchemes = new ArrayList<>();
    for (Plant p : initialPlants) {
        // create uniform seed lot
        SeedLot sl = new SeedLot(p.getGenotype());
        // create seedlot node
        SeedLotNode sln = new SeedLotNode(sl, 0);
        // create and attach plant node
        PlantNode pn = new PlantNode(p, 0, sln);
        // create partial crossing scheme
        CrossingScheme s = new CrossingScheme(popSizeTools, pn);
        initialParentSchemes.add(new CrossingSchemeAlternatives(s));
    }
    registerNewSchemes(initialParentSchemes, solutionManager);

    // now iteratively cross schemes with previous schemes to create larger schemes,
    // until all solutions have been inspected or pruned
    while (!runtimeLimitExceeded() && !schemeQueue.isEmpty()) {

        // get next scheme from queue
        CrossingSchemeAlternatives cur = schemeQueue.poll();

        // fire progression message (verbose)
        logger.info(VERBOSE, "num solutions: {} ### prog: {} ({}) ### cur scheme: {} - T = {}",
                solutionManager.getFrontier().getNumSchemes(), previousSchemes.size(), schemeQueue.size(), cur,
                TimeFormatting.formatTime(System.currentTimeMillis() - getStart()));
        // debug: create diagram of current scheme (all alternatives)
        if (logger.isDebugEnabled()) {
            for (int i = 0; i < cur.nrOfAlternatives(); i++) {
                logger.debug("Cur scheme (alternative {}): {}", i + 1,
                        writeDiagram(cur.getAlternatives().get(i)));
            }
            // wait for enter
            DebugUtils.waitForEnter();
        }

        // delete possible pruned alternatives
        Iterator<CrossingScheme> it = cur.iterator();
        int numForCrossing = 0;
        int numForSelfing = 0;
        while (it.hasNext()) {
            CrossingScheme alt = it.next();
            // check if alternative should be removed
            if (previousSchemeAlternatives.contains(alt)) {
                // equivalent scheme alternative generated before, delete current alternative
                it.remove();
            } else if (solutionManager.pruneDequeueScheme(alt)) {
                // prune dequeued scheme (e.g. by the optimal subscheme heuristic)
                it.remove();
            } else {
                // check pruning for crossing/selfing
                boolean pruneCross = solutionManager.pruneCrossCurrentScheme(alt);
                boolean pruneSelf = solutionManager.pruneSelfCurrentScheme(alt);
                if (pruneCross && pruneSelf) {
                    // alternative not useful anymore
                    it.remove();
                } else {
                    // count nr of alternatives useful for crossing or selfing
                    if (!pruneCross) {
                        numForCrossing++;
                    }
                    if (!pruneSelf) {
                        numForSelfing++;
                    }
                }
            }
        }

        if (cur.nrOfAlternatives() > 0) {

            // if useful, self current scheme
            if (numForSelfing > 0) {
                registerNewSchemes(selfScheme(cur, map, solutionManager), solutionManager);
            }

            // if useful, cross with previous schemes
            if (numForCrossing > 0) {
                // launch workers to combine with previous schemes
                Iterator<CrossingSchemeAlternatives> previousSchemesIterator = previousSchemes.iterator();
                for (int w = 0; w < numThreads; w++) {
                    // submit worker
                    extCompletionService
                            .submit(new CrossWorker(previousSchemesIterator, cur, solutionManager, map));
                    // very verbose
                    logger.info(VERY_VERBOSE, "Launched cross worker {} of {}", w + 1, numThreads);
                }
                // handle results of completed workers in the order in which they complete
                for (int w = 0; w < numThreads; w++) {
                    try {
                        // wait for next worker to complete and register its solutions
                        registerNewSchemes(extCompletionService.take().get(), solutionManager);
                        // very verbose
                        logger.info(VERY_VERBOSE, "{}/{} cross workers finished", w + 1, numThreads);
                    } catch (InterruptedException | ExecutionException ex) {
                        // something went wrong with the cross workers
                        throw new SearchException("An error occured while extending the current scheme.", ex);
                    }
                }
            }

            // put the scheme in the sorted set with previously considered schemes (only done if useful for later crossings)
            previousSchemes.add(cur);
            // register scheme alternatives
            previousSchemeAlternatives.addAll(cur.getAlternatives());
        }
    }

    if (runtimeLimitExceeded()) {
        // info
        logger.info("Runtime limit exceeded");
    }

    // shutdown thread pool
    extPool.shutdownNow();

    return solutionManager.getFrontier();
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
        throws IOException, UnsupportedEncodingException {
    // Load in all the HStores.

    long maxSeqId = -1;
    // initialized to -1 so that we pick up MemstoreTS from column families
    long maxMemstoreTS = -1;

    if (!htableDescriptor.getFamilies().isEmpty()) {
        // initialize the thread pool for opening stores in parallel.
        ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
                "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
        CompletionService<HStore> completionService = new ExecutorCompletionService<HStore>(
                storeOpenerThreadPool);

        // initialize each store in parallel
        for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
            status.setStatus("Instantiating store for column family " + family);
            completionService.submit(new Callable<HStore>() {
                @Override
                public HStore call() throws IOException {
                    return instantiateHStore(family);
                }
            });
        }
        boolean allStoresOpened = false;
        try {
            for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
                Future<HStore> future = completionService.take();
                HStore store = future.get();
                this.stores.put(store.getColumnFamilyName().getBytes(), store);

                long storeMaxSequenceId = store.getMaxSequenceId();
                maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
                if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
                    maxSeqId = storeMaxSequenceId;
                }
                long maxStoreMemstoreTS = store.getMaxMemstoreTS();
                if (maxStoreMemstoreTS > maxMemstoreTS) {
                    maxMemstoreTS = maxStoreMemstoreTS;
                }
            }
            allStoresOpened = true;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } finally {
            storeOpenerThreadPool.shutdownNow();
            if (!allStoresOpened) {
                // something went wrong, close all opened stores
                LOG.error("Could not initialize all stores for the region=" + this);
                for (Store store : this.stores.values()) {
                    try {
                        store.close();
                    } catch (IOException e) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
    mvcc.initialize(maxMemstoreTS + 1);
    // Recover any edits if available.
    maxSeqId = Math.max(maxSeqId,
            replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
    return maxSeqId;
}

From source file:org.apache.hadoop.hbase.regionserver.HRegion.java

private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status) throws IOException {
    if (isClosed()) {
        LOG.warn("Region " + this + " already closed");
        return null;
    }

    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor pre-close hooks");
        this.coprocessorHost.preClose(abort);
    }

    status.setStatus("Disabling compacts and flushes for region");
    synchronized (writestate) {
        // Disable compacting and flushing by background threads for this
        // region.
        writestate.writesEnabled = false;
        LOG.debug("Closing " + this + ": disabling compactions & flushes");
        waitForFlushesAndCompactions();
    }
    // If we were not just flushing, is it worth doing a preflush...one
    // that will clear out the bulk of the memstore before we put up
    // the close flag?
    if (!abort && worthPreFlushing()) {
        status.setStatus("Pre-flushing region before close");
        LOG.info("Running close preflush of " + this.getRegionNameAsString());
        try {
            internalFlushcache(status);
        } catch (IOException ioe) {
            // Failed to flush the region. Keep going.
            status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage());
        }
    }

    this.closing.set(true);
    status.setStatus("Disabling writes for close");
    // block waiting for the lock for closing
    lock.writeLock().lock();
    try {
        if (this.isClosed()) {
            status.abort("Already got closed by another process");
            // SplitTransaction handles the null
            return null;
        }
        LOG.debug("Updates disabled for region " + this);
        // Don't flush the cache if we are aborting
        if (!abort) {
            int flushCount = 0;
            while (this.getMemstoreSize().get() > 0) {
                try {
                    if (flushCount++ > 0) {
                        int actualFlushes = flushCount - 1;
                        if (actualFlushes > 5) {
                            // If we tried 5 times and are unable to clear memory, abort
                            // so we do not lose data
                            throw new DroppedSnapshotException("Failed clearing memory after " + actualFlushes
                                    + " attempts on region: " + Bytes.toStringBinary(getRegionName()));
                        }
                        LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this);
                    }
                    internalFlushcache(status);
                } catch (IOException ioe) {
                    status.setStatus("Failed flush " + this + ", putting online again");
                    synchronized (writestate) {
                        writestate.writesEnabled = true;
                    }
                    // Have to throw to upper layers.  I can't abort server from here.
                    throw ioe;
                }
            }
        }

        Map<byte[], List<StoreFile>> result = new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
        if (!stores.isEmpty()) {
            // initialize the thread pool for closing stores in parallel.
            ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool(
                    "StoreCloserThread-" + this.getRegionNameAsString());
            CompletionService<Pair<byte[], Collection<StoreFile>>> completionService = new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(
                    storeCloserThreadPool);

            // close each store in parallel
            for (final Store store : stores.values()) {
                assert abort || store.getFlushableSize() == 0;
                completionService.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
                    @Override
                    public Pair<byte[], Collection<StoreFile>> call() throws IOException {
                        return new Pair<byte[], Collection<StoreFile>>(store.getFamily().getName(),
                                store.close());
                    }
                });
            }
            try {
                for (int i = 0; i < stores.size(); i++) {
                    Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
                    Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
                    List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
                    if (familyFiles == null) {
                        familyFiles = new ArrayList<StoreFile>();
                        result.put(storeFiles.getFirst(), familyFiles);
                    }
                    familyFiles.addAll(storeFiles.getSecond());
                }
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeCloserThreadPool.shutdownNow();
            }
        }
        this.closed.set(true);
        if (memstoreSize.get() != 0)
            LOG.error("Memstore size is " + memstoreSize.get());
        if (coprocessorHost != null) {
            status.setStatus("Running coprocessor post-close hooks");
            this.coprocessorHost.postClose(abort);
        }
        if (this.metricsRegion != null) {
            this.metricsRegion.close();
        }
        if (this.metricsRegionWrapper != null) {
            Closeables.closeQuietly(this.metricsRegionWrapper);
        }
        status.markComplete("Closed");
        LOG.info("Closed " + this);
        return result;
    } finally {
        lock.writeLock().unlock();
    }
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.engines.internal.CheckinEngine.java

/**
 * Uploads the file for the given change, unless the MD5 sum of the local
 * file matches the upload hash and we can skip the upload for this file
 * entirely.
 *
 * @param change
 *        the pending change whose file should be uploaded.
 * @param completionService
 *        where the uploads were submitted (must not be <code>null</code>)
 * @param state
 *        the state kept during checkin to note errors.
 * @throws CheckinException
 *         if the local file was missing.
 * @throws CoreCancelException
 *         if the upload was cancelled by the user.
 */
private void uploadFile(PendingChange change, final CompletionService<WorkerStatus> completionService,
        final AsyncCheckinOperation state) throws CheckinException, CoreCancelException {
    Check.notNull(change, "change"); //$NON-NLS-1$

    /*
     * Callers should only use these methods for pending adds or edits, and
     * we always have a local item for these.
     */
    Check.notNull(change.getLocalItem(), "change.getLocalItem()"); //$NON-NLS-1$

    final String localItem = change.getLocalItem();
    final FileSystemAttributes attrs = FileSystemUtils.getInstance().getAttributes(localItem);
    if (new File(change.getLocalItem()).exists() == false && !attrs.isSymbolicLink()) {
        throw new CheckinException(null, false, false,
                MessageFormat.format(Messages.getString("CheckinEngine.LocalItemNoLongerExistsFormat"), //$NON-NLS-1$
                        change.getLocalItem()));
    }

    /*
     * Handle tpattributes: EOL and AppleSingle encoding. The change
     * variable is set to a cloned change so we can modify the local item
     * for the upload without affecting the caller's use of the original
     * change.
     */
    String filterTempFile = null;
    final GetEngine getEngine = new GetEngine(client);
    final FileAttributesCollection attributes = getEngine.getAttributesForFile(localItem,
            change.getServerItem(), (FileEncoding.BINARY.getCodePage() != change.getEncoding()));

    if (attributes != null) {
        /*
         * Convert end-of-line characters for files that have the extended
         * attribute set.
         */
        final StringPairFileAttribute eolAttribute = attributes
                .getStringPairFileAttribute(FileAttributeNames.SERVER_EOL);

        if (eolAttribute != null && eolAttribute.getValue() != null) {
            final String desiredNewlineSequence = FileAttributeValues
                    .getEndOfLineStringForAttributeValue(eolAttribute);

            if (desiredNewlineSequence == null) {
                throw new CheckinException(null, false, false, MessageFormat.format(
                        Messages.getString("CheckinEngine.UnsupportedServerEOLStyleFormat"), //$NON-NLS-1$
                        eolAttribute.getValue(), change.getLocalItem(), FileAttributesFile.DEFAULT_FILENAME));
            } else if (desiredNewlineSequence.equals("")) //$NON-NLS-1$
            {
                log.debug(MessageFormat.format("Not converting line endings in {0}", change.getLocalItem())); //$NON-NLS-1$
            } else {
                log.debug(MessageFormat.format("Converting line endings for {0} to {1}", //$NON-NLS-1$
                        change.getLocalItem(), eolAttribute.getValue()));

                /*
                 * Create a temporary file for the conversion so we don't
                 * modify the working folder file.
                 */

                try {
                    if (filterTempFile == null) {
                        filterTempFile = createTempFile(change);
                    }

                    Charset charset = CodePageMapping.getCharset(change.getEncoding(), false);

                    if (charset == null) {
                        charset = Charset.defaultCharset();
                    }

                    NewlineUtils.convertFile(new File(filterTempFile), charset, desiredNewlineSequence);

                    log.info(MessageFormat.format("Converted line endings in {0} to {1}", //$NON-NLS-1$
                            filterTempFile, eolAttribute.getValue(), charset.name()));
                } catch (final UnsupportedEncodingException e) {
                    final String message = MessageFormat.format(
                            Messages.getString("CheckinEngine.CouldNotChangeEOLStyleUnknownJavaEncodingFormat"), //$NON-NLS-1$
                            change.getLocalItem(), e.getLocalizedMessage());

                    log.error(message, e);
                    throw new CheckinException(null, false, false, message);
                } catch (final IOException e) {
                    final String message = MessageFormat.format(
                            Messages.getString("CheckinEngine.CouldNotChangeEOLStyleIOExceptionFormat"), //$NON-NLS-1$
                            change.getLocalItem(), e.getLocalizedMessage());

                    log.error(message, e);
                    throw new CheckinException(null, false, false, message);
                }
            }
        }

        /*
         * Encode data fork and resource fork into an AppleSingle file if
         * requested. This should come last (as other filters, above, may
         * modify the data fork and should not modify the AppleSingle file.)
         */
        final StringPairFileAttribute transformAttribute = attributes
                .getStringPairFileAttribute(FileAttributeNames.TRANSFORM);

        if (transformAttribute != null && "apple".equals(transformAttribute.getValue())) //$NON-NLS-1$
        {
            if (Platform.isCurrentPlatform(Platform.MAC_OS_X)) {
                try {
                    if (filterTempFile == null) {
                        filterTempFile = createTempFile(change);
                    }

                    AppleSingleUtil.encodeFile(new File(filterTempFile), change.getLocalItem());
                } catch (final IOException e) {
                    final String message = MessageFormat.format(
                            Messages.getString("CheckinEngine.CouldNotDecodeAppleSingleFileFormat"), //$NON-NLS-1$
                            change.getLocalItem(), e.getLocalizedMessage());

                    log.error(message, e);
                    throw new CheckinException(null, false, false, message);
                }
            } else {
                log.warn(MessageFormat.format("Not preserving Apple metadata for {0} on platform {1}", //$NON-NLS-1$
                        change.getLocalItem(), Platform.getCurrentPlatformString()));
            }
        }
    }

    if (attrs.isSymbolicLink()) {
        /*
         * for symlinks, create temporary file containing the symlink info;
         * upload the temporary file rather than the symlinks
         */
        try {
            final String link = FileSystemUtils.getInstance().getSymbolicLink(localItem);
            filterTempFile = createTempFileForSymbolicLink(localItem, link);
        } catch (final IOException e) {
            final String message = MessageFormat.format(
                    Messages.getString("CheckinEngine.CouldNotCreateTempFileForSymlinkFormat"), //$NON-NLS-1$
                    localItem, e.getLocalizedMessage());

            log.error(message, e);
            throw new CheckinException(null, false, false, message);
        }
    }

    /*
     * We may have done some filtering (EOL conversion or AppleSingle
     * encoding); update the change.
     */
    if (filterTempFile != null) {
        /*
         * Clone the pending change for the upload process, so we can change
         * the local item to the temp item and not affect the working folder
         * updates applied after the upload process finishes (which uses the
         * original change object).
         */
        change = new PendingChange(change);
        change.setLocalItem(filterTempFile);
    }

    // See if we can skip the upload for non-symbolic files.
    final byte[] localMD5Hash = computeMD5Hash(change.getLocalItem(), TaskMonitorService.getTaskMonitor());
    byte[] serverHash = change.getUploadContentHashValue();
    if (serverHash == null) {
        serverHash = change.getHashValue();
    }

    if (serverHash != null && serverHash.length > 0 && Arrays.equals(serverHash, localMD5Hash)) {
        log.trace(
                MessageFormat.format("skipped upload of {0} because hash codes match", change.getLocalItem())); //$NON-NLS-1$

        /*
         * We may have done some sort of upload filtering (EOL conversion or
         * AppleSingle encoding), clean up the file in this case.
         */
        if (filterTempFile != null) {
            TempStorageService.getInstance().cleanUpItem(new File(filterTempFile));
        }

        return;
    }

    /*
     * Let our thread pool execute this task. submit() will block if all the
     * workers are busy (because the completion service wraps a
     * BoundedExecutor), which is what we want. This keeps the number of
     * upload connections limited so we don't saturate the network with
     * TCP/IP and HTTP overhead, or the TFS server with connections.
     *
     * We don't do the MD5 checking in these threads because keeping it
     * serial is quite efficient. Parallel MD5 checking could make us go
     * disk-bound when many small files are spread all over the disk
     * (out of cache).
     */
    completionService.submit(new CheckinWorker(TaskMonitorService.getTaskMonitor(), client, workspace, change,
            localMD5Hash, state));
}
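
A minimal, self-contained sketch of the pattern the comment above describes: pairing a CompletionService with a cap on in-flight tasks, so that submission blocks while all workers are busy. The TFS client's BoundedExecutor is not shown here; a plain Semaphore stands in for it, and the sleep is a placeholder for the real upload work.

import java.util.concurrent.*;

public class BoundedUploadPool {
    public static void main(String[] args) throws Exception {
        final int workers = 4;
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        CompletionService<String> uploads = new ExecutorCompletionService<String>(pool);
        final Semaphore permits = new Semaphore(workers); // caps in-flight uploads

        for (int i = 0; i < 10; i++) {
            final int id = i;
            permits.acquire(); // blocks the producer while all workers are busy
            uploads.submit(new Callable<String>() {
                @Override
                public String call() throws Exception {
                    try {
                        Thread.sleep(100); // stand-in for the actual upload
                        return "upload-" + id;
                    } finally {
                        permits.release();
                    }
                }
            });
        }

        for (int i = 0; i < 10; i++) {
            System.out.println(uploads.take().get() + " done");
        }
        pool.shutdown();
    }
}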

From source file:org.apache.hadoop.hive.llap.cli.LlapServiceDriver.java

private int run(String[] args) throws Exception {
    LlapOptionsProcessor optionsProcessor = new LlapOptionsProcessor();
    final LlapOptions options = optionsProcessor.processOptions(args);

    final Properties propsDirectOptions = new Properties();

    if (options == null) {
        // help
        return 1;
    }

    // Working directory.
    Path tmpDir = new Path(options.getDirectory());

    if (conf == null) {
        throw new Exception("Cannot load any configuration to run command");
    }

    final long t0 = System.nanoTime();

    final FileSystem fs = FileSystem.get(conf);
    final FileSystem lfs = FileSystem.getLocal(conf).getRawFileSystem();

    int threadCount = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
    final ExecutorService executor = Executors.newFixedThreadPool(threadCount,
            new ThreadFactoryBuilder().setNameFormat("llap-pkg-%d").build());
    final CompletionService<Void> asyncRunner = new ExecutorCompletionService<Void>(executor);

    int rc = 0;
    try {

        // needed so that the file is actually loaded into configuration.
        for (String f : NEEDED_CONFIGS) {
            conf.addResource(f);
            if (conf.getResource(f) == null) {
                throw new Exception("Unable to find required config file: " + f);
            }
        }
        for (String f : OPTIONAL_CONFIGS) {
            conf.addResource(f);
        }

        conf.reloadConfiguration();

        populateConfWithLlapProperties(conf, options.getConfig());

        if (options.getName() != null) {
            // update service registry configs - caveat: this has nothing to do with the actual settings
            // as read by the AM
            // if needed, use --hiveconf llap.daemon.service.hosts=@llap0 to dynamically switch between
            // instances
            conf.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
        }

        if (options.getLogger() != null) {
            HiveConf.setVar(conf, ConfVars.LLAP_DAEMON_LOGGER, options.getLogger());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_LOGGER.varname, options.getLogger());
        }
        boolean isDirect = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT);

        if (options.getSize() != -1) {
            if (options.getCache() != -1) {
                if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED)) {
                    // direct heap allocations need to be safer
                    Preconditions.checkArgument(options.getCache() < options.getSize(),
                            "Cache size (" + LlapUtil.humanReadableByteCount(options.getCache())
                                    + ") has to be smaller" + " than the container sizing ("
                                    + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
                } else if (options.getCache() < options.getSize()) {
                    LOG.warn("Note that this might need YARN physical memory monitoring to be turned off "
                            + "(yarn.nodemanager.pmem-check-enabled=false)");
                }
            }
            if (options.getXmx() != -1) {
                Preconditions.checkArgument(options.getXmx() < options.getSize(),
                        "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx())
                                + ") has to be" + " smaller than the container sizing ("
                                + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
            }
            if (isDirect && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED)) {
                // direct and not memory mapped
                Preconditions.checkArgument(options.getXmx() + options.getCache() <= options.getSize(),
                        "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx())
                                + ") + cache size (" + LlapUtil.humanReadableByteCount(options.getCache())
                                + ") has to be smaller than the container sizing ("
                                + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
            }
        }

        if (options.getExecutors() != -1) {
            conf.setLong(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, options.getExecutors());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname,
                    String.valueOf(options.getExecutors()));
            // TODO: vcpu settings - possibly when DRFA works right
        }

        if (options.getIoThreads() != -1) {
            conf.setLong(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname, options.getIoThreads());
            propsDirectOptions.setProperty(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname,
                    String.valueOf(options.getIoThreads()));
        }

        long cache = -1, xmx = -1;
        if (options.getCache() != -1) {
            cache = options.getCache();
            conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, Long.toString(cache));
            propsDirectOptions.setProperty(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
                    Long.toString(cache));
        }

        if (options.getXmx() != -1) {
            // Note: Xmx is not the usable max heap in JDK 8. Subtract 50% of the
            // survivor fraction from it to get the memory actually usable
            // before it goes into GC.
            xmx = options.getXmx();
            long xmxMb = (xmx / (1024L * 1024L));
            conf.setLong(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, xmxMb);
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname,
                    String.valueOf(xmxMb));
        }

        long size = options.getSize();
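        // Derive a container size when none was given: heap (plus cache when
        // on-heap) with 20% headroom, capped at heap + 1 GB; a direct (off-heap)
        // cache is added after the headroom calculation.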
        if (size == -1) {
            long heapSize = xmx;
            if (!isDirect) {
                heapSize += cache;
            }
            size = Math.min((long) (heapSize * 1.2), heapSize + 1024L * 1024 * 1024);
            if (isDirect) {
                size += cache;
            }
        }
        long containerSize = size / (1024 * 1024);
        final long minAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
        Preconditions.checkArgument(containerSize >= minAlloc,
                "Container size (" + LlapUtil.humanReadableByteCount(size) + ") should be greater"
                        + " than the minimum allocation ("
                        + LlapUtil.humanReadableByteCount(minAlloc * 1024L * 1024L) + ")");
        conf.setLong(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, containerSize);
        propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname,
                String.valueOf(containerSize));

        LOG.info("Memory settings: container memory: {} executor memory: {} cache memory: {}",
                LlapUtil.humanReadableByteCount(options.getSize()),
                LlapUtil.humanReadableByteCount(options.getXmx()),
                LlapUtil.humanReadableByteCount(options.getCache()));

        if (options.getLlapQueueName() != null && !options.getLlapQueueName().isEmpty()) {
            conf.set(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
        }

        final URL logger = conf.getResource(LlapConstants.LOG4j2_PROPERTIES_FILE);

        if (null == logger) {
            throw new Exception("Unable to find required config file: llap-daemon-log4j2.properties");
        }

        Path home = new Path(System.getenv("HIVE_HOME"));
        Path scriptParent = new Path(new Path(home, "scripts"), "llap");
        Path scripts = new Path(scriptParent, "bin");

        if (!lfs.exists(home)) {
            throw new Exception("Unable to find HIVE_HOME: " + home);
        } else if (!lfs.exists(scripts)) {
            LOG.warn("Unable to find llap scripts: " + scripts);

        final Path libDir = new Path(tmpDir, "lib");
        final Path tezDir = new Path(libDir, "tez");
        final Path udfDir = new Path(libDir, "udfs");
        final Path confPath = new Path(tmpDir, "conf");
        if (!lfs.mkdirs(confPath)) {
            LOG.warn("mkdirs for " + confPath + " returned false");
        }
        if (!lfs.mkdirs(tezDir)) {
            LOG.warn("mkdirs for " + tezDir + " returned false");
        }
        if (!lfs.mkdirs(udfDir)) {
            LOG.warn("mkdirs for " + udfDir + " returned false");
        }

        NamedCallable<Void> downloadTez = new NamedCallable<Void>("downloadTez") {
            @Override
            public Void call() throws Exception {
                synchronized (fs) {
                    String tezLibs = conf.get(TezConfiguration.TEZ_LIB_URIS);
                    if (tezLibs == null) {
                        // Fail fast with a clear message; new Path(null) below would throw anyway.
                        throw new Exception("Missing tez.lib.uris in tez-site.xml");
                    }
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Copying tez libs from " + tezLibs);
                    }
                    lfs.mkdirs(tezDir);
                    fs.copyToLocalFile(new Path(tezLibs), new Path(libDir, "tez.tar.gz"));
                    CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), tezDir.toString(), true);
                    lfs.delete(new Path(libDir, "tez.tar.gz"), false);
                }
                return null;
            }
        };

        NamedCallable<Void> copyLocalJars = new NamedCallable<Void>("copyLocalJars") {
            @Override
            public Void call() throws Exception {
                Class<?>[] dependencies = new Class<?>[] { LlapDaemonProtocolProtos.class, // llap-common
                        LlapTezUtils.class, // llap-tez
                        LlapInputFormat.class, // llap-server
                        HiveInputFormat.class, // hive-exec
                        SslContextFactory.class, // hive-common (https deps)
                        Rule.class, // Jetty rewrite class
                        RegistryUtils.ServiceRecordMarshal.class, // ZK registry
                        // log4j2
                        com.lmax.disruptor.RingBuffer.class, // disruptor
                        org.apache.logging.log4j.Logger.class, // log4j-api
                        org.apache.logging.log4j.core.Appender.class, // log4j-core
                        org.apache.logging.slf4j.Log4jLogger.class, // log4j-slf4j
                        // log4j-1.2-API needed for NDC
                        org.apache.log4j.NDC.class, };

                for (Class<?> c : dependencies) {
                    Path jarPath = new Path(Utilities.jarFinderGetJar(c));
                    lfs.copyFromLocalFile(jarPath, libDir);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Copying " + jarPath + " to " + libDir);
                    }
                }
                return null;
            }
        };

        // copy default aux classes (json/hbase)

        NamedCallable<Void> copyAuxJars = new NamedCallable<Void>("copyAuxJars") {
            @Override
            public Void call() throws Exception {
                for (String className : DEFAULT_AUX_CLASSES) {
                    localizeJarForClass(lfs, libDir, className, false);
                }
                Collection<String> codecs = conf.getStringCollection("io.compression.codecs");
                if (codecs != null) {
                    for (String codecClassName : codecs) {
                        localizeJarForClass(lfs, libDir, codecClassName, false);
                    }
                }

                if (options.getIsHBase()) {
                    try {
                        localizeJarForClass(lfs, libDir, HBASE_SERDE_CLASS, true);
                        Job fakeJob = new Job(new JobConf()); // HBase API is convoluted.
                        TableMapReduceUtil.addDependencyJars(fakeJob);
                        Collection<String> hbaseJars = fakeJob.getConfiguration()
                                .getStringCollection("tmpjars");
                        for (String jarPath : hbaseJars) {
                            if (!jarPath.isEmpty()) {
                                lfs.copyFromLocalFile(new Path(jarPath), libDir);
                            }
                        }
                    } catch (Throwable t) {
                        String err = "Failed to add HBase jars. Use --auxhbase=false to avoid localizing them";
                        LOG.error(err);
                        System.err.println(err);
                        throw new RuntimeException(t);
                    }
                }

                HashSet<String> auxJars = new HashSet<>();
                // There are many ways to have AUX jars in Hive... sigh
                if (options.getIsHiveAux()) {
                    // Note: we don't add ADDED jars, RELOADABLE jars, etc. That is by design; there are too many ways
                    // to add jars in Hive, some of which are session/etc. specific. Env + conf + arg should be enough.
                    addAuxJarsToSet(auxJars, conf.getAuxJars());
                    addAuxJarsToSet(auxJars, System.getenv("HIVE_AUX_JARS_PATH"));
                    LOG.info("Adding the following aux jars from the environment and configs: " + auxJars);
                }

                addAuxJarsToSet(auxJars, options.getAuxJars());
                for (String jarPath : auxJars) {
                    lfs.copyFromLocalFile(new Path(jarPath), libDir);
                }
                return null;
            }

            private void addAuxJarsToSet(HashSet<String> auxJarSet, String auxJars) {
                if (auxJars != null && !auxJars.isEmpty()) {
                    // TODO: transitive dependencies warning?
                    String[] jarPaths = auxJars.split(",");
                    for (String jarPath : jarPaths) {
                        if (!jarPath.isEmpty()) {
                            auxJarSet.add(jarPath);
                        }
                    }
                }
            }
        };

        NamedCallable<Void> copyUdfJars = new NamedCallable<Void>("copyUdfJars") {
            @Override
            public Void call() throws Exception {
                // UDFs
                final Set<String> allowedUdfs;

                if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOW_PERMANENT_FNS)) {
                    synchronized (fs) {
                        allowedUdfs = downloadPermanentFunctions(conf, udfDir);
                    }
                } else {
                    allowedUdfs = Collections.emptySet();
                }

                // try-with-resources so the stream is closed even if a write fails
                try (PrintWriter udfStream = new PrintWriter(lfs.create(
                        new Path(confPath, StaticPermanentFunctionChecker.PERMANENT_FUNCTIONS_LIST)))) {
                    for (String udfClass : allowedUdfs) {
                        udfStream.println(udfClass);
                    }
                }
                return null;
            }
        };

        String java_home;
        if (options.getJavaPath() == null || options.getJavaPath().isEmpty()) {
            java_home = System.getenv("JAVA_HOME");
            String jre_home = System.getProperty("java.home");
            if (java_home == null) {
                java_home = jre_home;
            } else if (!java_home.equals(jre_home)) {
                LOG.warn("Java versions might not match : JAVA_HOME=[{}],process jre=[{}]", java_home,
                        jre_home);
            }
        } else {
            java_home = options.getJavaPath();
        }
        if (java_home == null || java_home.isEmpty()) {
            throw new RuntimeException(
                    "Could not determine JAVA_HOME from command line parameters, environment or system properties");
        }
        LOG.info("Using [{}] for JAVA_HOME", java_home);

        NamedCallable<Void> copyConfigs = new NamedCallable<Void>("copyConfigs") {
            @Override
            public Void call() throws Exception {
                // Copy over the mandatory configs for the package.
                for (String f : NEEDED_CONFIGS) {
                    copyConfig(lfs, confPath, f);
                }
                for (String f : OPTIONAL_CONFIGS) {
                    try {
                        copyConfig(lfs, confPath, f);
                    } catch (Throwable t) {
                        LOG.info("Error getting an optional config " + f + "; ignoring: " + t.getMessage());
                    }
                }
                createLlapDaemonConfig(lfs, confPath, conf, propsDirectOptions, options.getConfig());
                setUpLogAndMetricConfigs(lfs, logger, confPath);
                return null;
            }
        };

        @SuppressWarnings("unchecked")
        final NamedCallable<Void>[] asyncWork = new NamedCallable[] { downloadTez, copyUdfJars, copyLocalJars,
                copyAuxJars, copyConfigs };
        @SuppressWarnings("unchecked")
        final Future<Void>[] asyncResults = new Future[asyncWork.length];
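        // Kick off all packaging tasks in parallel; their results are collected
        // below, after the config JSON (which is generated serially) is written.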
        for (int i = 0; i < asyncWork.length; i++) {
            asyncResults[i] = asyncRunner.submit(asyncWork[i]);
        }

        // TODO: need to move from Python to Java for the rest of the script.
        JSONObject configs = createConfigJson(containerSize, cache, xmx, java_home);
        writeConfigJson(tmpDir, lfs, configs);

        if (LOG.isDebugEnabled()) {
            LOG.debug("Config generation took " + (System.nanoTime() - t0) + " ns");
        }
        for (int i = 0; i < asyncWork.length; i++) {
            final long t1 = System.nanoTime();
            asyncResults[i].get();
            final long t2 = System.nanoTime();
            if (LOG.isDebugEnabled()) {
                LOG.debug(asyncWork[i].getName() + " waited for " + (t2 - t1) + " ns");
            }
        }
        if (options.isStarting()) {
            String version = System.getenv("HIVE_VERSION");
            if (version == null || version.isEmpty()) {
                version = DateTime.now().toString("ddMMMyyyy");
            }

            String outputDir = options.getOutput();
            Path packageDir = null;
            if (outputDir == null) {
                outputDir = OUTPUT_DIR_PREFIX + version;
                packageDir = new Path(Paths.get(".").toAbsolutePath().toString(), OUTPUT_DIR_PREFIX + version);
            } else {
                packageDir = new Path(outputDir);
            }
            rc = runPackagePy(args, tmpDir, scriptParent, version, outputDir);
            if (rc == 0) {
                LlapSliderUtils.startCluster(conf, options.getName(), "llap-" + version + ".zip", packageDir,
                        HiveConf.getVar(conf, ConfVars.LLAP_DAEMON_QUEUE_NAME));
            }
        } else {
            rc = 0;
        }
    } finally {
        executor.shutdown();
        lfs.close();
        fs.close();
    }

    if (rc == 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Exiting successfully");
        }
    } else {
        LOG.info("Exiting with rc = " + rc);
    }
    return rc;
}
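
The driver above keeps its own Future array and calls get() in submission order, which is fine when every task must finish before proceeding. Worth noting is that the same ExecutorCompletionService also hands back futures in completion order via take(), so a slow task never delays reporting of the fast ones. A minimal sketch (the task names echo the example above; the sleeping bodies are placeholders):

import java.util.concurrent.*;

public class CompletionOrderDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        CompletionService<String> runner = new ExecutorCompletionService<String>(executor);

        String[] names = { "downloadTez", "copyLocalJars", "copyConfigs" };
        for (final String name : names) {
            runner.submit(new Callable<String>() {
                @Override
                public String call() throws Exception {
                    Thread.sleep((long) (Math.random() * 200)); // placeholder work
                    return name;
                }
            });
        }

        // take() blocks until some task finishes, regardless of submission order.
        for (int i = 0; i < names.length; i++) {
            System.out.println(runner.take().get() + " finished");
        }
        executor.shutdown();
    }
}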