Example usage for java.lang.management ManagementFactory getMemoryMXBean

List of usage examples for java.lang.management ManagementFactory getMemoryMXBean

Introduction

On this page you can find example usage for java.lang.management ManagementFactory getMemoryMXBean.

Prototype

public static MemoryMXBean getMemoryMXBean() 

Document

Returns the managed bean for the memory system of the Java virtual machine.
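
The snippet below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) of what this bean exposes: heap and non-heap usage snapshots as MemoryUsage objects. Note that MemoryUsage.getMax() returns -1 when the maximum is undefined.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;

public class MemoryMXBeanDemo {
    public static void main(String[] args) {
        MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();

        // Heap usage snapshot; getMax() returns -1 if the maximum is undefined
        MemoryUsage heap = memoryMXBean.getHeapMemoryUsage();
        System.out.println("heap used      = " + heap.getUsed());
        System.out.println("heap committed = " + heap.getCommitted());
        System.out.println("heap max       = " + heap.getMax());

        // Non-heap usage (metaspace, code cache, ...)
        MemoryUsage nonHeap = memoryMXBean.getNonHeapMemoryUsage();
        System.out.println("non-heap used  = " + nonHeap.getUsed());
    }
}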

Usage

From source file:org.apache.hadoop.hive.ql.exec.ExecMapper.java

@Override
public void configure(JobConf job) {
    // Allocate the bean at the beginning -
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax());

    isLogInfoEnabled = l4j.isInfoEnabled();

    try {
        l4j.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
        l4j.info("thread classpath = "
                + Arrays.asList(((URLClassLoader) Thread.currentThread().getContextClassLoader()).getURLs()));
    } catch (Exception e) {
        l4j.info("cannot get classpath: " + e.getMessage());
    }
    try {
        jc = job;
        execContext.setJc(jc);
        // create map and fetch operators
        MapredWork mrwork = Utilities.getMapRedWork(job);
        mo = new MapOperator();
        mo.setConf(mrwork);
        // initialize map operator
        mo.setChildren(job);
        l4j.info(mo.dump(0));
        // initialize map local work
        localWork = mrwork.getMapLocalWork();
        execContext.setLocalWork(localWork);

        mo.setExecContext(execContext);
        mo.initializeLocalWork(jc);
        mo.initialize(jc, null);

        if (localWork == null) {
            return;
        }

        //The following code is for mapjoin
        //initialize all the dummy ops
        l4j.info("Initializing dummy operator");
        List<Operator<? extends Serializable>> dummyOps = localWork.getDummyParentOp();
        for (Operator<? extends Serializable> dummyOp : dummyOps) {
            dummyOp.setExecContext(execContext);
            dummyOp.initialize(jc, null);
        }

    } catch (Throwable e) {
        abort = true;
        if (e instanceof OutOfMemoryError) {
            // will this be true here?
            // Don't create a new object if we are already out of memory
            throw (OutOfMemoryError) e;
        } else {
            throw new RuntimeException("Map operator initialization failed", e);
        }
    }
}

From source file:org.apache.hadoop.hive.ql.exec.ExecReducer.java

@Override
public void configure(JobConf job) {
    rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector keyObjectInspector;

    // Allocate the bean at the beginning -
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax());

    isLogInfoEnabled = l4j.isInfoEnabled();

    try {
        l4j.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
        l4j.info("thread classpath = "
                + Arrays.asList(((URLClassLoader) Thread.currentThread().getContextClassLoader()).getURLs()));
    } catch (Exception e) {
        l4j.info("cannot get classpath: " + e.getMessage());
    }
    jc = job;
    MapredWork gWork = Utilities.getMapRedWork(job);
    reducer = gWork.getReducer();
    reducer.setParentOperators(null); // clear out any parents as reducer is the root
    isTagged = gWork.getNeedsTagging();
    try {
        keyTableDesc = gWork.getKeyDesc();
        inputKeyDeserializer = (SerDe) ReflectionUtils.newInstance(keyTableDesc.getDeserializerClass(), null);
        inputKeyDeserializer.initialize(null, keyTableDesc.getProperties());
        keyObjectInspector = inputKeyDeserializer.getObjectInspector();
        valueTableDesc = new TableDesc[gWork.getTagToValueDesc().size()];
        for (int tag = 0; tag < gWork.getTagToValueDesc().size(); tag++) {
            // We should initialize the SerDe with the TypeInfo when available.
            valueTableDesc[tag] = gWork.getTagToValueDesc().get(tag);
            inputValueDeserializer[tag] = (SerDe) ReflectionUtils
                    .newInstance(valueTableDesc[tag].getDeserializerClass(), null);
            inputValueDeserializer[tag].initialize(null, valueTableDesc[tag].getProperties());
            valueObjectInspector[tag] = inputValueDeserializer[tag].getObjectInspector();

            ArrayList<ObjectInspector> ois = new ArrayList<ObjectInspector>();
            ois.add(keyObjectInspector);
            ois.add(valueObjectInspector[tag]);
            ois.add(PrimitiveObjectInspectorFactory.writableByteObjectInspector);
            rowObjectInspector[tag] = ObjectInspectorFactory
                    .getStandardStructObjectInspector(Arrays.asList(fieldNames), ois);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // initialize reduce operator tree
    try {
        l4j.info(reducer.dump(0));
        reducer.initialize(jc, rowObjectInspector);
    } catch (Throwable e) {
        abort = true;
        if (e instanceof OutOfMemoryError) {
            // Don't create a new object if we are already out of memory
            throw (OutOfMemoryError) e;
        } else {
            throw new RuntimeException("Reduce operator initialization failed", e);
        }
    }
}

From source file:org.apache.hadoop.hive.ql.exec.GroupByOperator.java

@Override
protected void initializeOp(Configuration hconf) throws HiveException {
    totalMemory = Runtime.getRuntime().totalMemory();
    numRowsInput = 0;
    numRowsHashTbl = 0;

    heartbeatInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESENDHEARTBEAT);
    countAfterReport = 0;

    ObjectInspector rowInspector = inputObjInspectors[0];

    // init keyFields
    keyFields = new ExprNodeEvaluator[conf.getKeys().size()];
    keyObjectInspectors = new ObjectInspector[conf.getKeys().size()];
    currentKeyObjectInspectors = new ObjectInspector[conf.getKeys().size()];
    for (int i = 0; i < keyFields.length; i++) {
        keyFields[i] = ExprNodeEvaluatorFactory.get(conf.getKeys().get(i));
        keyObjectInspectors[i] = keyFields[i].initialize(rowInspector);
        currentKeyObjectInspectors[i] = ObjectInspectorUtils.getStandardObjectInspector(keyObjectInspectors[i],
                ObjectInspectorCopyOption.WRITABLE);
    }

    // initialize unionExpr for reduce-side
    // reduce KEY has union field as the last field if there are distinct
    // aggregates in group-by.
    List<? extends StructField> sfs = ((StandardStructObjectInspector) rowInspector).getAllStructFieldRefs();
    if (sfs.size() > 0) {
        StructField keyField = sfs.get(0);
        if (keyField.getFieldName().toUpperCase().equals(Utilities.ReduceField.KEY.name())) {
            ObjectInspector keyObjInspector = keyField.getFieldObjectInspector();
            if (keyObjInspector instanceof StandardStructObjectInspector) {
                List<? extends StructField> keysfs = ((StandardStructObjectInspector) keyObjInspector)
                        .getAllStructFieldRefs();
                if (keysfs.size() > 0) {
                    // the last field is the union field, if any
                    StructField sf = keysfs.get(keysfs.size() - 1);
                    if (sf.getFieldObjectInspector().getCategory().equals(ObjectInspector.Category.UNION)) {
                        unionExprEval = ExprNodeEvaluatorFactory.get(new ExprNodeColumnDesc(
                                TypeInfoUtils.getTypeInfoFromObjectInspector(sf.getFieldObjectInspector()),
                                keyField.getFieldName() + "." + sf.getFieldName(), null, false));
                        unionExprEval.initialize(rowInspector);
                    }
                }
            }
        }
    }
    // init aggregationParameterFields
    ArrayList<AggregationDesc> aggrs = conf.getAggregators();
    aggregationParameterFields = new ExprNodeEvaluator[aggrs.size()][];
    aggregationParameterObjectInspectors = new ObjectInspector[aggrs.size()][];
    aggregationParameterStandardObjectInspectors = new ObjectInspector[aggrs.size()][];
    aggregationParameterObjects = new Object[aggrs.size()][];
    aggregationIsDistinct = new boolean[aggrs.size()];
    for (int i = 0; i < aggrs.size(); i++) {
        AggregationDesc aggr = aggrs.get(i);
        ArrayList<ExprNodeDesc> parameters = aggr.getParameters();
        aggregationParameterFields[i] = new ExprNodeEvaluator[parameters.size()];
        aggregationParameterObjectInspectors[i] = new ObjectInspector[parameters.size()];
        aggregationParameterStandardObjectInspectors[i] = new ObjectInspector[parameters.size()];
        aggregationParameterObjects[i] = new Object[parameters.size()];
        for (int j = 0; j < parameters.size(); j++) {
            aggregationParameterFields[i][j] = ExprNodeEvaluatorFactory.get(parameters.get(j));
            aggregationParameterObjectInspectors[i][j] = aggregationParameterFields[i][j]
                    .initialize(rowInspector);
            if (unionExprEval != null) {
                String[] names = parameters.get(j).getExprString().split("\\.");
                // parameters of the form : KEY.colx:t.coly
                if (Utilities.ReduceField.KEY.name().equals(names[0])) {
                    String name = names[names.length - 2];
                    int tag = Integer.parseInt(name.split("\\:")[1]);
                    if (aggr.getDistinct()) {
                        // is distinct
                        Set<Integer> set = distinctKeyAggrs.get(tag);
                        if (null == set) {
                            set = new HashSet<Integer>();
                            distinctKeyAggrs.put(tag, set);
                        }
                        if (!set.contains(i)) {
                            set.add(i);
                        }
                    } else {
                        Set<Integer> set = nonDistinctKeyAggrs.get(tag);
                        if (null == set) {
                            set = new HashSet<Integer>();
                            nonDistinctKeyAggrs.put(tag, set);
                        }
                        if (!set.contains(i)) {
                            set.add(i);
                        }
                    }
                } else {
                    // will be VALUE._COLx
                    if (!nonDistinctAggrs.contains(i)) {
                        nonDistinctAggrs.add(i);
                    }
                }
            } else {
                if (aggr.getDistinct()) {
                    aggregationIsDistinct[i] = true;
                }
            }
            aggregationParameterStandardObjectInspectors[i][j] = ObjectInspectorUtils
                    .getStandardObjectInspector(aggregationParameterObjectInspectors[i][j],
                            ObjectInspectorCopyOption.WRITABLE);
            aggregationParameterObjects[i][j] = null;
        }
        if (parameters.size() == 0) {
            // for ex: count(*)
            if (!nonDistinctAggrs.contains(i)) {
                nonDistinctAggrs.add(i);
            }
        }
    }

    // init aggregationClasses
    aggregationEvaluators = new GenericUDAFEvaluator[conf.getAggregators().size()];
    for (int i = 0; i < aggregationEvaluators.length; i++) {
        AggregationDesc agg = conf.getAggregators().get(i);
        aggregationEvaluators[i] = agg.getGenericUDAFEvaluator();
    }

    // init objectInspectors
    int totalFields = keyFields.length + aggregationEvaluators.length;
    objectInspectors = new ArrayList<ObjectInspector>(totalFields);
    for (ExprNodeEvaluator keyField : keyFields) {
        objectInspectors.add(null);
    }
    for (int i = 0; i < aggregationEvaluators.length; i++) {
        ObjectInspector roi = aggregationEvaluators[i].init(conf.getAggregators().get(i).getMode(),
                aggregationParameterObjectInspectors[i]);
        objectInspectors.add(roi);
    }

    bucketGroup = conf.getBucketGroup();
    aggregationsParametersLastInvoke = new Object[conf.getAggregators().size()][];
    if (conf.getMode() != GroupByDesc.Mode.HASH || bucketGroup) {
        aggregations = newAggregations();
        hashAggr = false;
    } else {
        hashAggregations = new HashMap<KeyWrapper, AggregationBuffer[]>(256);
        aggregations = newAggregations();
        hashAggr = true;
        keyPositionsSize = new ArrayList<Integer>();
        aggrPositions = new ArrayList<varLenFields>();
        groupbyMapAggrInterval = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL);

        // compare every groupbyMapAggrInterval rows
        numRowsCompareHashAggr = groupbyMapAggrInterval;
        minReductionHashAggr = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION);
        groupKeyIsNotReduceKey = conf.getGroupKeyNotReductionKey();
        if (groupKeyIsNotReduceKey) {
            keysCurrentGroup = new HashSet<KeyWrapper>();
        }
    }

    fieldNames = conf.getOutputColumnNames();

    for (int i = 0; i < keyFields.length; i++) {
        objectInspectors.set(i, currentKeyObjectInspectors[i]);
    }

    // Generate key names
    ArrayList<String> keyNames = new ArrayList<String>(keyFields.length);
    for (int i = 0; i < keyFields.length; i++) {
        keyNames.add(fieldNames.get(i));
    }
    newKeyObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(keyNames,
            Arrays.asList(keyObjectInspectors));
    currentKeyObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(keyNames,
            Arrays.asList(currentKeyObjectInspectors));

    outputObjInspector = ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, objectInspectors);

    keyWrapperFactory = new KeyWrapperFactory(keyFields, keyObjectInspectors, currentKeyObjectInspectors);

    newKeys = keyWrapperFactory.getKeyWrapper();

    firstRow = true;
    // estimate the number of hash table entries based on the size of each entry.
    // Since the size of an entry is not known, estimate it based on the number of entries
    if (hashAggr) {
        computeMaxEntriesHashAggr(hconf);
    }
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
    memoryThreshold = this.getConf().getMemoryThreshold();
    initializeChildren(hconf);
}
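
GroupByOperator above caches the bean's heap maximum (maxMemory) together with a configured memoryThreshold. A hypothetical, simplified sketch of how such a threshold check against current heap usage could look; the names and structure are illustrative, not Hive's actual flush logic:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;

// Hypothetical helper, not Hive code: act once heap usage crosses a
// configured fraction of the maximum heap.
public class HeapThresholdCheck {
    private final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    private final long maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
    private final double memoryThreshold; // e.g. 0.9 = act at 90% of max heap

    public HeapThresholdCheck(double memoryThreshold) {
        this.memoryThreshold = memoryThreshold;
    }

    public boolean shouldFlush() {
        long used = memoryMXBean.getHeapMemoryUsage().getUsed();
        return maxMemory > 0 && used > (long) (maxMemory * memoryThreshold);
    }
}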

From source file:org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler.java

/**
 * Constructor expects a LogHelper object in addition to the max percent
 * of heap memory which can be consumed before a MapJoinMemoryExhaustionException
 * is thrown.
 */
public MapJoinMemoryExhaustionHandler(LogHelper console, double maxMemoryUsage) {
    this.console = console;
    this.maxMemoryUsage = maxMemoryUsage;
    this.memoryMXBean = ManagementFactory.getMemoryMXBean();
    this.maxHeapSize = getMaxHeapSize(memoryMXBean);
    percentageNumberFormat = NumberFormat.getInstance();
    percentageNumberFormat.setMinimumFractionDigits(2);
    LOG.info("JVM Max Heap Size: " + this.maxHeapSize);
}

From source file:org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler.java

public static long getMaxHeapSize() {
    return getMaxHeapSize(ManagementFactory.getMemoryMXBean());
}
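
Both getMaxHeapSize overloads ultimately read the heap maximum from the bean. A small stand-alone sketch (assumed names, not part of MapJoinMemoryExhaustionHandler) showing the defensive handling needed because MemoryUsage.getMax() can return -1:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class MaxHeapSizeExample {
    public static void main(String[] args) {
        MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        long max = heap.getMax(); // -1 when the maximum is undefined
        if (max < 0) {
            System.out.println("Max heap size undefined; committed = " + heap.getCommitted());
        } else {
            System.out.println("JVM Max Heap Size: " + max);
        }
    }
}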

From source file:org.apache.hadoop.hive.ql.exec.MapredLocalTask.java

public int executeFromChildJVM(DriverContext driverContext) {
    // check the local work
    if (work == null) {
        return -1;
    }
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    long startTime = System.currentTimeMillis();
    console.printInfo(
            Utilities.now() + "\tStarting to launch local task to process map join;\tmaximum memory = "
                    + memoryMXBean.getHeapMemoryUsage().getMax());
    fetchOperators = new HashMap<String, FetchOperator>();
    Map<FetchOperator, JobConf> fetchOpJobConfMap = new HashMap<FetchOperator, JobConf>();
    execContext.setJc(job);
    // set the local work, so all the operator can get this context
    execContext.setLocalWork(work);
    boolean inputFileChangeSenstive = work.getInputFileChangeSensitive();
    try {

        initializeOperators(fetchOpJobConfMap);
        // for each big table's bucket, call the start forward
        if (inputFileChangeSenstive) {
            for (LinkedHashMap<String, ArrayList<String>> bigTableBucketFiles : work.getBucketMapjoinContext()
                    .getAliasBucketFileNameMapping().values()) {
                for (String bigTableBucket : bigTableBucketFiles.keySet()) {
                    startForward(inputFileChangeSenstive, bigTableBucket);
                }
            }
        } else {
            startForward(inputFileChangeSenstive, null);
        }
        long currentTime = System.currentTimeMillis();
        long elapsed = currentTime - startTime;
        console.printInfo(
                Utilities.now() + "\tEnd of local task; Time Taken: " + Utilities.showTime(elapsed) + " sec.");
    } catch (Throwable e) {
        if (e instanceof OutOfMemoryError
                || (e instanceof HiveException && e.getMessage().equals("RunOutOfMeomoryUsage"))) {
            // Don't create a new object if we are already out of memory
            return 3;
        } else {
            l4j.error("Hive Runtime Error: Map local work failed");
            e.printStackTrace();
            return 2;
        }
    }
    return 0;
}

From source file:org.apache.hadoop.hive.ql.exec.mr.ExecDriver.java

@SuppressWarnings("unchecked")
public static void main(String[] args) throws IOException, HiveException {

    String planFileName = null;
    String jobConfFileName = null;
    boolean noLog = false;
    String files = null;
    String libjars = null;
    boolean localtask = false;
    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-plan")) {
                planFileName = args[++i];
            } else if (args[i].equals("-jobconffile")) {
                jobConfFileName = args[++i];
            } else if (args[i].equals("-nolog")) {
                noLog = true;
            } else if (args[i].equals("-files")) {
                files = args[++i];
            } else if (args[i].equals("-libjars")) {
                libjars = args[++i];
            } else if (args[i].equals("-localtask")) {
                localtask = true;
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }

    JobConf conf;
    if (localtask) {
        conf = new JobConf(MapredLocalTask.class);
    } else {
        conf = new JobConf(ExecDriver.class);
    }

    if (jobConfFileName != null) {
        conf.addResource(new Path(jobConfFileName));
    }

    // Initialize the resources from command line
    if (files != null) {
        conf.set("tmpfiles", files);
    }

    if (libjars != null) {
        conf.set("tmpjars", libjars);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (hadoopAuthToken != null) {
            conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
        }
    }

    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);

    String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim();
    if (queryId.isEmpty()) {
        queryId = "unknown-" + System.currentTimeMillis();
        HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, queryId);
    }
    System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);

    LogUtils.registerLoggingContext(conf);

    if (noLog) {
        // If started from main(), and noLog is on, we should not output
        // any logs. To turn the log on, please set -Dtest.silent=false
        org.apache.logging.log4j.Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
        NullAppender appender = NullAppender.createNullAppender();
        appender.addToLogger(logger.getName(), Level.ERROR);
        appender.start();
    } else {
        setupChildLog4j(conf);
    }

    Logger LOG = LoggerFactory.getLogger(ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);

    if (planFileName == null) {
        console.printError("Must specify Plan File Name");
        printUsage();
    }

    // print out the location of the log file for the user so
    // that it's easy to find reason for local mode execution failures
    for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger()).getAppenders()
            .values()) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
        } else if (appender instanceof RollingFileAppender) {
            console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
        }
    }

    // the plan file should always be in local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);

    // this is workaround for hadoop-17 - libjars are not added to classpath of the
    // child process. so we add it here explicitly
    try {
        // see also - code in CliDriver.java
        ClassLoader loader = conf.getClassLoader();
        if (StringUtils.isNotBlank(libjars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(libjars, ","));
        }
        conf.setClassLoader(loader);
        // Also set this to the Thread ContextClassLoader, so new threads will inherit
        // this class loader, and propagate into newly created Configurations by those
        // new threads.
        Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
        memoryMXBean = ManagementFactory.getMemoryMXBean();
        MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class);
        MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
        ret = ed.executeInProcess(new DriverContext());

    } else {
        MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class);
        ExecDriver ed = new ExecDriver(plan, conf, isSilent);
        ret = ed.execute(new DriverContext());
    }

    if (ret != 0) {
        System.exit(ret);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask.java

public int executeInProcess(DriverContext driverContext) {
    // check the local work
    if (work == null) {
        return -1;
    }

    if (execContext == null) {
        execContext = new ExecMapperContext(job);
    }

    memoryMXBean = ManagementFactory.getMemoryMXBean();
    long startTime = System.currentTimeMillis();
    console.printInfo(
            Utilities.now() + "\tStarting to launch local task to process map join;\tmaximum memory = "
                    + memoryMXBean.getHeapMemoryUsage().getMax());
    execContext.setJc(job);
    // set the local work, so all the operator can get this context
    execContext.setLocalWork(work);
    try {
        startForward(null);
        long currentTime = System.currentTimeMillis();
        long elapsed = currentTime - startTime;
        console.printInfo(
                Utilities.now() + "\tEnd of local task; Time Taken: " + Utilities.showTime(elapsed) + " sec.");
    } catch (Throwable throwable) {
        if (throwable instanceof OutOfMemoryError || (throwable instanceof MapJoinMemoryExhaustionError)) {
            l4j.error("Hive Runtime Error: Map local work exhausted memory", throwable);
            return 3;
        } else {
            l4j.error("Hive Runtime Error: Map local work failed", throwable);
            return 2;
        }
    }
    return 0;
}

From source file:org.apache.hadoop.hive.ql.exec.mr2.MR2ExecDriver.java

@SuppressWarnings("unchecked")
public static void main(String[] args) throws IOException, HiveException {
    String planFileName = null;
    String jobConfFileName = null;
    boolean noLog = false;
    String files = null;
    boolean localtask = false;
    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-plan")) {
                planFileName = args[++i];
            } else if (args[i].equals("-jobconffile")) {
                jobConfFileName = args[++i];
            } else if (args[i].equals("-nolog")) {
                noLog = true;
            } else if (args[i].equals("-files")) {
                files = args[++i];
            } else if (args[i].equals("-localtask")) {
                localtask = true;
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }

    JobConf conf;
    if (localtask) {
        conf = new JobConf(MapredLocalTask.class);
    } else {
        conf = new JobConf(MR2ExecDriver.class);
    }

    if (jobConfFileName != null) {
        conf.addResource(new Path(jobConfFileName));
    }

    if (files != null) {
        conf.set("tmpfiles", files);
    }

    if (UserGroupInformation.isSecurityEnabled()) {
        String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (hadoopAuthToken != null) {
            conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
        }
    }

    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);

    String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim();
    if (queryId.isEmpty()) {
        queryId = "unknown-" + System.currentTimeMillis();
    }
    System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);

    if (noLog) {
        // If started from main(), and noLog is on, we should not output
        // any logs. To turn the log on, please set -Dtest.silent=false
        BasicConfigurator.resetConfiguration();
        BasicConfigurator.configure(new NullAppender());
    } else {
        setupChildLog4j(conf);
    }

    Log LOG = LogFactory.getLog(MR2ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);

    if (planFileName == null) {
        console.printError("Must specify Plan File Name");
        printUsage();
    }

    // print out the location of the log file for the user so
    // that it's easy to find reason for local mode execution failures
    for (Appender appender : Collections
            .list((Enumeration<Appender>) LogManager.getRootLogger().getAllAppenders())) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
        }
    }

    // the plan file should always be in local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);

    // this is workaround for hadoop-17 - libjars are not added to classpath of the
    // child process. so we add it here explicitly

    String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS);
    String addedJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEADDEDJARS);
    try {
        // see also - code in CliDriver.java
        ClassLoader loader = conf.getClassLoader();
        if (StringUtils.isNotBlank(auxJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","));
        }
        if (StringUtils.isNotBlank(addedJars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(addedJars, ","));
        }
        conf.setClassLoader(loader);
        // Also set this to the Thread ContextClassLoader, so new threads will inherit
        // this class loader, and propagate into newly created Configurations by those
        // new threads.
        Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
        memoryMXBean = ManagementFactory.getMemoryMXBean();
        MapredLocalWork plan = Utilities.deserializePlan(pathData, MapredLocalWork.class, conf);
        MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
        ret = ed.executeInProcess(new DriverContext());

    } else {
        MR2Work plan = Utilities.deserializePlan(pathData, MR2Work.class, conf);
        MR2ExecDriver ed = new MR2ExecDriver(plan, conf, isSilent);
        ret = ed.execute(new DriverContext());
    }

    if (ret != 0) {
        System.exit(ret);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.tez.HashTableLoader.java

@Override
public void load(MapJoinTableContainer[] mapJoinTables, MapJoinTableContainerSerDe[] mapJoinTableSerdes)
        throws HiveException {

    Map<Integer, String> parentToInput = desc.getParentToInput();
    Map<Integer, Long> parentKeyCounts = desc.getParentKeyCounts();

    boolean useOptimizedTables = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE);
    boolean useHybridGraceHashJoin = desc.isHybridHashJoin();
    boolean isFirstKey = true;

    // Get the total available memory from memory manager
    long totalMapJoinMemory = desc.getMemoryNeeded();
    if (totalMapJoinMemory <= 0) {
        totalMapJoinMemory = HiveConf.getLongVar(hconf,
                HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
    }

    long processMaxMemory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    if (totalMapJoinMemory > processMaxMemory) {
        float hashtableMemoryUsage = HiveConf.getFloatVar(hconf,
                HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
        LOG.warn("totalMapJoinMemory value of " + totalMapJoinMemory
                + " is greater than the max memory size of " + processMaxMemory);
        // Don't want to attempt to grab more memory than we have available .. percentage is a bit arbitrary
        totalMapJoinMemory = (long) (processMaxMemory * hashtableMemoryUsage);
    }

    // Only applicable to n-way Hybrid Grace Hash Join
    HybridHashTableConf nwayConf = null;
    long totalSize = 0;
    int biggest = 0; // position of the biggest small table
    Map<Integer, Long> tableMemorySizes = null;
    if (useHybridGraceHashJoin && mapJoinTables.length > 2) {
        // Create a Conf for n-way HybridHashTableContainers
        nwayConf = new HybridHashTableConf();

        // Find the biggest small table; also calculate total data size of all small tables
        long maxSize = Long.MIN_VALUE; // the size of the biggest small table
        for (int pos = 0; pos < mapJoinTables.length; pos++) {
            if (pos == desc.getPosBigTable()) {
                continue;
            }
            long smallTableSize = desc.getParentDataSizes().get(pos);
            totalSize += smallTableSize;
            if (maxSize < smallTableSize) {
                maxSize = smallTableSize;
                biggest = pos;
            }
        }

        tableMemorySizes = divideHybridHashTableMemory(mapJoinTables, desc, totalSize, totalMapJoinMemory);
        // Using biggest small table, calculate number of partitions to create for each small table
        long memory = tableMemorySizes.get(biggest);
        int numPartitions = 0;
        try {
            numPartitions = HybridHashTableContainer.calcNumPartitions(memory, maxSize,
                    HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS),
                    HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE), nwayConf);
        } catch (IOException e) {
            throw new HiveException(e);
        }
        nwayConf.setNumberOfPartitions(numPartitions);
    }

    for (int pos = 0; pos < mapJoinTables.length; pos++) {
        if (pos == desc.getPosBigTable()) {
            continue;
        }

        String inputName = parentToInput.get(pos);
        LogicalInput input = tezContext.getInput(inputName);

        try {
            input.start();
            tezContext.getTezProcessorContext().waitForAnyInputReady(Collections.<Input>singletonList(input));
        } catch (Exception e) {
            throw new HiveException(e);
        }

        try {
            KeyValueReader kvReader = (KeyValueReader) input.getReader();
            MapJoinObjectSerDeContext keyCtx = mapJoinTableSerdes[pos].getKeyContext(),
                    valCtx = mapJoinTableSerdes[pos].getValueContext();
            if (useOptimizedTables) {
                ObjectInspector keyOi = keyCtx.getSerDe().getObjectInspector();
                if (!MapJoinBytesTableContainer.isSupportedKey(keyOi)) {
                    if (isFirstKey) {
                        useOptimizedTables = false;
                        LOG.info(describeOi(
                                "Not using optimized hash table. "
                                        + "Only a subset of mapjoin keys is supported. Unsupported key: ",
                                keyOi));
                    } else {
                        throw new HiveException(describeOi(
                                "Only a subset of mapjoin keys is supported. Unsupported key: ", keyOi));
                    }
                }
            }
            isFirstKey = false;
            Long keyCountObj = parentKeyCounts.get(pos);
            long keyCount = (keyCountObj == null) ? -1 : keyCountObj.longValue();

            long memory = 0;
            if (useHybridGraceHashJoin) {
                if (mapJoinTables.length > 2) {
                    memory = tableMemorySizes.get(pos);
                } else { // binary join
                    memory = totalMapJoinMemory;
                }
            }

            MapJoinTableContainer tableContainer = useOptimizedTables
                    ? (useHybridGraceHashJoin
                            ? new HybridHashTableContainer(hconf, keyCount, memory,
                                    desc.getParentDataSizes().get(pos), nwayConf)
                            : new MapJoinBytesTableContainer(hconf, valCtx, keyCount, 0))
                    : new HashMapWrapper(hconf, keyCount);
            LOG.info("Using tableContainer " + tableContainer.getClass().getSimpleName());

            while (kvReader.next()) {
                tableContainer.putRow(keyCtx, (Writable) kvReader.getCurrentKey(), valCtx,
                        (Writable) kvReader.getCurrentValue());
            }
            tableContainer.seal();
            mapJoinTables[pos] = tableContainer;
        } catch (Exception e) {
            throw new HiveException(e);
        }
    }
}
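
The loader above clamps its map-join memory budget to a fraction of the process heap maximum reported by the MemoryMXBean. A generic sketch of that clamping pattern, with assumed names and detached from Hive's configuration handling:

import java.lang.management.ManagementFactory;

public class MemoryBudgetClamp {
    // Hypothetical helper: never budget more than a fraction of the max heap
    // reported by the MemoryMXBean.
    public static long clampToHeap(long requestedBytes, float maxUsageFraction) {
        long processMaxMemory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
        if (processMaxMemory > 0 && requestedBytes > processMaxMemory) {
            return (long) (processMaxMemory * maxUsageFraction);
        }
        return requestedBytes;
    }
}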