Example usage for org.apache.hadoop.conf Configuration setBoolean

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setBoolean from open-source projects.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
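The value is stored under the given property name as the string "true" or "false", and the companion getBoolean reads it back with a default. A minimal, self-contained sketch (the property name my.feature.enabled is hypothetical, chosen only for illustration):

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a boolean under a property name; Configuration keeps it
        // as the string "true" or "false".
        conf.setBoolean("my.feature.enabled", true);
        // Read it back; the second argument is the default returned when
        // the property is unset.
        boolean enabled = conf.getBoolean("my.feature.enabled", false);
        System.out.println("my.feature.enabled = " + enabled);
    }
}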

Usage

From source file:com.linkedin.cubert.operator.TestOperators.java

License:Open Source License

@SuppressWarnings("rawtypes")
@BeforeTest
void setupConf() throws IOException {
    Configuration conf = new JobConf();
    conf.setBoolean(CubertStrings.USE_COMPACT_SERIALIZATION, false);

    PhaseContext.create((Mapper.Context) null, conf);
    PhaseContext.create((Reducer.Context) null, conf);
}

From source file:com.marklogic.contentpump.Command.java

License:Apache License

protected static boolean isStreaming(CommandLine cmdline, Configuration conf) {
    if (conf.get(MarkLogicConstants.OUTPUT_STREAMING) != null) {
        return conf.getBoolean(MarkLogicConstants.OUTPUT_STREAMING, false);
    }
    String arg = null;
    if (cmdline.hasOption(STREAMING)) {
        arg = cmdline.getOptionValue(STREAMING);
        if (arg == null || arg.equalsIgnoreCase("true")) {
            InputType inputType = getInputType(cmdline);
            if (inputType != InputType.DOCUMENTS) {
                LOG.warn("Streaming option is not applicable to input " + "type " + inputType);
                conf.setBoolean(MarkLogicConstants.OUTPUT_STREAMING, false);
                return false;
            } else {
                conf.setBoolean(MarkLogicConstants.OUTPUT_STREAMING, true);
                return true;
            }
        }
    }
    conf.setBoolean(MarkLogicConstants.OUTPUT_STREAMING, false);
    return false;
}

From source file:com.marklogic.contentpump.Command.java

License:Apache License

static void applyCopyConfigOptions(Configuration conf, CommandLine cmdline) {
    if (cmdline.hasOption(COPY_COLLECTIONS)) {
        String arg = cmdline.getOptionValue(COPY_COLLECTIONS);
        if (arg == null || arg.equalsIgnoreCase("true")) {
            conf.setBoolean(CONF_COPY_COLLECTIONS, true);
        } else if (arg.equalsIgnoreCase("false")) {
            conf.setBoolean(CONF_COPY_COLLECTIONS, false);
        } else {
            throw new IllegalArgumentException(
                    "Unrecognized option argument for " + COPY_COLLECTIONS + ": " + arg);
        }
    } else {
        conf.set(CONF_COPY_COLLECTIONS, DEFAULT_COPY_COLLECTIONS);
    }
    if (cmdline.hasOption(COPY_PERMISSIONS)) {
        String arg = cmdline.getOptionValue(COPY_PERMISSIONS);
        if (arg == null || arg.equalsIgnoreCase("true")) {
            conf.setBoolean(CONF_COPY_PERMISSIONS, true);
        } else if (arg.equalsIgnoreCase("false")) {
            conf.setBoolean(CONF_COPY_PERMISSIONS, false);
        } else {
            throw new IllegalArgumentException(
                    "Unrecognized option argument for " + COPY_PERMISSIONS + ": " + arg);
        }
    } else {
        conf.set(CONF_COPY_PERMISSIONS, DEFAULT_COPY_PERMISSIONS);
    }
    if (cmdline.hasOption(COPY_PROPERTIES)) {
        String arg = cmdline.getOptionValue(COPY_PROPERTIES);
        if (arg == null || arg.equalsIgnoreCase("true")) {
            conf.setBoolean(CONF_COPY_PROPERTIES, true);
        } else {
            conf.setBoolean(CONF_COPY_PROPERTIES, false);
        }
    } else {
        conf.set(CONF_COPY_PROPERTIES, DEFAULT_COPY_PROPERTIES);
    }
    if (cmdline.hasOption(COPY_QUALITY)) {
        String arg = cmdline.getOptionValue(COPY_QUALITY);
        if (arg == null || arg.equalsIgnoreCase("true")) {
            conf.setBoolean(CONF_COPY_QUALITY, true);
        } else if (arg.equalsIgnoreCase("false")) {
            conf.setBoolean(CONF_COPY_QUALITY, false);
        } else {
            throw new IllegalArgumentException("Unrecognized option argument for " + COPY_QUALITY + ": " + arg);
        }
    } else {
        conf.set(CONF_COPY_QUALITY, DEFAULT_COPY_QUALITY);
    }
}

From source file:com.marklogic.contentpump.ContentPump.java

License:Apache License

public static int runCommand(String[] args) throws IOException {
    // get command
    String cmd = args[0];
    if (cmd.equalsIgnoreCase("help")) {
        printUsage();
        return 1;
    } else if (cmd.equalsIgnoreCase("version")) {
        logVersions();
        return 1;
    }

    Command command = Command.forName(cmd);

    // get options arguments
    String[] optionArgs = Arrays.copyOfRange(args, 1, args.length);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Command: " + command);
        StringBuilder buf = new StringBuilder();
        for (String arg : optionArgs) {
            buf.append(arg);
            buf.append(' ');
        }
        LOG.debug("Arguments: " + buf);
    }

    // parse hadoop specific options
    Configuration conf = new Configuration();
    GenericOptionsParser genericParser = new GenericOptionsParser(conf, optionArgs);
    String[] remainingArgs = genericParser.getRemainingArgs();

    // parse command specific options
    CommandlineOptions options = new CommandlineOptions();
    command.configOptions(options);
    CommandLineParser parser = new GnuParser();
    CommandLine cmdline;
    try {
        cmdline = parser.parse(options, remainingArgs);
    } catch (Exception e) {
        LOG.error("Error parsing command arguments: ");
        LOG.error(e.getMessage());
        // Print the command usage message and exit.    
        command.printUsage(command, options.getPublicOptions());
        return 1; // Exit on exception here.
    }

    for (String arg : cmdline.getArgs()) {
        LOG.error("Unrecognized argument: " + arg);
        // Print the command usage message and exit.
        command.printUsage(command, options.getPublicOptions());
        return 1; // Exit on exception here.
    }

    // check running mode and hadoop conf dir configuration 
    String mode = cmdline.getOptionValue(MODE);
    String hadoopConfDir = System.getenv(HADOOP_CONFDIR_ENV_NAME);
    if (cmdline.hasOption(HADOOP_CONF_DIR)) {
        hadoopConfDir = cmdline.getOptionValue(HADOOP_CONF_DIR);
    }

    boolean distributed = hadoopConfDir != null && (mode == null || mode.equals(MODE_DISTRIBUTED));
    if (MODE_DISTRIBUTED.equalsIgnoreCase(mode) && !distributed) {
        LOG.error("Cannot run in distributed mode.  HADOOP_CONF_DIR is " + "not configured.");
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Running in: " + (distributed ? "distributed " : "local") + "mode");
        if (distributed) {
            LOG.debug("HADOOP_CONF_DIR is set to " + hadoopConfDir);
        }
    }
    conf.set(EXECUTION_MODE, distributed ? MODE_DISTRIBUTED : MODE_LOCAL);

    if (distributed) {
        if (!cmdline.hasOption(SPLIT_INPUT) && Command.getInputType(cmdline).equals(InputType.DELIMITED_TEXT)) {
            conf.setBoolean(ConfigConstants.CONF_SPLIT_INPUT, true);
        }
        File hdConfDir = new File(hadoopConfDir);
        try {
            checkHadoopConfDir(hdConfDir);
        } catch (IllegalArgumentException e) {
            LOG.error("Error found with Hadoop home setting", e);
            System.err.println(e.getMessage());
            return 1;
        }
        // set new class loader based on Hadoop Conf Dir
        try {
            setClassLoader(hdConfDir, conf);
        } catch (Exception e) {
            LOG.error("Error configuring class loader", e);
            System.err.println(e.getMessage());
            return 1;
        }
    } else { // running in local mode
        // Tell Hadoop that we are running in local mode.  This is useful
        // when the user has Hadoop home or their Hadoop conf dir in their
        // classpath but want to run in local mode.
        conf.set(CONF_MAPREDUCE_JOBTRACKER_ADDRESS, "local");
    }

    // create job
    Job job = null;
    try {
        if (distributed) {
            // So far all jobs created by mlcp are map only,
            // so set number of reduce tasks to 0.
            conf.setInt("mapreduce.job.reduces", 0);
            // No speculative runs since speculative tasks don't get to 
            // clean up sessions properly
            conf.setBoolean("mapreduce.map.speculative", false);
        } else {
            // set working directory
            conf.set(CONF_MAPREDUCE_JOB_WORKING_DIR, System.getProperty("user.dir"));
        }
        job = command.createJob(conf, cmdline);
    } catch (Exception e) {
        // Print exception message.
        e.printStackTrace();
        return 1;
    }

    LOG.info("Job name: " + job.getJobName());
    // run job
    try {
        if (distributed) {
            // submit job
            submitJob(job);
        } else {
            runJobLocally(job, cmdline, command);
        }
        return 0;
    } catch (Exception e) {
        LOG.error("Error running a ContentPump job", e);
        e.printStackTrace(System.err);
        return 1;
    }
}

From source file:com.marklogic.contentpump.DocumentInputFormat.java

License:Apache License

protected void getForestSplits(JobContext jobContext, ResultSequence result, List<ForestSplit> forestSplits,
        List<String> ruleUris) throws IOException {
    Configuration jobConf = jobContext.getConfiguration();
    super.getForestSplits(jobContext, result, forestSplits, ruleUris);
    // Third while loop: audit settings
    while (result.hasNext()) {
        ResultItem item = result.next();
        if (ItemType.XS_STRING != item.getItemType()) {
            throw new IOException("Unexpected item type " + item.getItemType().toString());
        }
        String itemStr = ((XSString) item.getItem()).asString();
        if ("AUDIT".equals(itemStr)) {
            continue;
        } else if ("mlcp-start".equals(itemStr)) {
            mlcpStartEventEnabled = true;
        } else if ("mlcp-finish".equalsIgnoreCase(itemStr)) {
            mlcpFinishEventEnabled = true;
        } else {
            throw new IOException("Unrecognized audit event " + itemStr);
        }
    }
    if (ruleUris != null && ruleUris.size() > 0) {
        AuditUtil.prepareAuditMlcpFinish(jobConf, ruleUris.size());
        if (LOG.isDebugEnabled()) {
            // TODO: Use this version if only JAVA 8 is supported
            // String logMessage = String.join(", ", ruleUris);
            LOG.debug("Redaction rules applied: " + StringUtils.join(ruleUris, ", "));
        }
    }
    if (mlcpStartEventEnabled) {
        AuditUtil.auditMlcpStart(jobConf, jobContext.getJobName());
    }
    jobConf.setBoolean(ConfigConstants.CONF_AUDIT_MLCPFINISH_ENABLED, mlcpFinishEventEnabled);
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

private void initMiniDFSCluster(Configuration conf, int numDataNodes, StorageType storageType, boolean format,
        boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
        boolean manageDataDfsDirs, StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
        String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode,
        boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
        boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays)
        throws IOException {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
        instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling checkpointing in the Standby node "
                + "since no HTTP ports have been specified.");
        conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling log-roll triggering in the "
                + "Standby node since no IPC ports have been specified.");
        conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    federation = nnTopology.isFederated();
    try {
        createNameNodesAndSetConf(nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
                enableManagedDfsDirsRedundancy, format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
        LOG.error("IOE creating namenodes. Permissions dump:\n" + createPermissionsDiagnosisString(data_dir));
        throw ioe;
    }
    if (format) {
        if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
            throw new IOException(
                    "Cannot remove data directory: " + data_dir + createPermissionsDiagnosisString(data_dir));
        }
    }

    if (startOpt == StartupOption.RECOVER) {
        return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
            dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, simulatedCapacities, setupHostsFile,
            checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:com.mongodb.hadoop.examples.snmp.SnmpStatisticMongoTool.java

License:Apache License

public static void main(String[] args) throws Exception {
    boolean use_shards = true;
    boolean use_chunks = false;
    final Configuration conf = new Configuration();
    String output_table = null;

    MongoConfigUtil.setInputURI(conf, "mongodb://localhost:30000/test.snmp");
    conf.setBoolean(MongoConfigUtil.SPLITS_USE_SHARDS, use_shards);
    conf.setBoolean(MongoConfigUtil.SPLITS_USE_CHUNKS, use_chunks);
    if (use_chunks) {
        if (use_shards)
            output_table = "snmp_with_shards_and_chunks";
        else
            output_table = "snmp_with_chunks";
    } else {
        if (use_shards)
            output_table = "snmp_with_shards";
        else
            output_table = "snmp_no_splits";
    }
    MongoConfigUtil.setOutputURI(conf, "mongodb://localhost:30000/test." + output_table);
    final Job job = new Job(conf, "snmp analysis " + output_table);
    job.setJarByClass(SnmpStatisticMongoTool.class);
    job.setMapperClass(MapHostUploadEachAPEachDay.class);
    job.setReducerClass(ReduceHostUploadEachAPEachDay.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setInputFormatClass(MongoInputFormat.class);
    job.setOutputFormatClass(MongoOutputFormat.class);
    boolean result = job.waitForCompletion(true);
    System.exit(result ? 0 : 1);
}

From source file:com.mongodb.hadoop.examples.snmp.SnmpStatisticWithCombiner.java

License:Apache License

public static void main(String[] args) throws Exception {
    boolean use_shards = true;
    boolean use_chunks = false;
    final Configuration conf = new Configuration();
    MongoConfigUtil.setInputURI(conf, "mongodb://localhost:30000/test.snmp");
    conf.setBoolean(MongoConfigUtil.SPLITS_USE_SHARDS, use_shards);
    conf.setBoolean(MongoConfigUtil.SPLITS_USE_CHUNKS, use_chunks);
    String output_table = null;
    if (use_chunks) {
        if (use_shards)
            output_table = "snmp_with_shards_and_chunks";
        else
            output_table = "snmp_with_chunks";
    } else {
        if (use_shards)
            output_table = "snmpWithShards";
        else
            output_table = "snmp_no_splits";
    }
    MongoConfigUtil.setOutputURI(conf, "mongodb://localhost:30000/test." + output_table);
    final Job snmpJob = new Job(conf, "snmp analysis " + output_table);
    snmpJob.setJarByClass(SnmpStatisticWithCombiner.class);
    snmpJob.setMapperClass(MapHostUploadOnEachAPPerDay.class);
    snmpJob.setCombinerClass(CombineHostUploadOnEachAPPerDay.class);
    snmpJob.setReducerClass(ReduceHostUploadOnEachAPPerDay.class);
    snmpJob.setOutputKeyClass(Text.class);
    snmpJob.setOutputValueClass(LongWritable.class);
    snmpJob.setInputFormatClass(MongoInputFormat.class);
    snmpJob.setOutputFormatClass(MongoOutputFormat.class);
    try {
        boolean result = snmpJob.waitForCompletion(true);
        System.out.println("job.waitForCompletion( true ) returned " + result);
    } catch (Exception e) {
        System.out.println("job.waitForCompletion( true ) threw Exception");
        e.printStackTrace();
    }
}

From source file:com.mongodb.hadoop.examples.snmp.SnmpStatistic_MapReduceChain.java

License:Apache License

public static void main(String[] args) throws Exception {
    boolean use_shards = true;
    boolean use_chunks = false;
    //******************This is the first job.******************/
    final Configuration firstConf = new Configuration();
    MongoConfigUtil.setInputURI(firstConf, "mongodb://localhost:30000/test.snmp");
    firstConf.setBoolean(MongoConfigUtil.SPLITS_USE_SHARDS, use_shards);
    firstConf.setBoolean(MongoConfigUtil.SPLITS_USE_CHUNKS, use_chunks);
    String output_table = null;
    if (use_chunks) {
        if (use_shards)
            output_table = "snmp_with_shards_and_chunks";
        else
            output_table = "snmp_with_chunks";
    } else {
        if (use_shards)
            output_table = "snmp_with_shards";
        else
            output_table = "snmp_no_splits";
    }
    MongoConfigUtil.setOutputURI(firstConf, "mongodb://localhost:30000/test." + output_table);
    final Job firstJob = new Job(firstConf, "snmp analysis " + output_table);
    firstJob.setJarByClass(SnmpStatistic_MapReduceChain.class);
    firstJob.setMapperClass(MapHostUploadEachAPEachDay.class);
    firstJob.setReducerClass(ReduceHostUploadEachAPEachDay.class);
    firstJob.setOutputKeyClass(Text.class);
    firstJob.setOutputValueClass(LongWritable.class);
    firstJob.setInputFormatClass(MongoInputFormat.class);
    firstJob.setOutputFormatClass(MongoOutputFormat.class);
    try {
        boolean result = firstJob.waitForCompletion(true);
        System.out.println("job.waitForCompletion( true ) returned " + result);
    } catch (Exception e) {
        System.out.println("job.waitForCompletion( true ) threw Exception");
        e.printStackTrace();
    }

    //*****************This is the second job.********************/
    final Configuration secondConf = new Configuration();
    MongoConfigUtil.setInputURI(secondConf, "mongodb://localhost:30000/test." + output_table);
    secondConf.setBoolean(MongoConfigUtil.SPLITS_USE_SHARDS, use_shards);
    secondConf.setBoolean(MongoConfigUtil.SPLITS_USE_CHUNKS, use_chunks);
    String output_table2 = null;
    if (use_chunks) {
        if (use_shards)
            output_table2 = "second_snmp_with_shards_and_chunks";
        else
            output_table2 = "second_snmp_with_chunks";
    } else {
        if (use_shards)
            output_table2 = "second_snmp_with_shards";
        else
            output_table2 = "second_snmp_no_splits";
    }
    MongoConfigUtil.setOutputURI(secondConf, "mongodb://localhost:30000/test." + output_table2);
    final Job secondJob = new Job(secondConf, "snmp analysis " + output_table2);
    secondJob.setJarByClass(SnmpStatistic_MapReduceChain.class);
    secondJob.setMapperClass(MapHostUploadEachDay.class);
    secondJob.setReducerClass(ReduceHostUploadEachDay.class);
    secondJob.setOutputKeyClass(Text.class);
    secondJob.setOutputValueClass(LongWritable.class);
    secondJob.setInputFormatClass(MongoInputFormat.class);
    secondJob.setOutputFormatClass(MongoOutputFormat.class);
    try {
        boolean result2 = secondJob.waitForCompletion(true);
        System.out.println("job.waitForCompletion( true ) returned " + result2);
    } catch (Exception e) {
        System.out.println("job.waitForCompletion( true ) threw Exception");
        e.printStackTrace();
    }
}
