Example usage for org.apache.hadoop.conf Configuration setBoolean

List of usage examples for org.apache.hadoop.conf Configuration setBoolean

Introduction

This page lists example usages of org.apache.hadoop.conf.Configuration.setBoolean.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
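For a quick, self-contained illustration, here is a minimal sketch of the call (the property name example.feature.enabled is made up for this page, not taken from any of the projects below): setBoolean stores a boolean under a property name, and getBoolean reads it back, returning the supplied default when the property is absent.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Store a boolean under an illustrative, made-up property name.
        conf.setBoolean("example.feature.enabled", true);

        // Read it back; the second argument is the default returned
        // when the property has not been set.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println("example.feature.enabled = " + enabled);
    }
}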

Usage

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyCommitter.java

License:Apache License

@Test
public void testAtomicCommitMissingFinal() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = Mockito.mock(JobContext.class);
    Mockito.when(jobContext.getConfiguration()).thenReturn(config);
    JobID jobID = new JobID();
    Mockito.when(jobContext.getJobID()).thenReturn(jobID);
    Configuration conf = jobContext.getConfiguration();

    String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
    String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        fs.mkdirs(new Path(workPath));

        conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
        conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
        //XXX set DELETE_MISSING to false explicitly so the shared conf is not mixed up across tests
        conf.setBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false);

        Assert.assertTrue(fs.exists(new Path(workPath)));
        Assert.assertFalse(fs.exists(new Path(finalPath)));
        committer.commitJob(jobContext);
        Assert.assertFalse(fs.exists(new Path(workPath)));
        Assert.assertTrue(fs.exists(new Path(finalPath)));

        //Test for idempotent commit
        committer.commitJob(jobContext);
        Assert.assertFalse(fs.exists(new Path(workPath)));
        Assert.assertTrue(fs.exists(new Path(finalPath)));

    } catch (IOException e) {
        LOG.error("Exception encountered while testing for preserve status", e);
        Assert.fail("Atomic commit failure");
    } finally {
        TestDistCpUtils.delete(fs, workPath);
        TestDistCpUtils.delete(fs, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
    }
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyCommitter.java

License:Apache License

@Test
public void testAtomicCommitExistingFinal() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = Mockito.mock(JobContext.class, Mockito.RETURNS_DEEP_STUBS);
    Mockito.when(jobContext.getConfiguration()).thenReturn(config);
    JobID jobID = new JobID();
    Mockito.when(jobContext.getJobID()).thenReturn(jobID);
    Configuration conf = jobContext.getConfiguration();

    String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
    String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        fs.mkdirs(new Path(workPath));
        fs.mkdirs(new Path(finalPath));

        conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
        conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
        //XXX set DELETE_MISSING to false explicitly so the shared conf is not mixed up across tests
        conf.setBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false);

        Assert.assertTrue(fs.exists(new Path(workPath)));
        Assert.assertTrue(fs.exists(new Path(finalPath)));
        committer.commitJob(jobContext);
        Assert.assertFalse(fs.exists(new Path(workPath)));
        Assert.assertTrue(fs.exists(new Path(finalPath)));

        //Test for idempotent commit
        committer.commitJob(jobContext);
        Assert.assertFalse(fs.exists(new Path(workPath)));
        Assert.assertTrue(fs.exists(new Path(finalPath)));

    } catch (IOException e) {
        LOG.error("Exception encountered while testing for preserve status", e);
        Assert.fail("Atomic commit failure");
    } finally {
        TestDistCpUtils.delete(fs, workPath);
        TestDistCpUtils.delete(fs, finalPath);
        conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
    }
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

private static Configuration getConfiguration() throws IOException {
    Configuration configuration = getConfigurationForCluster();
    Path workPath = new Path(TARGET_PATH).makeQualified(cluster.getFileSystem());
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath.toString());
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, workPath.toString());
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), false);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), true);
    configuration.setBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), true);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(), "br");
    return configuration;
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

private void doTestIgnoreFailures(boolean ignoreFailures) {
    try {
        deleteState();
        createSourceData();

        FileSystem fs = cluster.getFileSystem();
        CopyMapper copyMapper = new CopyMapper();
        StatusReporter reporter = new StubStatusReporter();
        InMemoryWriter writer = new InMemoryWriter();
        Mapper<Text, FileStatus, NullWritable, Text>.Context context = getMapperContext(copyMapper, reporter,
                writer);

        Configuration configuration = context.getConfiguration();
        configuration.setBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
        configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), true);
        configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), true);
        copyMapper.setup(context);

        for (Path path : pathList) {
            final FileStatus fileStatus = fs.getFileStatus(path);
            if (!fileStatus.isDir()) {
                fs.delete(path, true);
                copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), fileStatus,
                        context);
            }
        }
        if (ignoreFailures) {
            for (Text value : writer.values()) {
                Assert.assertTrue(value.toString() + " is not skipped", value.toString().startsWith("FAIL:"));
            }
        }
        Assert.assertTrue("There should have been an exception.", ignoreFailures);
    } catch (Exception e) {
        Assert.assertTrue("Unexpected exception: " + e.getMessage(), !ignoreFailures);
        e.printStackTrace();
    }
}

From source file:com.kasabi.labs.freebase.mr.Freebase2RDFDriver.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("run({})", Utils.toString(args));
    }

    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);

    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = new Job(configuration);
    job.setJobName("Freebase2RDFDriver");
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(Freebase2RDFMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setReducerClass(Freebase2RDFReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    Utils.setReducers(job, configuration, log);

    job.setOutputFormatClass(TextOutputFormat.class);

    if (log.isDebugEnabled())
        Utils.log(job, log);

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file:com.kenshoo.integrations.plugins.connectors.GCSFileProtocol.java

License:Apache License

@Override
public void setupAuthentication(GenericConfiguration genericConfiguration, Configuration configuration,
        String arg1, String arg2) {
    configuration.setBoolean("fs.gcsfs.impl.disable.cache", true);
    configuration.setStrings("fs.gcsfs.impl", "com.kenshoo.integrations.plugins.connectors.GCSFileSystem");

    String accessToken = genericConfiguration.getStringProperty(PROPERTY_KEY_ACCESS_TOKEN, null);
    if (accessToken != null) {
        configuration.setStrings(PROPERTY_KEY_ACCESS_TOKEN, accessToken);
    }
    String refreshToken = genericConfiguration.getStringProperty(PROPERTY_KEY_REFRESH_TOKEN, null);
    if (refreshToken != null) {
        configuration.setStrings(PROPERTY_KEY_REFRESH_TOKEN, refreshToken);
    }
    Long accessTokenCreationTime = genericConfiguration.getLongProperty(PROPERTY_KEY_ACCESS_TOKEN_CREATION_TIME,
            null);
    if (accessTokenCreationTime != null) {
        configuration.setLong(PROPERTY_KEY_ACCESS_TOKEN_CREATION_TIME, accessTokenCreationTime);
    }
    Long accessTokenExpirationAfter = genericConfiguration
            .getLongProperty(PROPERTY_KEY_ACCESS_TOKEN_EXPIRES_AFTER, null);
    if (accessTokenExpirationAfter != null) {
        configuration.setLong(PROPERTY_KEY_ACCESS_TOKEN_EXPIRES_AFTER, accessTokenExpirationAfter);
    }
    String relayURL = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_RELAY_URL, null);
    if (relayURL != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_RELAY_URL, relayURL);
    }
    String clientId = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_CLIENT_ID, null);
    if (clientId != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_CLIENT_ID, clientId);
    }
    String secret = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_SECRET, null);
    if (secret != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_SECRET, secret);
    }
}

From source file:com.koda.integ.hbase.test.OffHeapBlockCacheMultiPerfTest.java

License:Open Source License

/**
 * Sets up.
 *
 * @throws Exception the exception
 */
protected static void setUp() throws Exception {

    Configuration config = new Configuration();

    // Set L2 config
    config.set(OffHeapBlockCache.BLOCK_CACHE_MEMORY_SIZE, Long.toString(sRAMCacheSize));

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, true);
    config.setLong(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE,
            (long) (sDiskMetaRatio * sRAMCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_COMPRESSION, "LZ4");

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_PERSISTENT, sIsPersistent);

    config.set(OffHeapBlockCache.BLOCK_CACHE_DATA_ROOTS, sSystemDataDir);

    // Set L3 config 
    config.set(FileExtStorage.FILE_STORAGE_BASE_DIR, baseDir);
    // 120G
    config.set(FileExtStorage.FILE_STORAGE_MAX_SIZE, Long.toString(sDiskCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_IMPL,
            "com.koda.integ.hbase.storage.FileExtMultiStorage");
    // 2G file size limit
    config.setLong(FileExtStorage.FILE_STORAGE_FILE_SIZE_LIMIT, sMaxFileSize);
    // 8MB buffer size 
    config.setInt(FileExtStorage.FILE_STORAGE_BUFFER_SIZE, 8 * 1024 * 1024);

    config.setInt(FileExtStorage.FILE_STORAGE_NUM_BUFFERS, 2);

    if (sIsPersistent == false) {
        //checkDir();
        deleteData();
    } else {
        // Set deserializer
        CacheableSerializer.setSerializer(ByteArrayCacheable.deserializer);
    }

    // Create block cache      
    sCache = new OffHeapBlockCache(config);

    sStorage = (FileExtMultiStorage) sCache.getExternalStorage();

}

From source file:com.koda.integ.hbase.test.OffHeapBlockCachePerfTest.java

License:Open Source License

/**
 * Sets up.
 *
 * @throws Exception the exception
 */
protected static void setUp() throws Exception {

    Configuration config = new Configuration();

    // Set L2 config
    config.set(OffHeapBlockCache.BLOCK_CACHE_MEMORY_SIZE, Long.toString(sRAMCacheSize));

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, true);
    config.setLong(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE,
            (long) (sDiskMetaRatio * sRAMCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_COMPRESSION, "LZ4");

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_PERSISTENT, sIsPersistent);

    config.set(OffHeapBlockCache.BLOCK_CACHE_DATA_ROOTS, sSystemDataDir);

    // Set L3 config 
    config.set(FileExtStorage.FILE_STORAGE_BASE_DIR, baseDir);
    // 120G
    config.set(FileExtStorage.FILE_STORAGE_MAX_SIZE, Long.toString(sDiskCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_IMPL, "com.koda.integ.hbase.storage.FileExtStorage");
    // 2G file size limit
    config.setLong(FileExtStorage.FILE_STORAGE_FILE_SIZE_LIMIT, sMaxFileSize);
    // 8MB buffer size 
    config.setInt(FileExtStorage.FILE_STORAGE_BUFFER_SIZE, 8 * 1024 * 1024);

    config.setInt(FileExtStorage.FILE_STORAGE_NUM_BUFFERS, 2);

    if (sIsPersistent == false) {
        checkDir();
    } else {
        // Set deserializer
        CacheableSerializer.setSerializer(ByteArrayCacheable.deserializer);
    }

    // Create block cache      
    sCache = new OffHeapBlockCache(config);

    sStorage = (FileExtStorage) sCache.getExternalStorage();

}

From source file:com.kse.bigdata.main.Driver.java

License:Apache License

public static void main(String[] args) throws Exception {
    /**********************************************************************************
     **    Merge the source files into one.                                          **
     **    Should change the directories of each file before executing the program   **
    ***********************************************************************************/
    //        String inputFileDirectory = "/media/bk/??/BigData_Term_Project/Debug";
    //        String resultFileDirectory = "/media/bk/??/BigData_Term_Project/debug.csv";
    //        File resultFile = new File(resultFileDirectory);
    //        if(!resultFile.exists())
    //            new SourceFileMerger(inputFileDirectory, resultFileDirectory).mergeFiles();

    /**********************************************************************************
     * Hadoop Operation.
     * Before starting, check the length of the sequence we want to predict.
     **********************************************************************************/

    Configuration conf = new Configuration();

    //Enable MapReduce intermediate compression with Snappy
    conf.setBoolean("mapred.compress.map.output", true);
    conf.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");

    //Enable Profiling
    //conf.setBoolean("mapred.task.profile", true);

    String testPath = null;
    String inputPath = null;
    String outputPath = null;

    int sampleSize = 1;
    ArrayList<String> results = new ArrayList<String>();

    for (int index = 0; index < args.length; index++) {

        /*
         * Mandatory command
         */
        //Extract input path string from command line.
        if (args[index].equals("-in"))
            inputPath = args[index + 1];

        //Extract output path string from command line.
        if (args[index].equals("-out"))
            outputPath = args[index + 1];

        //Extract test data path string from command line.
        if (args[index].equals("-test"))
            testPath = args[index + 1];

        /*
         * Optional command
         */
        //Extract a number of neighbors.
        if (args[index].equals("-nn"))
            conf.setInt(Reduce.NUMBER_OF_NEAREAST_NEIGHBOR, Integer.parseInt(args[index + 1]));

        //Whether job uses normalization or not.
        if (args[index].equals("-norm"))
            conf.setBoolean(Map.NORMALIZATION, true);

        //Extract the number of sample size to test.
        if (args[index].equals("-s"))
            sampleSize = Integer.valueOf(args[index + 1]);

        //Whether job uses mean or median
        //[Default : mean]
        if (args[index].equals("-med"))
            conf.setBoolean(Reduce.MEDIAN, true);
    }

    String outputFileName = "part-r-00000";
    SequenceSampler sampler = new SequenceSampler(testPath, sampleSize);
    LinkedList<Sequence> testSequences = sampler.getRandomSample();

    //        Test Sequence
    //        String testSeqString = "13.591-13.674-13.778-13.892-13.958-14.049-14.153-14.185-14.169-14.092-13.905-13.702-13.438-13.187-13.0-12.914-12.868-12.766-12.62-12.433-12.279-12.142-12.063-12.025-100";
    //        Sequence testSeq = new Sequence(testSeqString);
    //        LinkedList<Sequence> testSequences = new LinkedList<>();
    //        testSequences.add(testSeq);

    for (Sequence seq : testSequences) {

        /*
         ********************  Hadoop Launch ***********************
         */

        System.out.println(seq.getTailString());

        conf.set(Map.INPUT_SEQUENCE, seq.toString());

        Job job = new Job(conf);
        job.setJarByClass(Driver.class);
        job.setJobName("term-project-driver");

        job.setMapperClass(Map.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Text.class);

        //          Should find another way to implement the combiner class;
        //          the current implementation does not help the job.
        //          job.setCombinerClass(Combiner.class);

        //Set the number of reduce tasks to 1 to keep the 100 nearest neighbors in a single sorted set.
        job.setNumReduceTasks(1);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        job.waitForCompletion(true);

        /*
         * if job finishes, get result of the job and store it in results(list).
         */
        try {
            FileSystem hdfs = FileSystem.get(new Configuration());
            BufferedReader fileReader = new BufferedReader(
                    new InputStreamReader(hdfs.open(new Path(outputPath + "/" + outputFileName))));

            String line;
            while ((line = fileReader.readLine()) != null) {
                results.add(seq.getSeqString() + " " + line);
            }

            fileReader.close();

            hdfs.delete(new Path(outputPath), true);
            hdfs.close();

        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    }

    /*
     * if all jobs finish, store results of jobs to output/result.txt file.
     */
    String finalOutputPath = "output/result.csv";
    try {
        FileSystem hdfs = FileSystem.get(new Configuration());
        Path file = new Path(finalOutputPath);
        if (hdfs.exists(file)) {
            hdfs.delete(file, true);
        }

        OutputStream os = hdfs.create(file);
        PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(os, "UTF-8"));

        //CSV File Header
        printWriter.println("Actual,Predicted,MER,MAE");
        printWriter.flush();

        for (String result : results) {
            String[] tokens = result.split("\\s+");

            printWriter.println(tokens[0] + "," + tokens[1] + "," + tokens[2] + "," + tokens[3]);
            printWriter.flush();
        }

        printWriter.close();
        hdfs.close();
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    }

}

From source file:com.linkedin.cubert.io.rubix.RubixStorage.java

License:Open Source License

@Override
public void prepareOutput(Job job, Configuration conf, JsonNode params, BlockSchema schema, Path path) {
    Class<?> tupleClass = TupleFactory.getInstance().newTuple().getClass();
    job.setOutputKeyClass(tupleClass);
    job.setOutputValueClass(tupleClass);

    job.setOutputFormatClass(RubixOutputFormat.class);

    if (params.has("compact"))
        conf.setBoolean(CubertStrings.USE_COMPACT_SERIALIZATION,
                Boolean.parseBoolean(JsonUtils.getText(params, "compact")));
}