Example usage for org.apache.hadoop.conf Configuration unset

List of usage examples for org.apache.hadoop.conf Configuration unset

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration#unset.

Prototype

public synchronized void unset(String name) 

Document

Unset a previously set property.
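
A minimal sketch of the call pattern (the property name "example.key" and the class UnsetExample are illustrative only, not taken from the examples below): set a property, unset it, and observe that a later get() falls back to null or to the supplied default.

import org.apache.hadoop.conf.Configuration;

public class UnsetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false); // empty config, no default resources loaded
        conf.set("example.key", "value");

        conf.unset("example.key"); // remove the previously set property

        // After unset, the key resolves to null, or to the default passed to get()
        System.out.println(conf.get("example.key"));             // null
        System.out.println(conf.get("example.key", "fallback")); // fallback
    }
}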

Usage

From source file:com.google.cloud.bigtable.hbase.TestBigtableOptionsFactory.java

License:Open Source License

@Test
public void testHostIsRequired() throws IOException {
    Configuration configuration = new Configuration(false);
    configuration.unset(BigtableOptionsFactory.BIGTABLE_HOST_KEY);

    expectedException.expect(IllegalArgumentException.class);
    BigtableOptionsFactory.fromConfiguration(configuration);
}

From source file:com.google.cloud.bigtable.hbase.TestBigtableOptionsFactory.java

License:Open Source License

@Test
public void testClusterIsRequired() throws IOException {
    Configuration configuration = new Configuration(false);
    configuration.unset(BigtableOptionsFactory.CLUSTER_KEY);

    expectedException.expect(IllegalArgumentException.class);
    BigtableOptionsFactory.fromConfiguration(configuration);
}

From source file:com.google.cloud.bigtable.hbase.TestBigtableOptionsFactory.java

License:Open Source License

@Test
public void testZoneIsRequired() throws IOException {
    Configuration configuration = new Configuration(false);
    configuration.unset(BigtableOptionsFactory.ZONE_KEY);

    expectedException.expect(IllegalArgumentException.class);
    BigtableOptionsFactory.fromConfiguration(configuration);
}

From source file:com.ikanow.aleph2.analytics.hadoop.assets.VerySimpleLocalExample.java

License:Apache License

@SuppressWarnings({ "deprecation", "unchecked", "rawtypes" })
@Test
public void test_localHadoopLaunch()
        throws IOException, IllegalStateException, ClassNotFoundException, InterruptedException {

    // 0) Setup the temp dir 
    final String temp_dir = System.getProperty("java.io.tmpdir") + File.separator;
    //final Path tmp_path = FileContext.getLocalFSFileContext().makeQualified(new Path(temp_dir));
    final Path tmp_path2 = FileContext.getLocalFSFileContext()
            .makeQualified(new Path(temp_dir + "/tmp_output"));
    try {
        FileContext.getLocalFSFileContext().delete(tmp_path2, true);
    } catch (Exception e) {
    } // (just doesn't exist yet)

    // 1) Setup config with local mode
    final Configuration config = new Configuration();
    config.setBoolean("mapred.used.genericoptionsparser", true); // (just stops an annoying warning from appearing)
    config.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    config.set("mapred.job.tracker", "local");
    config.set("fs.defaultFS", "local");
    config.unset("mapreduce.framework.name");

    // If running locally, turn "snappy" off - tomcat isn't pointing its native library path in the right place
    config.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.DefaultCodec");

    // 2) Build job and do more setup using the Job API
    //TODO: not sure why this is deprecated, it doesn't seem to be in v1? We do need to move to JobConf at some point, but I ran into some 
    // issues when trying to do everything I needed to for V1, so seems expedient to start here and migrate away
    final Job hj = new Job(config); // (NOTE: from here, changes to config are ignored)

    // Input format:
    //TODO: fails because of a guava issue, looks like we'll need to move to 2.7 and check it works with the 2.5.x server?
    //TextInputFormat.addInputPath(hj, tmp_path);
    //hj.setInputFormatClass((Class<? extends InputFormat>) Class.forName ("org.apache.hadoop.mapreduce.lib.input.TextInputFormat"));
    hj.setInputFormatClass(TestInputFormat.class);

    // Output format:
    hj.setOutputFormatClass((Class<? extends OutputFormat>) Class
            .forName("org.apache.hadoop.mapreduce.lib.output.TextOutputFormat"));
    TextOutputFormat.setOutputPath(hj, tmp_path2);

    // Mapper etc (combiner/reducer are similar)
    hj.setMapperClass(TestMapper.class);
    hj.setOutputKeyClass(Text.class);
    hj.setOutputValueClass(Text.class);
    hj.setNumReduceTasks(0); // (disable reducer for now)

    hj.setJar("test");

    try {
        hj.submit();
    } catch (UnsatisfiedLinkError e) {
        throw new RuntimeException(
                "This is a windows/hadoop compatibility problem - adding the hadoop-commons in the misc_test_assets subdirectory to the top of the classpath should resolve it (and does in V1), though I haven't yet made that work with Aleph2",
                e);
    }
    //hj.getJobID().toString();
    while (!hj.isComplete()) {
        Thread.sleep(1000);
    }
    assertTrue("Finished successfully", hj.isSuccessful());
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyCommitter.java

License:Apache License

@Test
public void testPreserveStatus() {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);

    JobContext jobContext = Mockito.mock(JobContext.class);
    Mockito.when(jobContext.getConfiguration()).thenReturn(config);
    JobID jobID = new JobID();
    Mockito.when(jobContext.getJobID()).thenReturn(jobID);
    Configuration conf = jobContext.getConfiguration();

    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        FsPermission sourcePerm = new FsPermission((short) 511);
        FsPermission initialPerm = new FsPermission((short) 448);
        sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
        targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

        DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
        options.preserve(FileAttribute.PERMISSION);
        options.appendToConf(conf);

        CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
        Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
        listing.buildListing(listingFile, options);

        conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

        committer.commitJob(jobContext);
        if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
            Assert.fail("Permission don't match");
        }

        //Test for idempotent commit
        committer.commitJob(jobContext);
        if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
            Assert.fail("Permission don't match");
        }

    } catch (IOException e) {
        LOG.error("Exception encountered while testing for preserve status", e);
        Assert.fail("Preserve status failure");
    } finally {
        TestDistCpUtils.delete(fs, "/tmp1");
        conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
    }

}

From source file:io.prestosql.plugin.hive.HdfsConfigurationInitializer.java

License:Apache License

public static void configureCompression(Configuration config, HiveCompressionCodec compressionCodec) {
    boolean compression = compressionCodec != HiveCompressionCodec.NONE;
    config.setBoolean(COMPRESSRESULT.varname, compression);
    config.setBoolean("mapred.output.compress", compression);
    config.setBoolean(FileOutputFormat.COMPRESS, compression);
    // For DWRF
    com.facebook.hive.orc.OrcConf.setVar(config, HIVE_ORC_COMPRESSION,
            compressionCodec.getOrcCompressionKind().name());
    // For ORC
    OrcConf.COMPRESS.setString(config, compressionCodec.getOrcCompressionKind().name());
    // For RCFile and Text
    if (compressionCodec.getCodec().isPresent()) {
        config.set("mapred.output.compression.codec", compressionCodec.getCodec().get().getName());
        config.set(FileOutputFormat.COMPRESS_CODEC, compressionCodec.getCodec().get().getName());
    } else {
        config.unset("mapred.output.compression.codec");
        config.unset(FileOutputFormat.COMPRESS_CODEC);
    }
    // For Parquet
    config.set(ParquetOutputFormat.COMPRESSION, compressionCodec.getParquetCompressionCodec().name());
    // For SequenceFile
    config.set(FileOutputFormat.COMPRESS_TYPE, BLOCK.toString());
}

From source file:org.apache.crunch.kafka.inputformat.KafkaInputFormatIT.java

License:Apache License

@Test(expected = IllegalStateException.class)
public void getOffsetsFromConfigMissingStart() {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    Set<String> topics = new HashSet<>();

    int numPartitions = 10;
    int numTopics = 10;
    for (int j = 0; j < numTopics; j++) {
        String topic = testName.getMethodName() + ".partitions" + j;
        topics.add(topic);
        for (int i = 0; i < numPartitions; i++) {
            TopicPartition tAndP = new TopicPartition(topic, i);
            offsets.put(tAndP, Pair.of((long) i, i * 10L));
        }
    }

    Configuration config = new Configuration(false);

    KafkaInputFormat.writeOffsetsToConfiguration(offsets, config);

    config.unset("org.apache.crunch.kafka.offsets.topic." + topics.iterator().next() + ".partitions.0.start");

    Map<TopicPartition, Pair<Long, Long>> returnedOffsets = KafkaInputFormat.getOffsets(config);
}

From source file:org.apache.crunch.kafka.inputformat.KafkaInputFormatIT.java

License:Apache License

@Test(expected = IllegalStateException.class)
public void getOffsetsFromConfigMissingEnd() {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    Set<String> topics = new HashSet<>();

    int numPartitions = 10;
    int numTopics = 10;
    for (int j = 0; j < numTopics; j++) {
        String topic = testName.getMethodName() + ".partitions" + j;
        topics.add(topic);
        for (int i = 0; i < numPartitions; i++) {
            TopicPartition tAndP = new TopicPartition(topic, i);
            offsets.put(tAndP, Pair.of((long) i, i * 10L));
        }
    }

    Configuration config = new Configuration(false);

    KafkaInputFormat.writeOffsetsToConfiguration(offsets, config);

    config.unset("org.apache.crunch.kafka.offsets.topic." + topics.iterator().next() + ".partitions.0.end");

    Map<TopicPartition, Pair<Long, Long>> returnedOffsets = KafkaInputFormat.getOffsets(config);
}

From source file:org.apache.gobblin.compaction.CliOptions.java

License:Apache License

/**
 * Parse command line arguments and return a {@link java.util.Properties} object for the Gobblin job found.
 * @param caller Class of the calling main method. Used for error logs.
 * @param args Command line arguments.
 * @param conf Hadoop configuration object
 * @return Instance of {@link Properties} for the Gobblin job to run.
 * @throws IOException
 */
public static Properties parseArgs(Class<?> caller, String[] args, Configuration conf) throws IOException {
    try {

        // Parse command-line options
        if (conf != null) {
            args = new GenericOptionsParser(conf, args).getCommandLine().getArgs();
        }
        CommandLine cmd = new DefaultParser().parse(options(), args);

        if (cmd.hasOption(HELP_OPTION.getOpt())) {
            printUsage(caller);
            System.exit(0);
        }

        String jobConfigLocation = JOB_CONFIG_OPTION.getLongOpt();
        if (!cmd.hasOption(jobConfigLocation)) {
            printUsage(caller);
            System.exit(1);
        }

        // Load job configuration properties
        Properties jobConfig;
        if (conf == null) {
            jobConfig = JobConfigurationUtils.fileToProperties(cmd.getOptionValue(jobConfigLocation));
        } else {
            jobConfig = JobConfigurationUtils.fileToProperties(cmd.getOptionValue(jobConfigLocation), conf);
            for (String configKey : jobConfig.stringPropertyNames()) {
                if (conf.get(configKey) != null) {
                    conf.unset(configKey);
                }
            }
            JobConfigurationUtils.putConfigurationIntoProperties(conf, jobConfig);
        }
        return jobConfig;
    } catch (ParseException | ConfigurationException e) {
        throw new IOException(e);
    }
}

From source file:org.apache.lens.cube.parse.TestJoinResolver.java

License:Apache License

@Test
public void testChainsWithMultipleStorage() throws ParseException, HiveException, LensException {
    Configuration conf = new Configuration(hconf);
    conf.unset(CubeQueryConfUtil.DRIVER_SUPPORTED_STORAGES); // supports all storages
    String dimOnlyQuery = "select testDim2.name, testDim2.cityStateCapital FROM testDim2 where "
            + TWO_DAYS_RANGE;
    CubeQueryRewriter driver = new CubeQueryRewriter(conf, hconf);
    CubeQueryContext rewrittenQuery = driver.rewrite(dimOnlyQuery);
    rewrittenQuery.toHQL();
    Dimension citydim = CubeMetastoreClient.getInstance(hconf).getDimension("citydim");
    Set<String> cdimTables = new HashSet<>();
    for (CandidateDim cdim : rewrittenQuery.getCandidateDims().get(citydim)) {
        cdimTables.add(cdim.getName());
    }
    Assert.assertTrue(cdimTables.contains("citytable"));
    Assert.assertTrue(cdimTables.contains("citytable2"));
    Assert.assertFalse(cdimTables.contains("citytable3"));
    Assert.assertFalse(cdimTables.contains("citytable4"));
}