Example usage for org.apache.hadoop.conf Configuration clear

Introduction

On this page you can find usage examples for org.apache.hadoop.conf Configuration.clear().

Prototype

public void clear() 

Document

Clears all keys from the configuration.
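
The examples below repeatedly pair new Configuration() with an immediate clear(): the no-argument constructor loads the default resources (core-default.xml and core-site.xml), and clear() removes every loaded entry so the configuration can be populated from scratch. A minimal sketch of that behavior (the key name is illustrative):

import org.apache.hadoop.conf.Configuration;

public class ClearExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.key", "value");

        // clear() removes every key, including entries loaded from
        // the default resources.
        conf.clear();

        System.out.println(conf.get("example.key")); // null
        System.out.println(conf.size());             // 0

        // Note: new Configuration(false) skips loading the defaults
        // entirely and gives a similar empty starting point.
    }
}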

Usage

From source file: co.cask.cdap.internal.app.runtime.batch.stream.StreamInputFormatProvider.java

License: Apache License

/**
 * Sets the {@link StreamEventDecoder} to be used by the InputFormat for the given type. If the
 * {@link Input.StreamInput} already defined a {@link StreamEventDecoder} or {@link FormatSpecification},
 * this method is a no-op.
 *
 * @param configuration configuration to update
 * @param type type for {@link StreamEventData} to decode to
 * @return the same configuration map that was passed in.
 */
public Map<String, String> setDecoderType(Map<String, String> configuration, Type type) {
    if (streamInput.getBodyFormatSpec() == null && streamInput.getDecoderType() == null) {
        Configuration hConf = new Configuration();
        hConf.clear();
        AbstractStreamInputFormat.inferDecoderClass(hConf, type);
        configuration.putAll(ConfigurationUtil.toMap(hConf));
    }
    return configuration;
}

From source file: co.cask.cdap.internal.app.runtime.batch.stream.StreamInputFormatProvider.java

License: Apache License

@Override
public Map<String, String> getInputFormatConfiguration() {
    try {
        StreamConfig streamConfig = streamAdmin.getConfig(streamId);
        Location streamPath = StreamUtils.createGenerationLocation(streamConfig.getLocation(),
                StreamUtils.getGeneration(streamConfig));
        Configuration hConf = new Configuration();
        hConf.clear();

        AbstractStreamInputFormat.setStreamId(hConf, streamId);
        AbstractStreamInputFormat.setTTL(hConf, streamConfig.getTTL());
        AbstractStreamInputFormat.setStreamPath(hConf, streamPath.toURI());
        AbstractStreamInputFormat.setTimeRange(hConf, streamInput.getStartTime(), streamInput.getEndTime());
        FormatSpecification formatSpec = streamInput.getBodyFormatSpec();
        if (formatSpec != null) {
            AbstractStreamInputFormat.setBodyFormatSpecification(hConf, formatSpec);
        } else {
            String decoderType = streamInput.getDecoderType();
            if (decoderType != null) {
                AbstractStreamInputFormat.setDecoderClassName(hConf, decoderType);
            }
        }

        return ConfigurationUtil.toMap(hConf);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file: co.cask.cdap.internal.app.runtime.spark.AbstractSparkContext.java

License: Apache License

/**
 * Adds the supplied {@link Configuration} file as a resource.
 * This configuration is needed to read/write a {@link Dataset} using {@link DataSetInputFormat}/{@link
 * DataSetOutputFormat} through {@link JavaSparkContext#readFromDataset(String, Class, Class)} or
 * {@link ScalaSparkContext#readFromDataset(String, Class, Class)}.
 * This function requires that the hConf.xml file containing the {@link Configuration} is present in the job jar.
 */
private Configuration loadHConf() {
    // TODO: Inject through Guice in Distributed mode, see CDAP-3
    Configuration hConf = new Configuration();
    hConf.clear();

    URL url = Thread.currentThread().getContextClassLoader()
            .getResource(SparkRuntimeService.SPARK_HCONF_FILENAME);
    if (url == null) {
        LOG.error("Unable to find Hadoop Configuration file {} in the submitted jar.",
                SparkRuntimeService.SPARK_HCONF_FILENAME);
        throw new RuntimeException(
                "Hadoop Configuration file not found in the supplied jar. Please include Hadoop "
                        + "Configuration file with name " + SparkRuntimeService.SPARK_HCONF_FILENAME);
    }
    hConf.addResource(url);
    return hConf;
}

From source file: co.cask.cdap.logging.run.LogSaverTwillRunnable.java

License: Apache License

@Override
public void initialize(TwillContext context) {
    super.initialize(context);

    completion = SettableFuture.create();
    name = context.getSpecification().getName();
    Map<String, String> configs = context.getSpecification().getConfigs();

    LOG.info("Initialize runnable: " + name);
    try {
        // Load configuration
        Configuration hConf = new Configuration();
        hConf.clear();
        hConf.addResource(new File(configs.get("hConf")).toURI().toURL());

        UserGroupInformation.setConfiguration(hConf);

        CConfiguration cConf = CConfiguration.create(new File(configs.get("cConf")));

        cConf.set(Constants.LogSaver.ADDRESS, context.getHost().getCanonicalHostName());

        // Initialize ZK client
        String zookeeper = cConf.get(Constants.Zookeeper.QUORUM);
        if (zookeeper == null) {
            LOG.error("No ZooKeeper quorum provided.");
            throw new IllegalStateException("No ZooKeeper quorum provided.");
        }

        Injector injector = createGuiceInjector(cConf, hConf);
        zkClientService = injector.getInstance(ZKClientService.class);
        kafkaClientService = injector.getInstance(KafkaClientService.class);
        logSaverService = injector.getInstance(KafkaLogSaverService.class);

        int numPartitions = Integer.parseInt(
                cConf.get(LoggingConfiguration.NUM_PARTITIONS, LoggingConfiguration.DEFAULT_NUM_PARTITIONS));
        LOG.info("Num partitions = {}", numPartitions);

        logSaverStatusService = injector.getInstance(LogSaverStatusService.class);
        metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
        LOG.info("Runnable initialized: " + name);
    } catch (Throwable t) {
        LOG.error(t.getMessage(), t);
        throw Throwables.propagate(t);
    }
}

From source file: co.cask.hydrator.common.batch.JobUtils.java

License: Apache License

/**
 * Creates a new instance of {@link Job}. Note that the job created is not meant for actual MR
 * submission. It's just for setting up configurations.
 */
public static Job createInstance() throws IOException {
    Job job = Job.getInstance();
    Configuration conf = job.getConfiguration();
    conf.clear();

    if (UserGroupInformation.isSecurityEnabled()) {
        // If this runs in a secure cluster, this program runner is running in a YARN container,
        // hence not able to get authenticated with the history server.
        conf.unset("mapreduce.jobhistory.address");
        conf.setBoolean(Job.JOB_AM_ACCESS_DISABLED, false);

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        job.getCredentials().addAll(credentials);
    }

    return job;
}

From source file: co.cask.hydrator.plugin.batch.source.BatchCassandraSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    Configuration conf = new Configuration();
    conf.clear();

    ConfigHelper.setInputColumnFamily(conf, config.keyspace, config.columnFamily);
    ConfigHelper.setInputInitialAddress(conf, config.initialAddress);
    ConfigHelper.setInputPartitioner(conf, config.partitioner);
    ConfigHelper.setInputRpcPort(conf, (config.port == null) ? "9160" : Integer.toString(config.port));
    Preconditions
            .checkArgument(!(Strings.isNullOrEmpty(config.username) ^ Strings.isNullOrEmpty(config.password)),
                    "You must either set both username and password or neither username nor password. "
                            + "Currently, they are username: " + config.username + " and password: "
                            + config.password);
    if (!Strings.isNullOrEmpty(config.username)) {
        ConfigHelper.setInputKeyspaceUserNameAndPassword(conf, config.username, config.password);
    }

    if (!Strings.isNullOrEmpty(config.properties)) {
        for (String pair : config.properties.split(",")) {
            // the key and value of properties might have spaces so remove only leading and trailing ones
            conf.set(CharMatcher.WHITESPACE.trimFrom(pair.split(":")[0]),
                    CharMatcher.WHITESPACE.trimFrom(pair.split(":")[1]));
        }
    }
    CqlConfigHelper.setInputCql(conf, config.query);
    context.setInput(Input.of(config.referenceName, new SourceInputFormatProvider(CqlInputFormat.class, conf)));
}

From source file: co.cask.hydrator.plugin.batch.source.MongoDBBatchSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    Configuration conf = new Configuration();
    conf.clear();

    MongoConfigUtil.setInputFormat(conf, MongoInputFormat.class);
    MongoConfigUtil.setInputURI(conf, config.connectionString);
    if (!Strings.isNullOrEmpty(config.inputQuery)) {
        MongoConfigUtil.setQuery(conf, config.inputQuery);
    }
    if (!Strings.isNullOrEmpty(config.authConnectionString)) {
        MongoConfigUtil.setAuthURI(conf, config.authConnectionString);
    }
    if (!Strings.isNullOrEmpty(config.inputFields)) {
        MongoConfigUtil.setFields(conf, config.inputFields);
    }
    if (!Strings.isNullOrEmpty(config.splitterClass)) {
        String className = String.format("%s.%s", StandaloneMongoSplitter.class.getPackage().getName(),
                config.splitterClass);
        Class<? extends MongoSplitter> klass = getClass().getClassLoader().loadClass(className)
                .asSubclass(MongoSplitter.class);
        MongoConfigUtil.setSplitterClass(conf, klass);
    }

    context.setInput(Input.of(config.referenceName,
            new SourceInputFormatProvider(MongoConfigUtil.getInputFormat(conf), conf)));
}

From source file: co.cask.hydrator.plugin.db.batch.source.DBSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    sourceConfig.substituteMacros(context);
    LOG.debug(
            "pluginType = {}; pluginName = {}; connectionString = {}; importQuery = {}; "
                    + "boundingQuery = {}",
            sourceConfig.jdbcPluginType, sourceConfig.jdbcPluginName, sourceConfig.connectionString,
            sourceConfig.getImportQuery(), sourceConfig.getBoundingQuery());
    Configuration hConf = new Configuration();
    hConf.clear();

    // Load the plugin class to make sure it is available.
    Class<? extends Driver> driverClass = context.loadPluginClass(getJDBCPluginId());
    if (sourceConfig.user == null && sourceConfig.password == null) {
        DBConfiguration.configureDB(hConf, driverClass.getName(), sourceConfig.connectionString);
    } else {
        DBConfiguration.configureDB(hConf, driverClass.getName(), sourceConfig.connectionString,
                sourceConfig.user, sourceConfig.password);
    }
    DataDrivenETLDBInputFormat.setInput(hConf, DBRecord.class, sourceConfig.getImportQuery(),
            sourceConfig.getBoundingQuery(), sourceConfig.getEnableAutoCommit());
    if (sourceConfig.numSplits == null || sourceConfig.numSplits != 1) {
        hConf.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, sourceConfig.splitBy);
    }
    if (sourceConfig.numSplits != null) {
        hConf.setInt(MRJobConfig.NUM_MAPS, sourceConfig.numSplits);
    }
    context.setInput(Input.of(sourceConfig.referenceName,
            new SourceInputFormatProvider(DataDrivenETLDBInputFormat.class, hConf)));
}

From source file: co.cask.hydrator.plugin.source.HBaseSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    Configuration conf = new Configuration();
    // Preserve the default serializations before wiping the configuration.
    String ioSerializations = conf.get("io.serializations");
    conf.clear();

    conf.set(TableInputFormat.INPUT_TABLE, config.tableName);
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, config.columnFamily);
    String zkQuorum = !Strings.isNullOrEmpty(config.zkQuorum) ? config.zkQuorum : "localhost";
    String zkClientPort = !Strings.isNullOrEmpty(config.zkClientPort) ? config.zkClientPort : "2181";
    conf.set("hbase.zookeeper.quorum", zkQuorum);
    conf.set("hbase.zookeeper.property.clientPort", zkClientPort);
    // Restore the saved default serializations and append the HBase-specific ones.
    conf.setStrings("io.serializations", ioSerializations, MutationSerialization.class.getName(),
            ResultSerialization.class.getName(), KeyValueSerialization.class.getName());
    context.setInput(Input.of(config.referenceName, new SourceInputFormatProvider(TableInputFormat.class, conf))
            .alias(config.columnFamily));
}

From source file: com.bah.culvert.util.ConfUtils.java

License: Apache License

/**
 * Unpack a configuration that has been prefixed. A '.' character must be used
 * to separate the prefixed keys from the prefix.
 *
 * @param prefix The prefix to unpack.
 * @param toUnpackFrom The configuration to unpack from.
 * @return The unpacked configuration.
 */
public static Configuration unpackConfigurationInPrefix(String prefix, Configuration toUnpackFrom) {
    Configuration conf = new Configuration(false);
    conf.clear();
    prefix += ".";
    for (Entry<String, String> entry : toUnpackFrom) {
        String key = entry.getKey();
        if (key.startsWith(prefix)) {
            key = key.substring(prefix.length());
            conf.set(key, entry.getValue());
        }
    }
    return conf;
}
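
A minimal usage sketch for the method above (the prefix and key names are illustrative), assuming entries were stored under a common prefix:

Configuration packed = new Configuration(false);
packed.set("culvert.table.name", "users");
packed.set("culvert.table.columns", "a,b");
packed.set("unrelated.key", "ignored");

Configuration unpacked = ConfUtils.unpackConfigurationInPrefix("culvert.table", packed);
// unpacked now contains name=users and columns=a,b; "unrelated.key" is
// dropped because it does not start with "culvert.table.".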