List of usage examples for org.apache.hadoop.conf.Configuration.set

public void set(String name, String value)

Set the value of the name property.
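Before the project-specific examples, here is a minimal standalone sketch of set() and the matching get(); the property key "example.greeting" is purely illustrative and not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a string value under an arbitrary property name.
        conf.set("example.greeting", "hello");
        // get() returns the value most recently stored with set().
        System.out.println(conf.get("example.greeting")); // prints "hello"
    }
}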
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setInputNumRangeBin(Configuration conf, String binname) {
    log.info("setting " + INPUT_NUMRANGE_BIN + " to " + binname);
    conf.set(INPUT_NUMRANGE_BIN, binname);
}
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setOutputHost(Configuration conf, String host) {
    log.info("setting " + OUTPUT_HOST + " to " + host);
    conf.set(OUTPUT_HOST, host);
}
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setOutputNamespace(Configuration conf, String namespace) {
    log.info("setting " + OUTPUT_NAMESPACE + " to " + namespace);
    conf.set(OUTPUT_NAMESPACE, namespace);
}
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setOutputSetName(Configuration conf, String setname) {
    log.info("setting " + OUTPUT_SETNAME + " to " + setname);
    conf.set(OUTPUT_SETNAME, setname);
}
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setOutputBinName(Configuration conf, String binname) {
    log.info("setting " + OUTPUT_BINNAME + " to " + binname);
    conf.set(OUTPUT_BINNAME, binname);
}
From source file:com.aerospike.hadoop.mapreduce.AerospikeConfigUtil.java
License:Apache License
public static void setOutputKeyName(Configuration conf, String keyname) {
    log.info("setting " + OUTPUT_KEYNAME + " to " + keyname);
    conf.set(OUTPUT_KEYNAME, keyname);
}
From source file:com.ailk.oci.ocnosql.tools.load.csvbulkload.CsvBulkImportUtil.java
License:Apache License
/**
 * Configure a job configuration for a bulk CSV import.
 *
 * @param conf job configuration to be set up
 * @param tableName name of the table to be imported to, can include a schema name
 * @param fieldDelimiter field delimiter character for the CSV input
 * @param arrayDelimiter array delimiter character, can be null
 * @param columnInfoList list of columns to be imported
 * @param ignoreInvalidRows flag to ignore invalid input rows
 */
public static void initCsvImportJob(Configuration conf, String tableName, char fieldDelimiter,
        String arrayDelimiter, List<ColumnInfo> columnInfoList, boolean ignoreInvalidRows) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkNotNull(columnInfoList);
    Preconditions.checkArgument(!columnInfoList.isEmpty(), "Column info list is empty");
    conf.set(PhoenixCsvToKeyValueMapper.TABLE_NAME_CONFKEY, tableName);
    conf.set(PhoenixCsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY, String.valueOf(fieldDelimiter));
    if (arrayDelimiter != null) {
        conf.set(PhoenixCsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arrayDelimiter);
    }
    PhoenixCsvToKeyValueMapper.configureColumnInfoList(conf, columnInfoList);
    conf.setBoolean(PhoenixCsvToKeyValueMapper.IGNORE_INVALID_ROW_CONFKEY, ignoreInvalidRows);
}
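The example above distinguishes required values (validated with Guava Preconditions before anything is written) from optional ones (set only when non-null, with readers falling back to a default). A minimal sketch of that pattern, with made-up property names:

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;

public class RequiredVsOptionalExample {
    static void configure(Configuration conf, String tableName, String arrayDelimiter) {
        // Required values are validated before anything is written to the configuration.
        Preconditions.checkNotNull(tableName, "tableName must not be null");
        conf.set("example.table.name", tableName);
        // Optional values are only set when present; readers supply a default on get().
        if (arrayDelimiter != null) {
            conf.set("example.array.delimiter", arrayDelimiter);
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        configure(conf, "MY_TABLE", null);
        System.out.println(conf.get("example.array.delimiter", ":")); // prints ":"
    }
}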
From source file:com.ailk.oci.ocnosql.tools.load.csvbulkload.CsvBulkLoadTool.java
License:Apache License
/**
 * Set configuration values based on parsed command line options.
 *
 * @param cmdLine supplied command line options
 * @param importColumns descriptors of columns to be imported
 * @param conf job configuration
 */
@VisibleForTesting
static void configureOptions(CommandLine cmdLine, List<ColumnInfo> importColumns, Configuration conf) {
    char delimiterChar = ',';
    if (cmdLine.hasOption(DELIMITER_OPT.getOpt())) {
        String delimString = cmdLine.getOptionValue(DELIMITER_OPT.getOpt());
        if (delimString.length() != 1) {
            throw new IllegalArgumentException("Illegal delimiter character: " + delimString);
        }
        delimiterChar = delimString.charAt(0);
    }
    /*
    if (cmdLine.hasOption(ZK_QUORUM_OPT.getOpt())) {
        String zkQuorum = cmdLine.getOptionValue(ZK_QUORUM_OPT.getOpt());
        LOG.info("Configuring ZK quorum to {}", zkQuorum);
        conf.set(HConstants.ZOOKEEPER_QUORUM, zkQuorum);
    }
    */
    // Copy the row-prefix, row, and unique-index column options into the Configuration.
    String rpCols = cmdLine.getOptionValue(ROW_PREFIX_COLUMNS_OPT.getOpt());
    LOG.info("Configuring row prefix columns to {}", rpCols);
    conf.set(PhoenixCsvToKeyValueMapper.ROW_PREFIX_COLUMNS, rpCols);
    if (cmdLine.hasOption(ROW_PREFIX_ALG_OPT.getOpt())) {
        String rowPrefixAlg = cmdLine.getOptionValue(ROW_PREFIX_ALG_OPT.getOpt());
        LOG.info("Configuring row prefix alg to {}", rowPrefixAlg);
        conf.set(PhoenixCsvToKeyValueMapper.ROW_PREFIX_ALG, rowPrefixAlg);
    }
    String rCols = cmdLine.getOptionValue(ROW_COLUMNS_OPT.getOpt());
    LOG.info("Configuring row columns to {}", rCols);
    conf.set(PhoenixCsvToKeyValueMapper.ROW_COLUMNS, rCols);
    if (cmdLine.hasOption(UNIQUE_INDEX_COLUMNS_OPT.getOpt())) {
        String uniqueIndexColumns = cmdLine.getOptionValue(UNIQUE_INDEX_COLUMNS_OPT.getOpt());
        LOG.info("Configuring unique index columns to {}", uniqueIndexColumns);
        conf.set(PhoenixCsvToKeyValueMapper.UNIQUE_INDEX_COLUMNS, uniqueIndexColumns);
    }
    CsvBulkImportUtil.initCsvImportJob(conf,
            getQualifiedTableName(cmdLine.getOptionValue(SCHEMA_NAME_OPT.getOpt()),
                    cmdLine.getOptionValue(TABLE_NAME_OPT.getOpt())),
            delimiterChar, cmdLine.getOptionValue(ARRAY_DELIMITER_OPT.getOpt()), importColumns,
            cmdLine.hasOption(IGNORE_ERRORS_OPT.getOpt()));
}
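The core pattern above, copying Commons CLI options into the Configuration only when they were actually supplied, can be sketched as follows; the option letter and property name are hypothetical, not from the project:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;

public class CliToConfExample {
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption("d", "delimiter", true, "field delimiter");
        CommandLine cmd = new DefaultParser().parse(options, args);

        Configuration conf = new Configuration();
        // Only write the property when the option was given on the command line.
        if (cmd.hasOption("d")) {
            conf.set("example.delimiter", cmd.getOptionValue("d"));
        }
        // Readers fall back to a default when the option was omitted.
        System.out.println(conf.get("example.delimiter", ","));
    }
}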
From source file:com.ailk.oci.ocnosql.tools.load.csvbulkload.PhoenixCsvToKeyValueMapper.java
License:Apache License
/**
 * Write the list of to-import columns to a job configuration.
 *
 * @param conf configuration to be written to
 * @param columnInfoList list of ColumnInfo objects to be configured for import
 */
@VisibleForTesting
static void configureColumnInfoList(Configuration conf, List<ColumnInfo> columnInfoList) {
    conf.set(COLUMN_INFO_CONFKEY, Joiner.on("|").useForNull("").join(columnInfoList));
}
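Because a Configuration value is a single string, the example above packs the whole column list into one key with Guava's Joiner. A self-contained sketch of that packing, with an illustrative key and plain string values standing in for ColumnInfo:

import com.google.common.base.Joiner;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;

public class JoinerConfExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        List<String> columns = Arrays.asList("ID", "NAME", null);
        // Pack the list into one pipe-delimited value; nulls become empty strings.
        conf.set("example.columns", Joiner.on("|").useForNull("").join(columns));
        System.out.println(conf.get("example.columns")); // prints "ID|NAME|"
    }
}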
From source file:com.ailk.oci.ocnosql.tools.load.mutiple.MutipleColumnImportTsv.java
License:Apache License
/**
 * Sets up the actual job.
 *
 * @param conf The current configuration.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String tableName, String inputPath,
        String tmpOutputPath) throws IOException, ClassNotFoundException {
    // Support non-XML supported characters
    // by re-encoding the passed separator as a Base64 string.
    String actualSeparator = conf.get(CommonConstants.SEPARATOR);
    if (actualSeparator != null) {
        conf.set(CommonConstants.SEPARATOR, Base64.encodeBytes(actualSeparator.getBytes()));
    }
    String tableNameConf = conf.get(CommonConstants.TABLE_NAME);
    if (tableNameConf == null) {
        conf.set(CommonConstants.TABLE_NAME, tableName);
    }

    // See if a non-default Mapper was set
    String mapperClassName = conf.get(MAPPER_CONF_KEY);
    Class mapperClass = mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER;

    Path inputDir = new Path(inputPath);
    Job job = new Job(conf, NAME + "_" + tableName);
    job.setJarByClass(MutipleColumnImportTsv.class);
    FileInputFormat.setInputPaths(job, inputDir);

    // The InputFormat can be overridden via -Dimporttsv.inputFormat; the default is TextInputFormat.
    String inputFmtName = conf.get(CommonConstants.INPUTFORMAT,
            "org.apache.hadoop.mapreduce.lib.input.TextInputFormat");
    LOG.info(CommonConstants.INPUTFORMAT + " is " + inputFmtName);
    Class<? extends InputFormat> inputFmtClass = Class.forName(inputFmtName).asSubclass(InputFormat.class);
    job.setInputFormatClass(inputFmtClass);
    job.setMapperClass(mapperClass);

    String hfileOutPath = tmpOutputPath;
    if (hfileOutPath != null) {
        if (!doesTableExist(tableName)) {
            createTable(conf, tableName);
        }
        HTable table = new HTable(conf, tableName);
        // job.setReducerClass(MutipleColumnReducer.class);
        Path outputDir = new Path(hfileOutPath);
        FileOutputFormat.setOutputPath(job, outputDir);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);
        HFileOutputFormat.configureIncrementalLoad(job, table);
    } else {
        // No reducers. Just write straight to table. Call initTableReducerJob
        // to set up the TableOutputFormat.
        TableMapReduceUtil.initTableReducerJob(tableName, null, job);
        job.setNumReduceTasks(0);
    }

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
            com.google.common.base.Function.class /* Guava used by TsvParser */);
    return job;
}
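The separator re-encoding at the top of the example exists because Configuration is XML-backed, so raw control characters cannot be stored safely. A small sketch of the same round trip, using java.util.Base64 instead of the HBase Base64 helper and an illustrative property name:

import java.util.Base64;
import org.apache.hadoop.conf.Configuration;

public class SeparatorEncodingExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String separator = "\u0001"; // a control character that is not XML-safe
        // Store the Base64 form so the raw bytes never appear in the XML-backed config.
        conf.set("example.separator", Base64.getEncoder().encodeToString(separator.getBytes()));
        // Decode on the other side (e.g., in the mapper).
        String decoded = new String(Base64.getDecoder().decode(conf.get("example.separator")));
        System.out.println(decoded.equals(separator)); // prints "true"
    }
}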