List of usage examples for org.apache.hadoop.conf Configuration getInt
public int getInt(String name, int defaultValue)
Gets the value of the name property as an int. If no such property exists, defaultValue is returned; if the stored value is not a valid int, an error is thrown.
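A minimal standalone sketch of the typical pattern (the property name "my.example.threads" and the values used are illustrative only, not taken from any of the projects below):

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The property is not set yet, so the default (4) is returned.
        int threads = conf.getInt("my.example.threads", 4);
        System.out.println(threads); // 4
        // Once the property is set, getInt parses and returns the stored value.
        conf.setInt("my.example.threads", 8);
        System.out.println(conf.getInt("my.example.threads", 4)); // 8
    }
}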
From source file:com.toshiba.mwcloud.gs.hadoop.mapreduce.GSRowInputFormat.java
License:Apache License
/**
 * Generate a list of GridDB InputSplit objects.<br/>
 * The number of InputSplits will be the smaller of the number of partitions
 * for input processing and the value of the property mapreduce.job.maps.
 * @param context JobContext object
 * @throws GSException an exception occurred in GridDB
 */
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    int numSplits = conf.getInt("mapreduce.job.maps", 1);
    GDInputFormat inputFormat = new GDInputFormat();
    List<InputSplit> splits = inputFormat.getSplitList(numSplits, conf);
    return splits;
}
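As a usage note (the value 4 is illustrative): a driver can cap the number of InputSplits produced above by setting the same property before submitting the job, after which getSplits() returns at most min(number of partitions, 4) splits:

Configuration conf = new Configuration();
conf.setInt("mapreduce.job.maps", 4);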
From source file:com.trendmicro.hdfs.webdav.Main.java
License:Apache License
private static InetSocketAddress getAddress(Configuration conf) {
    return NetUtils.createSocketAddr(conf.get("hadoop.webdav.bind.address", "0.0.0.0"),
            conf.getInt("hadoop.webdav.port", 8080));
}
From source file:com.tuplejump.calliope.hadoop.AbstractColumnFamilyRecordWriter.java
License:Apache License
protected AbstractColumnFamilyRecordWriter(Configuration conf) {
    this.conf = conf;
    this.ringCache = new RingCache(conf);
    this.queueSize = conf.getInt(AbstractColumnFamilyOutputFormat.QUEUE_SIZE,
            32 * FBUtilities.getAvailableProcessors());
    batchThreshold = conf.getLong(AbstractColumnFamilyOutputFormat.BATCH_THRESHOLD, 32);
    consistencyLevel = ConsistencyLevel.valueOf(ConfigHelper.getWriteConsistencyLevel(conf));
}
From source file:com.tuplejump.calliope.hadoop.ConfigHelper.java
License:Apache License
/**
 * @param conf The configuration to use.
 * @return Value (converts MBs to Bytes) set by
 *         {@link #setThriftFramedTransportSizeInMb(org.apache.hadoop.conf.Configuration, int)}
 *         or default of 15MB
 */
public static int getThriftFramedTransportSize(Configuration conf) {
    return conf.getInt(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB, 15) * 1024 * 1024; // 15MB is the default in Cassandra
}
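For illustration only (the value 30 is arbitrary): the setter referenced in the Javadoc stores megabytes, while the getter above returns bytes.

Configuration conf = new Configuration();
ConfigHelper.setThriftFramedTransportSizeInMb(conf, 30);
int sizeInBytes = ConfigHelper.getThriftFramedTransportSize(conf); // 30 * 1024 * 1024 bytes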
From source file:com.tuplejump.calliope.hadoop.cql3.CqlConfigHelper.java
License:Apache License
public static Integer getRangesInMultiRangeSplit(Configuration conf) {
    return conf.getInt(RANGES_PER_SPLIT, 1);
}
From source file:com.twitter.algebra.matrix.format.RowPartitioner.java
License:Apache License
@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    totalKeys = conf.getInt(TOTAL_KEYS, -1);
    checkTotalKeys();
}
From source file:com.twitter.algebra.matrix.multiply.AtBOuterStaticMapsideJoinJob.java
License:Apache License
public void run(Configuration conf, Path atPath, Path bPath, Path outPath, int outCardinality)
        throws IOException, InterruptedException, ClassNotFoundException {
    conf.setInt(OUT_CARD, outCardinality);
    @SuppressWarnings("deprecation")
    Job job = new Job(conf);
    job.setJobName(AtBOuterStaticMapsideJoinJob.class.getSimpleName());
    job.setJarByClass(AtBOuterStaticMapsideJoinJob.class);

    FileSystem fs = FileSystem.get(atPath.toUri(), conf);
    atPath = fs.makeQualified(atPath);
    bPath = fs.makeQualified(bPath);
    job.setInputFormatClass(CompositeInputFormat.class);
    // map-side join expression
    job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR,
            CompositeInputFormat.compose("inner", SequenceFileInputFormat.class, atPath, bPath));

    job.setOutputFormatClass(MatrixOutputFormat.class);
    outPath = fs.makeQualified(outPath);
    FileOutputFormat.setOutputPath(job, outPath);

    job.setMapperClass(MyMapper.class);
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(VectorWritable.class);

    job.setCombinerClass(MyReducer.class);
    int numReducers = conf.getInt("algebra.reduceslots.multiply", 10);
    job.setNumReduceTasks(numReducers);
    job.setReducerClass(MyReducer.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(VectorWritable.class);

    job.submit();
    boolean res = job.waitForCompletion(true);
    if (!res)
        throw new IOException("Job failed");
}
From source file:com.twitter.algebra.nmf.NMFCommon.java
License:Apache License
public static void setNumberOfMapSlots(Configuration conf, FileSystem fs, Path[] paths, String joblabel) {
    if (conf.get(MAPSPLOTS) == null)
        return;
    int mapSlots = conf.getInt(MAPSPLOTS, 1);
    mapSlots = conf.getInt(MAPSPLOTS + "." + joblabel, mapSlots);
    long du = 0;
    try {
        for (Path path : paths)
            du += MapDir.du(path, fs);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    long splitSize = du / mapSlots;
    log.info("du: " + du + " mapSlots: " + mapSlots + " splitSize: " + splitSize);
    long minSplitSize = (long) (splitSize * 0.9);
    long maxSplitSize = Math.max((long) (splitSize * 1.1), 1024 * 1024);
    conf.setLong("mapred.min.split.size", minSplitSize);
    conf.setLong("mapreduce.min.split.size", minSplitSize);
    conf.setLong("mapred.max.split.size", maxSplitSize);
    conf.setLong("mapreduce.max.split.size", maxSplitSize);
}
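As a rough worked example with assumed inputs: if the paths total du = 10 GB (10240 MB) and mapSlots = 20, then splitSize = 512 MB, so the minimum split size is set to about 460 MB and the maximum to about 563 MB, nudging the framework toward roughly 20 map tasks.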
From source file:com.twitter.algebra.nmf.NMFCommon.java
License:Apache License
public static int getNumberOfReduceSlots(Configuration conf, String joblabel) {
    int redSlots = conf.getInt(REDUCESLOTS, DEFAULT_REDUCESPLOTS);
    redSlots = conf.getInt(REDUCESLOTS + "." + joblabel, redSlots);
    return redSlots;
}
From source file:com.twitter.algebra.nmf.NMFCommon.java
License:Apache License
public static int computeOptColPartitionsForMemCombiner(Configuration conf, int rows, int cols) {
    final int MB = 1024 * 1024;
    final int MEMBYTES = conf.getInt("mapreduce.map.memory.mb", 1024);
    int availableMem = (MEMBYTES - 512 /* jvm */) / 2; // use only half for the combiner
    int colParts = (int) (rows / (float) availableMem / MB * cols * 8); // 8 bytes per double element
    return colParts;
}
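Because the expression is evaluated left to right, it works out to rows * cols * 8 bytes (the size of the dense double matrix) divided by availableMem * MB. As an illustrative calculation with the default 1024 MB of map memory (availableMem = 256 MB), rows = 1,000,000 and cols = 100: 800,000,000 bytes against a budget of 256 MB (about 268 million bytes) gives roughly 2.98, which the int cast truncates to 2 column partitions.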