List of usage examples for org.apache.hadoop.conf.Configuration.getInt

public int getInt(String name, int defaultValue)

Parameter: name – the name of the property to look up.
Returns: the value of the name property as an int; if no such property exists, defaultValue is returned.
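Before the project examples, here is a minimal, self-contained sketch of that contract (the property names example.retries and example.timeout.ms are invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetIntDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.retries", "5");

        // The property is set and parses as an int, so getInt returns 5.
        int retries = conf.getInt("example.retries", 3);

        // The property is absent, so the supplied default (30000) is returned.
        int timeoutMs = conf.getInt("example.timeout.ms", 30000);

        System.out.println(retries + " " + timeoutMs);
    }
}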
. From source file:com.avira.couchdoop.exp.CouchbaseOutputFormat.java
License:Apache License
public RecordWriter<String, CouchbaseAction> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    ExportArgs args;
    try {
        args = new ExportArgs(conf);
    } catch (ArgsException e) {
        throw new IllegalArgumentException(e);
    }
    CouchbaseRecordWriter couchbaseRecordWriter = new CouchbaseRecordWriter(args.getUrls(), args.getBucket(),
            args.getPassword());
    couchbaseRecordWriter
            .setExpBackoffMaxTries(conf.getInt(CONF_EXP_BACKOFF_MAX_TRIES_PER_TASK, EXP_BACKOFF_MAX_TRIES));
    couchbaseRecordWriter.setExpBackoffMaxRetryInterval(
            conf.getInt(CONF_EXP_BACKOFF_MAX_RETRY_INTERVAL_PER_TASK, EXP_BACKOFF_MAX_RETRY_INTERVAL));
    couchbaseRecordWriter.setExpBackoffMaxTotalTimeout(
            conf.getInt(CONF_EXP_BACKOFF_MAX_TOTAL_TIMEOUT_PER_TASK, EXP_BACKOFF_MAX_TOTAL_TIMEOUT));
    return couchbaseRecordWriter;
}
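On the job-submission side, these per-task backoff properties would be populated with the matching conf.setInt calls. A hypothetical driver snippet, assuming the CONF_* constants above are publicly visible; the numeric values are illustrative:

Configuration conf = job.getConfiguration();
// Illustrative values; when a property is left unset, the defaults above apply.
conf.setInt(CouchbaseOutputFormat.CONF_EXP_BACKOFF_MAX_TRIES_PER_TASK, 16);
conf.setInt(CouchbaseOutputFormat.CONF_EXP_BACKOFF_MAX_RETRY_INTERVAL_PER_TASK, 10000);
conf.setInt(CouchbaseOutputFormat.CONF_EXP_BACKOFF_MAX_TOTAL_TIMEOUT_PER_TASK, 60000);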
From source file:com.avira.couchdoop.imp.ImportViewArgs.java
License:Apache License
@Override
public void loadFromHadoopConfiguration(Configuration conf) throws ArgsException {
    super.loadFromHadoopConfiguration(conf);
    designDocumentName = conf.get(ARG_DESIGNDOC_NAME.getPropertyName());
    viewName = conf.get(ARG_VIEW_NAME.getPropertyName());
    viewKeys = parseViewKeys(conf);
    output = conf.get(ARG_OUTPUT.getPropertyName());
    documentsPerPage = conf.getInt(ARG_DOCS_PER_PAGE.getPropertyName(), 1024);
    // numMappers defaults to the number of viewKeys
    numMappers = conf.getInt(ARG_NUM_MAPPERS.getPropertyName(), viewKeys.length);
}
From source file:com.avira.couchdoop.update.CouchbaseUpdateMapper.java
License:Apache License
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    ExportArgs args;
    try {
        args = new ExportArgs(conf);
    } catch (ArgsException e) {
        throw new IllegalArgumentException(e);
    }

    // Create and configure queue.
    int queueSize = conf.getInt(PROPERTY_QUEUE_SIZE, 4096);
    bulkSize = conf.getInt(PROPERTY_BULK_SIZE, 1024);
    queue = new LinkedBlockingQueue<HadoopInput<T>>(queueSize);

    LOGGER.info("Connecting to Couchbase...");
    couchbaseClient = new CouchbaseClient(args.getUrls(), args.getBucket(), args.getPassword());
    LOGGER.info("Connected to Couchbase.");

    // Start the consumer thread.
    LOGGER.info("Starting consumer thread...");
    consumer = new Consumer(context);
    consumer.start();
    LOGGER.info("Consumer thread started.");
}
From source file:com.bah.culvert.accumulo.database.AccumuloDatabaseAdapter.java
License:Apache License
@Override
public TableAdapter getTableAdapter(String tableName) {
    Configuration conf = this.getConf();
    return new AccumuloTableAdapter(this.conn, tableName, conf.getLong(MAX_MEMORY_KEY, DEFAULT_MAX_MEMORY),
            conf.getLong(MAX_LATENCY_KEY, DEFAULT_MAX_LATENCY),
            conf.getInt(MAX_THREADS_KEY, DEFAULT_MAX_THREADS));
}
From source file:com.bah.lucene.BlockCacheDirectoryFactoryV1.java
License:Apache License
public BlockCacheDirectoryFactoryV1(Configuration configuration, long totalNumberOfBytes) {
    // setup block cache
    // 134,217,728 is the slab size, therefore there are 16,384 blocks
    // in a slab when using a block size of 8,192
    int numberOfBlocksPerSlab = 16384;
    int blockSize = BlockDirectory.BLOCK_SIZE;
    int slabCount = configuration.getInt(BLUR_SHARD_BLOCKCACHE_SLAB_COUNT, -1);
    slabCount = getSlabCount(slabCount, numberOfBlocksPerSlab, blockSize, totalNumberOfBytes);
    Cache cache;
    if (slabCount >= 1) {
        BlockCache blockCache;
        boolean directAllocation = configuration.getBoolean(BLUR_SHARD_BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
        int slabSize = numberOfBlocksPerSlab * blockSize;
        LOG.info(MessageFormat.format(
                "Number of slabs of block cache [{0}] with direct memory allocation set to [{1}]", slabCount,
                directAllocation));
        LOG.info(MessageFormat.format(
                "Block cache target memory usage, slab size of [{0}] will allocate [{1}] slabs and use ~[{2}] bytes",
                slabSize, slabCount, ((long) slabCount * (long) slabSize)));
        try {
            long totalMemory = (long) slabCount * (long) numberOfBlocksPerSlab * (long) blockSize;
            blockCache = new BlockCache(directAllocation, totalMemory, slabSize);
        } catch (OutOfMemoryError e) {
            if ("Direct buffer memory".equals(e.getMessage())) {
                System.err.println(
                        "The max direct memory is too low. Either increase by setting (-XX:MaxDirectMemorySize=<size>g -XX:+UseLargePages) or disable direct allocation by (blur.shard.blockcache.direct.memory.allocation=false) in blur-site.properties");
                System.exit(1);
            }
            throw e;
        }
        cache = new BlockDirectoryCache(blockCache);
    } else {
        cache = BlockDirectory.NO_CACHE;
    }
    _cache = cache;
}
From source file:com.bah.lucene.BlockCacheDirectoryFactoryV2.java
License:Apache License
public BlockCacheDirectoryFactoryV2(Configuration configuration, long totalNumberOfBytes) {
    final int fileBufferSizeInt = configuration.getInt(BLUR_SHARD_BLOCK_CACHE_V2_FILE_BUFFER_SIZE, 8192);
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_FILE_BUFFER_SIZE, fileBufferSizeInt));
    final int cacheBlockSizeInt = configuration.getInt(BLUR_SHARD_BLOCK_CACHE_V2_CACHE_BLOCK_SIZE, 8192);
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_CACHE_BLOCK_SIZE, cacheBlockSizeInt));
    final Map<String, Integer> cacheBlockSizeMap = new HashMap<String, Integer>();
    for (Entry<String, String> prop : configuration) {
        String key = prop.getKey();
        if (key.startsWith(BLUR_SHARD_BLOCK_CACHE_V2_CACHE_BLOCK_SIZE_PREFIX)) {
            String value = prop.getValue();
            int cacheBlockSizeForFile = Integer.parseInt(value);
            String fieldType = key.substring(BLUR_SHARD_BLOCK_CACHE_V2_CACHE_BLOCK_SIZE_PREFIX.length());
            cacheBlockSizeMap.put(fieldType, cacheBlockSizeForFile);
            LOG.info(MessageFormat.format("{0}={1} for file type [{2}]", key, cacheBlockSizeForFile, fieldType));
        }
    }
    final STORE store = STORE.valueOf(configuration.get(BLUR_SHARD_BLOCK_CACHE_V2_STORE, OFF_HEAP));
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_STORE, store));
    final Set<String> cachingFileExtensionsForRead = getSet(
            configuration.get(BLUR_SHARD_BLOCK_CACHE_V2_READ_CACHE_EXT, DEFAULT_VALUE));
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_READ_CACHE_EXT,
            cachingFileExtensionsForRead));
    final Set<String> nonCachingFileExtensionsForRead = getSet(
            configuration.get(BLUR_SHARD_BLOCK_CACHE_V2_READ_NOCACHE_EXT, DEFAULT_VALUE));
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_READ_NOCACHE_EXT,
            nonCachingFileExtensionsForRead));
    final boolean defaultReadCaching = configuration.getBoolean(BLUR_SHARD_BLOCK_CACHE_V2_READ_DEFAULT, true);
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_READ_DEFAULT, defaultReadCaching));
    final Set<String> cachingFileExtensionsForWrite = getSet(
            configuration.get(BLUR_SHARD_BLOCK_CACHE_V2_WRITE_CACHE_EXT, DEFAULT_VALUE));
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_WRITE_CACHE_EXT,
            cachingFileExtensionsForWrite));
    final Set<String> nonCachingFileExtensionsForWrite = getSet(
            configuration.get(BLUR_SHARD_BLOCK_CACHE_V2_WRITE_NOCACHE_EXT, DEFAULT_VALUE));
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_WRITE_NOCACHE_EXT,
            nonCachingFileExtensionsForWrite));
    final boolean defaultWriteCaching = configuration.getBoolean(BLUR_SHARD_BLOCK_CACHE_V2_WRITE_DEFAULT, true);
    LOG.info(MessageFormat.format("{0}={1}", BLUR_SHARD_BLOCK_CACHE_V2_WRITE_DEFAULT, defaultWriteCaching));
    Size fileBufferSize = new Size() {
        @Override
        public int getSize(CacheDirectory directory, String fileName) {
            return fileBufferSizeInt;
        }
    };
    Size cacheBlockSize = new Size() {
        @Override
        public int getSize(CacheDirectory directory, String fileName) {
            String ext = getExt(fileName);
            Integer size = cacheBlockSizeMap.get(ext);
            if (size != null) {
                return size;
            }
            return cacheBlockSizeInt;
        }
    };
    FileNameFilter readFilter = new FileNameFilter() {
        @Override
        public boolean accept(CacheDirectory directory, String fileName) {
            String ext = getExt(fileName);
            if (cachingFileExtensionsForRead.contains(ext)) {
                return true;
            } else if (nonCachingFileExtensionsForRead.contains(ext)) {
                return false;
            }
            return defaultReadCaching;
        }
    };
    FileNameFilter writeFilter = new FileNameFilter() {
        @Override
        public boolean accept(CacheDirectory directory, String fileName) {
            String ext = getExt(fileName);
            if (cachingFileExtensionsForWrite.contains(ext)) {
                return true;
            } else if (nonCachingFileExtensionsForWrite.contains(ext)) {
                return false;
            }
            return defaultWriteCaching;
        }
    };
    Quiet quiet = new Quiet() {
        @Override
        public boolean shouldBeQuiet(CacheDirectory directory, String fileName) {
            Thread thread = Thread.currentThread();
            String name = thread.getName();
            if (name.startsWith(SHARED_MERGE_SCHEDULER)) {
                return true;
            }
            return false;
        }
    };
    _cache = new BaseCache(totalNumberOfBytes, fileBufferSize, cacheBlockSize, readFilter, writeFilter, quiet,
            store);
}
From source file:com.basho.riak.hadoop.config.RiakConfig.java
License:Apache License
/**
 * Get the hadoop cluster size property, providing a default in case it hasn't
 * been set.
 *
 * @param conf
 *            the {@link Configuration} to get the property value from
 * @param defaultValue
 *            the default size to use if it hasn't been set
 * @return the hadoop cluster size or <code>defaultValue</code>
 */
public static int getHadoopClusterSize(Configuration conf, int defaultValue) {
    return conf.getInt(CLUSTER_SIZE_PROPERTY, defaultValue);
}
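A caller would then read the setting through this helper rather than touching the property key directly. A sketch; the default of 3 here is arbitrary:

Configuration conf = job.getConfiguration();
// Falls back to 3 when CLUSTER_SIZE_PROPERTY has not been set on the job.
int clusterSize = RiakConfig.getHadoopClusterSize(conf, 3);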
From source file:com.baynote.kafka.hadoop.KafkaInputFormat.java
License:Apache License
/**
 * Gets the Zookeeper session timeout set by {@link #setZkSessionTimeoutMs(Job, int)}, defaulting to
 * {@link #DEFAULT_ZK_SESSION_TIMEOUT_MS} if it has not been set.
 *
 * @param conf
 *            the job conf.
 * @return the Zookeeper session timeout.
 */
public static int getZkSessionTimeoutMs(final Configuration conf) {
    return conf.getInt("kafka.zk.session.timeout.ms", DEFAULT_ZK_SESSION_TIMEOUT_MS);
}
From source file:com.baynote.kafka.hadoop.KafkaInputFormat.java
License:Apache License
/**
 * Gets the Zookeeper connection timeout set by {@link #setZkConnectionTimeoutMs(Job, int)}, defaulting to
 * {@link #DEFAULT_ZK_CONNECTION_TIMEOUT_MS} if it has not been set.
 *
 * @param conf
 *            the job conf.
 * @return the Zookeeper connection timeout.
 */
public static int getZkConnectionTimeoutMs(final Configuration conf) {
    return conf.getInt("kafka.zk.connection.timeout.ms", DEFAULT_ZK_CONNECTION_TIMEOUT_MS);
}
From source file:com.baynote.kafka.hadoop.KafkaInputFormat.java
License:Apache License
/**
 * Gets the maximum number of splits per partition set by {@link #setMaxSplitsPerPartition(Job, int)}, returning
 * {@link Integer#MAX_VALUE} by default.
 *
 * @param conf
 *            the job conf
 * @return the maximum number of splits, {@link Integer#MAX_VALUE} by default.
 */
public static int getMaxSplitsPerPartition(final Configuration conf) {
    return conf.getInt("kafka.max.splits.per.partition", DEFAULT_MAX_SPLITS_PER_PARTITION);
}
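Each of these KafkaInputFormat getters pairs with a setter referenced in its javadoc (setZkSessionTimeoutMs, setZkConnectionTimeoutMs, setMaxSplitsPerPartition). The setter bodies are not shown in this listing, but the natural inverse is a one-line setInt under the same key. A sketch of one such setter, not the project's actual code:

public static void setMaxSplitsPerPartition(final Job job, final int maxSplitsPerPartition) {
    // Stores the value under the key that getMaxSplitsPerPartition reads back.
    job.getConfiguration().setInt("kafka.max.splits.per.partition", maxSplitsPerPartition);
}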