List of usage examples for org.apache.hadoop.conf.Configuration getLong

public long getLong(String name, long defaultValue)

Gets the value of the name property as a long. If no such property exists, the provided defaultValue is returned.

Parameters:
name - the property name
defaultValue - the default value to return when the property is not set
Returns:
the value of the name property as a long, or defaultValue if the property is unset
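Before the per-project examples, here is a minimal, self-contained sketch of the call. The class name GetLongExample and the property name example.timeout.ms are hypothetical, chosen only for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Nothing is set under this (hypothetical) key yet,
        // so the supplied default of 30000 is returned.
        long unset = conf.getLong("example.timeout.ms", 30000L);

        // Once the property is set, getLong parses the stored value.
        conf.setLong("example.timeout.ms", 5000L);
        long set = conf.getLong("example.timeout.ms", 30000L);

        System.out.println(unset + " " + set); // prints: 30000 5000
    }
}

This default-fallback behavior is the pattern nearly every example below relies on: each caller passes a site-specific key plus a hard-coded default.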
From source file:andromache.hadoop.CassandraRecordWriter.java
License:Apache License
CassandraRecordWriter(TaskAttemptContext context) throws IOException {
    this.progressable = context;
    Configuration conf = context.getConfiguration();
    int queueSize = conf.getInt(QUEUE_SIZE, 256);
    int batchSize = conf.getInt(BATCH_SIZE, 32);
    long batchDelay = conf.getLong(BATCH_DELAY_MS, 200);
    ConsistencyLevel consistencyLevel = CassandraConfigHelper.getWriteConsistencyLevel(conf);
    CassandraClientFactory cassandraClientFactory = new CassandraClientFactory(
            CassandraConfigHelper.getOutputTransportFactory(conf),
            CassandraConfigHelper.getOutputRpcPort(conf));
    this.rangeThreadsCache = new RangeThreadsCache(conf, cassandraClientFactory, queueSize, batchSize,
            batchDelay, progressable, consistencyLevel);
    log.info("Using consistency level of {}", consistencyLevel);
}
From source file:backup.datanode.DataNodeBackupProcessor.java
License:Apache License
public DataNodeBackupProcessor(Configuration conf, DataNode datanode) throws Exception {
    super(conf);
    _retryDelay = conf.getLong(DFS_BACKUP_DATANODE_BACKUP_RETRY_DELAY_KEY,
            DFS_BACKUP_DATANODE_RETRY_DELAY_DEFAULT);
    _backupStore = _closer.register(BackupStore.create(BackupUtil.convert(conf)));
    _datanode = datanode;
    _nameNodeClient = new NameNodeClient(conf, UserGroupInformation.getCurrentUser());
}
From source file:backup.datanode.DataNodeBackupProcessorBase.java
License:Apache License
public DataNodeBackupProcessorBase(Configuration conf) throws Exception {
    int backupThreads = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_KEY,
            DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_DEFAULT);
    int queueDepth = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_KEY,
            DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_DEFAULT);
    _defaultAge = conf.getLong(DFS_BACKUP_DATANODE_BACKUP_AGE_KEY, DFS_BACKUP_DATANODE_BACKUP_AGE_DEFAULT);
    _closer = Closer.create();
    _service = _closer.register(Executors.newFixedThreadPool(backupThreads + 1));
    _backupQueue = new PriorityBlockingQueue<>(queueDepth);
    _backupQueueDepth = Metrics.METRICS.counter(QUEUE_BACKUP);
    _enqueueBackupDropMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_DROP);
    _enqueueBackupRetryMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_RETRY);
    _backupThroughput = Metrics.METRICS.meter(BACKUP_THROUGHPUT);
    startBackupThreads(backupThreads);
}
From source file:backup.datanode.DataNodeRestoreProcessor.java
License:Apache License
public DataNodeRestoreProcessor(Configuration conf, DataNode datanode) throws Exception {
    _closer = Closer.create();
    _datanode = datanode;
    _restoreThroughput = Metrics.METRICS.meter(RESTORE_THROUGHPUT);
    _bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
            DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    _checksumType = Type
            .valueOf(conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT));
    int threads = conf.getInt(DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_KEY,
            DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_DEFAULT);
    long pauseOnError = conf.getLong(DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_KEY,
            DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_DEFAULT);
    _backupStore = _closer.register(BackupStore.create(BackupUtil.convert(conf)));
    _restoreBlocks = new ArrayBlockingQueue<>(threads);
    _executorService = Executors.newCachedThreadPool();
    _closer.register((Closeable) () -> _executorService.shutdownNow());
    for (int t = 0; t < threads; t++) {
        _executorService.submit(Executable.createDaemon(LOG, pauseOnError, _running, () -> restoreBlocks()));
    }
}
From source file:backup.namenode.NameNodeBackupBlockCheckProcessor.java
License:Apache License
public NameNodeBackupBlockCheckProcessor(Configuration conf, NameNodeRestoreProcessor processor,
        NameNode namenode, UserGroupInformation ugi) throws Exception {
    String[] nnStorageLocations = conf.getStrings(DFS_NAMENODE_NAME_DIR);
    URI uri = new URI(nnStorageLocations[0]);
    _reportPath = new File(new File(uri.getPath()).getParent(), "backup-reports");
    _reportPath.mkdirs();
    if (!_reportPath.exists()) {
        throw new IOException("Report path " + _reportPath + " does not exist");
    }
    this.ugi = ugi;
    this.namenode = namenode;
    this.conf = conf;
    this.processor = processor;
    backupStore = BackupStore.create(BackupUtil.convert(conf));
    this.fileSystem = (DistributedFileSystem) FileSystem.get(conf);
    this.ignorePath = conf.get(DFS_BACKUP_IGNORE_PATH_FILE_KEY, DFS_BACKUP_IGNORE_PATH_FILE_DEFAULT);
    this.batchSize = conf.getInt(DFS_BACKUP_REMOTE_BACKUP_BATCH_KEY, DFS_BACKUP_REMOTE_BACKUP_BATCH_DEFAULT);
    this.checkInterval = conf.getLong(DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_KEY,
            DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DEFAULT);
    this.initInterval = conf.getLong(DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DELAY_KEY,
            DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DELAY_DEFAULT);
    start();
}
From source file:backup.namenode.NameNodeRestoreProcessor.java
License:Apache License
public NameNodeRestoreProcessor(Configuration conf, NameNode namenode, UserGroupInformation ugi)
        throws Exception {
    this.ugi = ugi;
    this.conf = conf;
    this.namesystem = namenode.getNamesystem();
    this.blockManager = namesystem.getBlockManager();
    Cache<ExtendedBlock, Boolean> cache = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES)
            .build();
    currentRequestedRestore = Collections.newSetFromMap(cache.asMap());
    pollTime = conf.getLong(DFS_BACKUP_NAMENODE_MISSING_BLOCKS_POLL_TIME_KEY,
            DFS_BACKUP_NAMENODE_MISSING_BLOCKS_POLL_TIME_DEFAULT);
    blockCheck = new NameNodeBackupBlockCheckProcessor(conf, this, namenode, ugi);
    start();
}
From source file:be.uantwerpen.adrem.hadoop.util.SplitByKTextInputFormat.java
License:Apache License
/**
 * Gets the total number of lines in the file. If NUMBER_OF_LINES_KEY is set in the
 * configuration, that value is returned instead of counting.
 *
 * @param conf hadoop configuration object
 * @param fileName name of file to count
 * @return the number of lines in the file
 * @throws IOException
 */
public static long getTotalNumberOfLines(Configuration conf, Path fileName) throws IOException {
    long nrLines = conf.getLong(NUMBER_OF_LINES_KEY, -1);
    if (nrLines != -1) {
        return nrLines;
    }
    try {
        FSDataInputStream in = fileName.getFileSystem(conf).open(fileName);
        LineReader lr = new LineReader(in, conf);
        Text text = new Text();
        nrLines = 0;
        while (lr.readLine(text) > 0) {
            nrLines++;
        }
        in.close();
        return nrLines;
    } catch (IOException e) {
        e.printStackTrace();
    }
    return 0;
}
From source file:be.ugent.intec.halvade.utils.HalvadeConf.java
License:Open Source License
public static long getRefSize(Configuration conf) {
    return conf.getLong(refSize, HUMAN_REF_SIZE);
}
From source file:cascading.flow.tez.planner.Hadoop2TezFlowStepJob.java
License:Open Source License
private static long getStoreInterval(Configuration configuration) {
    return configuration.getLong(STATS_STORE_INTERVAL, 60 * 1000);
}
From source file:cascading.flow.tez.planner.Hadoop2TezFlowStepJob.java
License:Open Source License
public static long getJobPollingInterval(Configuration configuration) {
    return configuration.getLong(JOB_POLLING_INTERVAL, 5000);
}