Example usage for org.apache.hadoop.conf Configuration getInt

List of usage examples for org.apache.hadoop.conf Configuration getInt

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.getInt.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int.
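
As a quick illustration, the following minimal sketch reads a custom property as an int, falling back to a default when the property is not set (the property name "my.custom.timeout" and the default value 30 are hypothetical, chosen only for this example):

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Returns 30 when "my.custom.timeout" is absent; otherwise parses the value as an int
        // (a non-numeric value results in a NumberFormatException).
        int timeout = conf.getInt("my.custom.timeout", 30);
        System.out.println("timeout = " + timeout);
    }
}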

Usage

From source file:cn.easyhbase.common.hbase.parallel.ScanTaskConfig.java

License:Apache License

public ScanTaskConfig(TableName tableName, Configuration configuration, Charset charset,
        TableFactory tableFactory, AbstractRowKeyDistributor rowKeyDistributor, int scanCaching) {
    if (tableName == null) {
        throw new NullPointerException("No table specified");
    }
    if (rowKeyDistributor == null) {
        throw new NullPointerException("rowKeyDistributor must not be null");
    }
    this.tableName = tableName;
    this.configuration = configuration;
    this.charset = charset;
    this.tableFactory = tableFactory;
    this.rowKeyDistributor = rowKeyDistributor;
    if (scanCaching > 0) {
        this.scanTaskQueueSize = scanCaching;
    } else {
        this.scanTaskQueueSize = configuration.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING,
                HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
    }
}

From source file:cn.uc.hadoop.mapreduce.lib.input.FileNameLineRecordReader.java

License:Apache License

public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    // ADDED by qiujw: use the file name as the key
    key = new Text(file.getName());

    compressionCodecs = new CompressionCodecFactory(job);
    codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    final FileSystem fs = file.getFileSystem(job);
    fileIn = fs.open(file);
    if (isCompressedInput()) {
        decompressor = CodecPool.getDecompressor(codec);
        if (codec instanceof SplittableCompressionCodec) {
            final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(
                    fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
            if (null == this.recordDelimiterBytes) {
                in = new LineReader(cIn, job);
            } else {
                in = new LineReader(cIn, job, this.recordDelimiterBytes);
            }

            start = cIn.getAdjustedStart();
            end = cIn.getAdjustedEnd();
            filePosition = cIn;
        } else {
            if (null == this.recordDelimiterBytes) {
                in = new LineReader(codec.createInputStream(fileIn, decompressor), job);
            } else {
                in = new LineReader(codec.createInputStream(fileIn, decompressor), job,
                        this.recordDelimiterBytes);
            }
            filePosition = fileIn;
        }
    } else {
        fileIn.seek(start);
        if (null == this.recordDelimiterBytes) {
            in = new LineReader(fileIn, job);
        } else {
            in = new LineReader(fileIn, job, this.recordDelimiterBytes);
        }

        filePosition = fileIn;
    }
    // If this is not the first split, we always throw away first record
    // because we always (except the last split) read one extra line in
    // next() method.
    if (start != 0) {
        start += in.readLine(new Text(), 0, maxBytesToConsume(start));
    }
    this.pos = start;
}

From source file:cn.uc.hadoop.mapreduce.lib.input.FilePathLineRecordReader.java

License:Apache License

public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    // ADDED by qiujw: use the full file path as the key
    key = new Text(file.toString());

    compressionCodecs = new CompressionCodecFactory(job);
    codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    final FileSystem fs = file.getFileSystem(job);
    fileIn = fs.open(file);
    if (isCompressedInput()) {
        decompressor = CodecPool.getDecompressor(codec);
        if (codec instanceof SplittableCompressionCodec) {
            final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(
                    fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
            if (null == this.recordDelimiterBytes) {
                in = new LineReader(cIn, job);
            } else {
                in = new LineReader(cIn, job, this.recordDelimiterBytes);
            }

            start = cIn.getAdjustedStart();
            end = cIn.getAdjustedEnd();
            filePosition = cIn;
        } else {
            if (null == this.recordDelimiterBytes) {
                in = new LineReader(codec.createInputStream(fileIn, decompressor), job);
            } else {
                in = new LineReader(codec.createInputStream(fileIn, decompressor), job,
                        this.recordDelimiterBytes);
            }
            filePosition = fileIn;
        }
    } else {
        fileIn.seek(start);
        if (null == this.recordDelimiterBytes) {
            in = new LineReader(fileIn, job);
        } else {
            in = new LineReader(fileIn, job, this.recordDelimiterBytes);
        }

        filePosition = fileIn;
    }
    // If this is not the first split, we always throw away first record
    // because we always (except the last split) read one extra line in
    // next() method.
    if (start != 0) {
        start += in.readLine(new Text(), 0, maxBytesToConsume(start));
    }
    this.pos = start;
}

From source file:co.cask.cdap.data2.transaction.TransactionManagerDebuggerMain.java

License:Apache License

/**
 * Parse the arguments from the command line and execute the different modes.
 * @param args command line arguments
 * @param conf default configuration
 * @return true if the arguments were parsed successfully and comply with the expected usage
 */
private boolean parseArgsAndExecMode(String[] args, Configuration conf) {
    CommandLineParser parser = new GnuParser();
    // Check all the options of the command line
    try {
        CommandLine line = parser.parse(options, args);
        if (line.hasOption(HELP_OPTION)) {
            printUsage(false);
            return true;
        }

        hostname = line.getOptionValue(HOST_OPTION);
        existingFilename = line.getOptionValue(FILENAME_OPTION);
        persistingFilename = line.hasOption(SAVE_OPTION) ? line.getOptionValue(SAVE_OPTION) : null;
        showTxids = line.hasOption(IDS_OPTION);
        txId = line.hasOption(TRANSACTION_OPTION) ? Long.valueOf(line.getOptionValue(TRANSACTION_OPTION))
                : null;
        accessToken = line.hasOption(TOKEN_OPTION) ? line.getOptionValue(TOKEN_OPTION).replaceAll("(\r|\n)", "")
                : null;
        tokenFile = line.hasOption(TOKEN_FILE_OPTION)
                ? line.getOptionValue(TOKEN_FILE_OPTION).replaceAll("(\r|\n)", "")
                : null;
        portNumber = line.hasOption(PORT_OPTION) ? Integer.valueOf(line.getOptionValue(PORT_OPTION))
                : conf.getInt(Constants.Router.ROUTER_PORT,
                        Integer.parseInt(Constants.Router.DEFAULT_ROUTER_PORT));

        // if both tokenfile and accessToken are given, just use the access token
        if (tokenFile != null) {
            if (accessToken != null) {
                tokenFile = null;
            } else {
                readTokenFile();
            }
        }

        switch (this.mode) {
        case VIEW:
            if (!line.hasOption(HOST_OPTION) && !line.hasOption(FILENAME_OPTION)) {
                usage("Either specify a hostname to download a new snapshot, "
                        + "or a filename of an existing snapshot.");
                return false;
            }
            // Execute mode
            executeViewMode();
            break;
        case INVALIDATE:
            if (!line.hasOption(HOST_OPTION) || !line.hasOption(TRANSACTION_OPTION)) {
                usage("Specify a host name and a transaction id.");
                return false;
            }
            // Execute mode
            executeInvalidateMode();
            break;
        case RESET:
            if (!line.hasOption(HOST_OPTION)) {
                usage("Specify a host name.");
                return false;
            }
            // Execute mode
            executeResetMode();
            break;
        default:
            printUsage(true);
            return false;
        }
    } catch (ParseException e) {
        printUsage(true);
        return false;
    }
    return true;
}

From source file:co.cask.tephra.inmemory.InMemoryTransactionService.java

License:Apache License

@Inject
public InMemoryTransactionService(Configuration conf, DiscoveryService discoveryService,
        Provider<TransactionManager> txManagerProvider) {

    this.discoveryService = discoveryService;
    this.txManagerProvider = txManagerProvider;
    this.serviceName = conf.get(TxConstants.Service.CFG_DATA_TX_DISCOVERY_SERVICE_NAME,
            TxConstants.Service.DEFAULT_DATA_TX_DISCOVERY_SERVICE_NAME);

    address = conf.get(TxConstants.Service.CFG_DATA_TX_BIND_ADDRESS,
            TxConstants.Service.DEFAULT_DATA_TX_BIND_ADDRESS);
    port = conf.getInt(TxConstants.Service.CFG_DATA_TX_BIND_PORT,
            TxConstants.Service.DEFAULT_DATA_TX_BIND_PORT);

    // Retrieve the number of threads for the service
    threads = conf.getInt(TxConstants.Service.CFG_DATA_TX_SERVER_THREADS,
            TxConstants.Service.DEFAULT_DATA_TX_SERVER_THREADS);
    ioThreads = conf.getInt(TxConstants.Service.CFG_DATA_TX_SERVER_IO_THREADS,
            TxConstants.Service.DEFAULT_DATA_TX_SERVER_IO_THREADS);

    maxReadBufferBytes = conf.getInt(TxConstants.Service.CFG_DATA_TX_THRIFT_MAX_READ_BUFFER,
            TxConstants.Service.DEFAULT_DATA_TX_THRIFT_MAX_READ_BUFFER);

    LOG.info("Configuring TransactionService" + ", address: " + address + ", port: " + port + ", threads: "
            + threads + ", io threads: " + ioThreads + ", max read buffer (bytes): " + maxReadBufferBytes);
}

From source file:co.cask.tephra.metrics.DefaultMetricsCollector.java

License:Apache License

@Override
public void configure(Configuration conf) {
    // initialize selected output reporter
    reportPeriod = conf.getInt(TxConstants.Metrics.REPORT_PERIOD_KEY,
            TxConstants.Metrics.REPORT_PERIOD_DEFAULT);
    LOG.info("Configured metrics report to emit every {} seconds", reportPeriod);
    // TODO: reporters should be pluggable based on injection
    jmxReporter = JmxReporter.forRegistry(metrics).build();
    reporter = Slf4jReporter.forRegistry(metrics).outputTo(LoggerFactory.getLogger("tephra-metrics"))
            .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).build();
}

From source file:co.cask.tephra.persist.HDFSUtil.java

License:Apache License

boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, final Configuration conf)
        throws IOException {
    LOG.info("Recovering lease on dfs file " + p);
    long startWaiting = System.currentTimeMillis();
    // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
    // beyond that limit 'to be safe'.
    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
    // This setting should be what the cluster dfs heartbeat is set to.
    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 3000);
    // This should be set to how long it'll take for us to timeout against primary datanode if it
    // is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in HDFS, the
    // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000);

    Method isFileClosedMeth = null;
    // whether we need to look for isFileClosed method
    boolean findIsFileClosedMeth = true;
    boolean recovered = false;
    // We break the loop if we succeed the lease recovery, timeout, or we throw an exception.
    for (int nbAttempt = 0; !recovered; nbAttempt++) {
        recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
        if (recovered || checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) {
            break;
        }
        try {
            // On the first time through wait the short 'firstPause'.
            if (nbAttempt == 0) {
                Thread.sleep(firstPause);
            } else {
                // Cycle here until subsequentPause elapses.  While spinning, check isFileClosed if
                // available (should be in hadoop 2.0.5... not in hadoop 1 though).
                long localStartWaiting = System.currentTimeMillis();
                while ((System.currentTimeMillis() - localStartWaiting) < subsequentPause) {
                    Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
                    if (findIsFileClosedMeth) {
                        try {
                            isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                                    new Class[] { Path.class });
                        } catch (NoSuchMethodException nsme) {
                            LOG.debug("isFileClosed not available");
                        } finally {
                            findIsFileClosedMeth = false;
                        }
                    }
                    if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
                        recovered = true;
                        break;
                    }
                }
            }
        } catch (InterruptedException ie) {
            InterruptedIOException iioe = new InterruptedIOException();
            iioe.initCause(ie);
            throw iioe;
        }
    }
    return recovered;
}

From source file:co.cask.tephra.persist.HDFSUtil.java

License:Apache License

boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt, final Path p,
        final long startWaiting) {
    if (recoveryTimeout < System.currentTimeMillis()) {
        LOG.warn("Cannot recoverLease after trying for " + conf.getInt("hbase.lease.recovery.timeout", 900000)
                + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
                + getLogMessageDetail(nbAttempt, p, startWaiting));
        return true;
    }
    return false;
}

From source file:co.cask.tephra.runtime.ZKModule.java

License:Apache License

@Provides
@Singleton
private ZKClientService provideZKClientService(Configuration conf) {
    String zkStr = conf.get(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM);
    if (zkStr == null) {
        // Default to HBase one.
        zkStr = conf.get(TxConstants.HBase.ZOOKEEPER_QUORUM);
    }

    int timeOut = conf.getInt(TxConstants.HBase.ZK_SESSION_TIMEOUT,
            TxConstants.HBase.DEFAULT_ZK_SESSION_TIMEOUT);
    ZKClientService zkClientService = new TephraZKClientService(zkStr, timeOut, null,
            ArrayListMultimap.<String, byte[]>create());
    return ZKClientServices.delegate(ZKClients.reWatchOnExpire(ZKClients.retryOnFailure(zkClientService,
            RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))));
}

From source file:co.cask.tephra.TransactionManager.java

License:Apache License

@Inject
public TransactionManager(Configuration conf, @Nonnull TransactionStateStorage persistor,
        MetricsCollector txMetricsCollector) {
    this.persistor = persistor;
    cleanupInterval = conf.getInt(TxConstants.Manager.CFG_TX_CLEANUP_INTERVAL,
            TxConstants.Manager.DEFAULT_TX_CLEANUP_INTERVAL);
    defaultTimeout = conf.getInt(TxConstants.Manager.CFG_TX_TIMEOUT, TxConstants.Manager.DEFAULT_TX_TIMEOUT);
    defaultLongTimeout = conf.getInt(TxConstants.Manager.CFG_TX_LONG_TIMEOUT,
            TxConstants.Manager.DEFAULT_TX_LONG_TIMEOUT);
    snapshotFrequencyInSeconds = conf.getLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL,
            TxConstants.Manager.DEFAULT_TX_SNAPSHOT_INTERVAL);
    // must always keep at least 1 snapshot
    snapshotRetainCount = Math.max(conf.getInt(TxConstants.Manager.CFG_TX_SNAPSHOT_RETAIN,
            TxConstants.Manager.DEFAULT_TX_SNAPSHOT_RETAIN), 1);

    // intentionally not using a constant, as this config should not be exposed
    // TODO: REMOVE WITH txnBackwardsCompatCheck()
    longTimeoutTolerance = conf.getLong("data.tx.long.timeout.tolerance", 10000);

    this.txMetricsCollector = txMetricsCollector;
    this.txMetricsCollector.configure(conf);
    clear();
}