Example usage for org.apache.hadoop.conf Configuration getLong

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration#getLong(String, long), drawn from open-source projects.

Prototype

public long getLong(String name, long defaultValue) 

Document

Get the value of the name property as a long. If no such property exists, the supplied defaultValue is returned.
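
A minimal, self-contained sketch of the two-argument call; the property name "my.app.scan.timeout.ms" and its values are illustrative only and do not come from the examples below.

import org.apache.hadoop.conf.Configuration;

public class GetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The key is unset, so getLong returns the supplied default.
        long timeout = conf.getLong("my.app.scan.timeout.ms", 30000L);
        System.out.println(timeout); // 30000

        // After setLong, getLong parses the stored value instead.
        conf.setLong("my.app.scan.timeout.ms", 60000L);
        System.out.println(conf.getLong("my.app.scan.timeout.ms", 30000L)); // 60000
    }
}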

Usage

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} as its RetryInvocationHandler. Otherwise one of nameNodeUri or
 * rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}

From source file:com.mongodb.hadoop.splitter.BSONSplitter.java

License:Apache License

public static long getSplitSize(final Configuration conf, final FileStatus file) {
    long minSize = Math.max(1L, conf.getLong("mapred.min.split.size", 1L));
    long maxSize = conf.getLong("mapred.max.split.size", Long.MAX_VALUE);

    if (file != null) {
        long fileBlockSize = file.getBlockSize();
        return Math.max(minSize, Math.min(maxSize, fileBlockSize));
    } else {
        long blockSize = conf.getLong("dfs.blockSize", 64 * 1024 * 1024);
        return Math.max(minSize, Math.min(maxSize, blockSize));
    }
}
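
A hedged usage sketch for the helper above, assuming only the keys it reads (mapred.min.split.size, mapred.max.split.size, dfs.blockSize); passing a null FileStatus exercises the dfs.blockSize fallback.

import org.apache.hadoop.conf.Configuration;
import com.mongodb.hadoop.splitter.BSONSplitter;

public class SplitSizeSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("mapred.min.split.size", 32L * 1024 * 1024);   // 32 MB floor
        conf.setLong("mapred.max.split.size", 256L * 1024 * 1024);  // 256 MB ceiling

        // With no FileStatus, getSplitSize falls back to dfs.blockSize (default 64 MB),
        // clamped to [minSize, maxSize]; this prints 67108864.
        System.out.println(BSONSplitter.getSplitSize(conf, null));
    }
}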

From source file:com.mortardata.pig.storage.DynamoDBStorage.java

License:Apache License

/**
 * FRONTEND and BACKEND
 **/
@Override
public void setStoreLocation(String location, Job job) throws IOException {
    this.hadoopJobInfo = loadHadoopJobInfo(job);
    Configuration conf = this.hadoopJobInfo.getJobConfiguration();
    this.maxRetryWaitMilliseconds = conf.getLong(MAX_RETRY_WAIT_MILLISECONDS_PROPERTY,
            MAX_RETRY_WAIT_MILLISECONDS_DEFAULT);
    this.maxNumRetriesPerBatchWrite = conf.getInt(MAX_NUM_RETRIES_PER_BATCH_WRITE_PROPERTY,
            MAX_NUM_RETRIES_PER_BATCH_WRITE);
    this.throughputWritePercent = new Float(
            conf.getFloat(THROUGHPUT_WRITE_PERCENT_PROPERTY, THROUGHPUT_WRITE_PERCENT_DEFAULT)).doubleValue();
    if (this.throughputWritePercent < 0.1 || this.throughputWritePercent > 1.5) {
        throw new IOException(THROUGHPUT_WRITE_PERCENT_PROPERTY + " must be between 0.1 and 1.5.  Got: "
                + this.throughputWritePercent);
    }

    this.minBatchSize = conf.getInt(MINIMUM_BATCH_SIZE_PROPERTY, MINIMUM_BATCH_SIZE_DEFAULT);
    if (this.minBatchSize < 1 || this.minBatchSize > DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST) {
        throw new IOException(MINIMUM_BATCH_SIZE_PROPERTY + " must be between 1 and "
                + DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST + ". Got: " + this.minBatchSize);
    }
}

From source file:com.moz.fiji.hadoop.configurator.ConfigurationMethod.java

License:Apache License

/**
 * Calls an object's method with the value read from a Configuration instance.
 *
 * @param instance The object to populate.
 * @param conf The configuration to read from.
 * @throws IllegalAccessException If the method cannot be called on the object.
 * @throws HadoopConfigurationException If there is a problem with the annotation definition.
 */
public void call(Object instance, Configuration conf) throws IllegalAccessException {
    final String key = getKey();
    if (null == key) {
        throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                + instance.getClass().getName() + "." + mMethod.getName());
    }

    if (!mMethod.isAccessible()) {
        mMethod.setAccessible(true);
    }

    final Class<?>[] parameterTypes = mMethod.getParameterTypes();
    if (1 != parameterTypes.length) {
        throw new HadoopConfigurationException(
                "Methods annotated with @HadoopConf must have exactly one parameter: "
                        + instance.getClass().getName() + "." + mMethod.getName());
    }

    final Class<?> parameterType = parameterTypes[0];

    try {
        try {
            if (boolean.class == parameterType) {
                mMethod.invoke(instance, conf.getBoolean(key, Boolean.parseBoolean(getDefault())));
            } else if (float.class == parameterType) {
                mMethod.invoke(instance, conf.getFloat(key, Float.parseFloat(getDefault())));
            } else if (double.class == parameterType) {
                mMethod.invoke(instance, conf.getFloat(key, Float.parseFloat(getDefault())));
            } else if (int.class == parameterType) {
                mMethod.invoke(instance, conf.getInt(key, Integer.parseInt(getDefault())));
            } else if (long.class == parameterType) {
                mMethod.invoke(instance, conf.getLong(key, Long.parseLong(getDefault())));
            } else if (parameterType.isAssignableFrom(String.class)) {
                mMethod.invoke(instance, conf.get(key, getDefault()));
            } else if (parameterType.isAssignableFrom(Collection.class)) {
                mMethod.invoke(instance, conf.getStringCollection(key));
            } else if (String[].class == parameterType) {
                mMethod.invoke(instance, new Object[] { conf.getStrings(key) });
            } else {
                throw new HadoopConfigurationException(
                        "Unsupported method parameter type annotated by @HadoopConf: "
                                + instance.getClass().getName() + "." + mMethod.getName());
            }
        } catch (NumberFormatException e) {
            mMethod.invoke(instance, getDefault());
        }
    } catch (InvocationTargetException e) {
        throw new HadoopConfigurationException(e);
    }
}
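
This excerpt (and the ConfigurationVariable one that follows) shows a reflection-driven injection pattern: a single-argument method or a field is annotated with @HadoopConf, and long-typed targets are populated via conf.getLong(key, default). Below is a hedged sketch of the consumer side; the key and defaultValue attributes are taken from the code above, while the HadoopConfigurator.configure(...) entry point is an assumption not shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import com.moz.fiji.hadoop.configurator.HadoopConf;
import com.moz.fiji.hadoop.configurator.HadoopConfigurator; // assumed driver class

public class MyJobSettings {
    private long mTimeoutMs;

    // call() above routes long parameters through conf.getLong(key, Long.parseLong(defaultValue)).
    @HadoopConf(key = "my.job.timeout.ms", defaultValue = "30000")
    public void setTimeoutMs(long timeoutMs) {
        mTimeoutMs = timeoutMs;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("my.job.timeout.ms", 60000L);

        MyJobSettings settings = new MyJobSettings();
        HadoopConfigurator.configure(settings, conf); // assumed entry point
        System.out.println(settings.mTimeoutMs);      // 60000
    }
}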

From source file:com.moz.fiji.hadoop.configurator.ConfigurationVariable.java

License:Apache License

/**
 * Populates an object's field with the value read from a Configuration instance.
 *
 * @param instance The object to populate.
 * @param conf The configuration to read from.
 * @throws IllegalAccessException If the field cannot be set on the object.
 * @throws HadoopConfigurationException If there is a problem with the annotation definition.
 */
public void setValue(Object instance, Configuration conf) throws IllegalAccessException {
    final String key = getKey();
    if (null == key) {
        throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                + instance.getClass().getName() + "." + mField.getName());
    }
    if (null == conf.get(key) && mAnnotation.defaultValue().isEmpty()) {
        // Nothing set in the configuration, and no default value
        // specified. Just leave the field alone.
        return;
    }

    if (!mField.isAccessible()) {
        mField.setAccessible(true);
    }

    try {
        if (boolean.class == mField.getType()) {
            mField.setBoolean(instance, conf.getBoolean(key, getDefaultBoolean(instance)));
        } else if (float.class == mField.getType()) {
            mField.setFloat(instance, conf.getFloat(key, getDefaultFloat(instance)));
        } else if (double.class == mField.getType()) {
            mField.setDouble(instance, conf.getFloat(key, getDefaultDouble(instance)));
        } else if (int.class == mField.getType()) {
            mField.setInt(instance, conf.getInt(key, getDefaultInt(instance)));
        } else if (long.class == mField.getType()) {
            mField.setLong(instance, conf.getLong(key, getDefaultLong(instance)));
        } else if (mField.getType().isAssignableFrom(String.class)) {
            mField.set(instance, conf.get(key, getDefaultString(instance)));
        } else if (mField.getType().isAssignableFrom(Collection.class)) {
            mField.set(instance, conf.getStringCollection(key));
        } else if (String[].class == mField.getType()) {
            mField.set(instance, conf.getStrings(key));
        } else {
            throw new HadoopConfigurationException("Unsupported field type annotated by @HadoopConf: "
                    + instance.getClass().getName() + "." + mField.getName());
        }
    } catch (NumberFormatException e) {
        // That's okay. The default value for the field will be kept.
    }
}

From source file:com.moz.fiji.mapreduce.impl.FijiTableSplit.java

License:Apache License

/**
 * Returns the length of the split.
 *
 * This method does not currently examine the data in the region
 * represented by the split. We assume that each split is 3/4 full (where
 * "full" is defined as hbase.hregion.max.filesize). If the region had
 * that many bytes in it, it would split in two, each containing 1/2 that
 * many bytes. So we expect, on average, regions to be halfway between
 * "newly split" and "just about to split."
 *
 * @return the length of the split.
 * @see org.apache.hadoop.mapreduce.InputSplit#getLength()
 */
@Override
public long getLength() {
    if (0 == mSplitSize) {
        // Calculate this value once and memoize its result.
        Configuration conf = new Configuration();
        conf = HBaseConfiguration.addHbaseResources(conf);
        mSplitSize = (conf.getLong("hbase.hregion.max.filesize", 0) * 4) / 3;
        if (0 == mSplitSize) {
            // Set this to some reasonable non-zero default if the HBase properties
            // weren't set correctly.
            mSplitSize = 64 * 1024 * 1024; // 64 MB
        }
    }

    return mSplitSize;
}

From source file:com.moz.fiji.mapreduce.kvstore.TestKeyValueStoreConfiguration.java

License:Apache License

@Test
public void testStoreLong() {
    Configuration parent = new Configuration(false);
    KeyValueStoreConfiguration isolated = KeyValueStoreConfiguration.createInConfiguration(parent, 0);
    isolated.setLong("foo-key", 12345L);
    assertEquals(12345L, isolated.getLong("foo-key", 0L));

    // Check that this value is stored in the namespace on the parent:
    Configuration delegate = isolated.getDelegate();
    assertEquals(12345L, delegate.getLong(KeyValueStoreConfiguration.confKeyAtIndex("foo-key", 0), 0L));
}

From source file:com.mozilla.bagheera.sink.HBaseSink.java

License:Apache License

public HBaseSink(String tableName, String family, String qualifier, boolean prefixDate, int numThreads,
        final int batchSize) {
    this.tableName = Bytes.toBytes(tableName);
    this.family = Bytes.toBytes(family);
    this.qualifier = Bytes.toBytes(qualifier);
    this.prefixDate = prefixDate;
    this.batchSize = batchSize;
    this.currentTimeMillis = System.currentTimeMillis();
    Configuration conf = HBaseConfiguration.create();

    // Use the standard HBase default
    maxKeyValueSize = conf.getLong("hbase.client.keyvalue.maxsize", 10485760l);
    hbasePool = new HTablePool(conf, numThreads);

    stored = Metrics.newMeter(new MetricName("bagheera", "sink.hbase", tableName + ".stored"), "messages",
            TimeUnit.SECONDS);
    storeFailed = Metrics.newMeter(new MetricName("bagheera", "sink.hbase", tableName + ".store.failed"),
            "messages", TimeUnit.SECONDS);
    deleted = Metrics.newMeter(new MetricName("bagheera", "sink.hbase", tableName + ".deleted"), "messages",
            TimeUnit.SECONDS);
    deleteFailed = Metrics.newMeter(new MetricName("bagheera", "sink.hbase", tableName + ".delete.failed"),
            "messages", TimeUnit.SECONDS);
    oversized = Metrics.newMeter(new MetricName("bagheera", "sink.hbase", tableName + ".oversized"), "messages",
            TimeUnit.SECONDS);
    flushTimer = Metrics.newTimer(new MetricName("bagheera", "sink.hbase", tableName + ".flush.time"),
            TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    htableTimer = Metrics.newTimer(new MetricName("bagheera", "sink.hbase", tableName + ".htable.time"),
            TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    batchSizeGauge = Metrics.newGauge(new MetricName("bagheera", "sink.hbase", tableName + ".batchsize"),
            new Gauge<Integer>() {
                @Override
                public Integer value() {
                    return batchSize;
                }
            });
}

From source file:com.nearinfinity.honeycomb.hbase.HTableProvider.java

License:Apache License

public HTableProvider(final Configuration configuration) {
    String hTableName = configuration.get(ConfigConstants.TABLE_NAME);
    long writeBufferSize = configuration.getLong(ConfigConstants.WRITE_BUFFER,
            ConfigConstants.DEFAULT_WRITE_BUFFER);
    int poolSize = configuration.getInt(ConfigConstants.TABLE_POOL_SIZE,
            ConfigConstants.DEFAULT_TABLE_POOL_SIZE);
    boolean autoFlush = configuration.getBoolean(ConfigConstants.AUTO_FLUSH,
            ConfigConstants.DEFAULT_AUTO_FLUSH);

    tableName = hTableName;
    tablePool = new HTablePool(configuration, poolSize, new HTableFactory(writeBufferSize, autoFlush));
}

From source file:com.netflix.bdp.s3.S3MultipartOutputCommitter.java

License:Apache License

public S3MultipartOutputCommitter(Path outputPath, JobContext context) throws IOException {
    super(outputPath, (TaskAttemptContext) context);
    this.constructorOutputPath = outputPath;

    Configuration conf = context.getConfiguration();

    this.uploadPartSize = conf.getLong(S3Committer.UPLOAD_SIZE, S3Committer.DEFAULT_UPLOAD_SIZE);
    // Spark will use a fake app id based on the current minute and job id 0.
    // To avoid collisions, use the YARN application ID for Spark.
    this.uuid = conf.get(S3Committer.UPLOAD_UUID, conf.get(S3Committer.SPARK_WRITE_UUID,
            conf.get(S3Committer.SPARK_APP_ID, context.getJobID().toString())));

    if (context instanceof TaskAttemptContext) {
        this.workPath = taskAttemptPath((TaskAttemptContext) context, uuid);
    } else {
        this.workPath = null;
    }

    this.wrappedCommitter = new FileOutputCommitter(Paths.getMultipartUploadCommitsDirectory(conf, uuid),
            context);
}