Example usage for org.apache.hadoop.conf.Configuration.getInt

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getInt, drawn from open-source projects.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int. If no such property exists, the provided default value is returned; if the stored value cannot be parsed as an int, a NumberFormatException is thrown.
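
A minimal, self-contained sketch of these semantics (the property names here are invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetIntDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("demo.retry.count", 5);

        // Property present: the stored value is parsed and returned.
        int retries = conf.getInt("demo.retry.count", 3); // 5

        // Property absent: the supplied default is returned instead.
        int timeoutMs = conf.getInt("demo.timeout.ms", 1000); // 1000

        System.out.println(retries + " " + timeoutMs);
    }
}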

Usage

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

public static BigtableOptions fromConfiguration(final Configuration configuration) throws IOException {

    BigtableOptions.Builder bigtableOptionsBuilder = new BigtableOptions.Builder();

    bigtableOptionsBuilder.setProjectId(getValue(configuration, PROJECT_ID_KEY, "Project ID"));
    bigtableOptionsBuilder.setZoneId(getValue(configuration, ZONE_KEY, "Zone"));
    bigtableOptionsBuilder.setClusterId(getValue(configuration, CLUSTER_KEY, "Cluster"));

    bigtableOptionsBuilder
            .setDataHost(getHost(configuration, BIGTABLE_HOST_KEY, BIGTABLE_DATA_HOST_DEFAULT, "API Data"));

    bigtableOptionsBuilder.setTableAdminHost(getHost(configuration, BIGTABLE_TABLE_ADMIN_HOST_KEY,
            BIGTABLE_TABLE_ADMIN_HOST_DEFAULT, "Table Admin"));

    bigtableOptionsBuilder.setClusterAdminHost(getHost(configuration, BIGTABLE_CLUSTER_ADMIN_HOST_KEY,
            BIGTABLE_CLUSTER_ADMIN_HOST_DEFAULT, "Cluster Admin"));

    int port = configuration.getInt(BIGTABLE_PORT_KEY, BIGTABLE_PORT_DEFAULT);
    bigtableOptionsBuilder.setPort(port);
    setChannelOptions(bigtableOptionsBuilder, configuration);

    int asyncMutatorCount = configuration.getInt(BIGTABLE_ASYNC_MUTATOR_COUNT_KEY,
            BIGTABLE_ASYNC_MUTATOR_COUNT_DEFAULT);
    bigtableOptionsBuilder.setAsyncMutatorWorkerCount(asyncMutatorCount);

    bigtableOptionsBuilder.setUseBulkApi(configuration.getBoolean(BIGTABLE_USE_BULK_API, true));
    bigtableOptionsBuilder.setBulkMaxRowKeyCount(configuration.getInt(BIGTABLE_BULK_MAX_ROW_KEY_COUNT,
            BigtableOptions.BIGTABLE_BULK_MAX_ROW_KEY_COUNT_DEFAULT));
    bigtableOptionsBuilder.setBulkMaxRequestSize(configuration.getLong(BIGTABLE_BULK_MAX_REQUEST_SIZE_BYTES,
            BigtableOptions.BIGTABLE_BULK_MAX_REQUEST_SIZE_BYTES_DEFAULT));
    bigtableOptionsBuilder
            .setUsePlaintextNegotiation(configuration.getBoolean(BIGTABLE_USE_PLAINTEXT_NEGOTIATION, false));

    return bigtableOptionsBuilder.build();
}
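
For orientation, a hypothetical caller of the factory above (assuming the key constants referenced in the method are public members of BigtableOptionsFactory):

Configuration conf = HBaseConfiguration.create();
conf.set(BigtableOptionsFactory.PROJECT_ID_KEY, "my-project");
conf.set(BigtableOptionsFactory.ZONE_KEY, "us-central1-b");
conf.set(BigtableOptionsFactory.CLUSTER_KEY, "my-cluster");
// getInt-backed settings such as the port fall back to their defaults when unset.
conf.setInt(BigtableOptionsFactory.BIGTABLE_PORT_KEY, 443);
BigtableOptions options = BigtableOptionsFactory.fromConfiguration(conf);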

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

private static void setChannelOptions(BigtableOptions.Builder builder, Configuration configuration)
        throws IOException {
    setCredentialOptions(builder, configuration);

    builder.setRetryOptions(createRetryOptions(configuration));

    int channelCount = configuration.getInt(BIGTABLE_DATA_CHANNEL_COUNT_KEY,
            BigtableOptions.BIGTABLE_DATA_CHANNEL_COUNT_DEFAULT);
    builder.setDataChannelCount(channelCount);

    int channelTimeout = configuration.getInt(BIGTABLE_CHANNEL_TIMEOUT_MS_KEY,
            BigtableOptions.BIGTABLE_CHANNEL_TIMEOUT_MS_DEFAULT);

    // Connection refresh takes a couple of seconds. 1 minute is the bare minimum that this should
    // be allowed to be set at.
    Preconditions.checkArgument(channelTimeout == 0 || channelTimeout >= 60000,
            BIGTABLE_CHANNEL_TIMEOUT_MS_KEY + " has to be 0 (no timeout) or 1 minute+ (60000)");
    builder.setTimeoutMs(channelTimeout);

    builder.setUserAgent(BigtableConstants.USER_AGENT);
}

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

private static RetryOptions createRetryOptions(Configuration configuration) {
    RetryOptions.Builder retryOptionsBuilder = new RetryOptions.Builder();
    boolean enableRetries = configuration.getBoolean(ENABLE_GRPC_RETRIES_KEY,
            RetryOptions.DEFAULT_ENABLE_GRPC_RETRIES);
    LOG.debug("gRPC retries enabled: %s", enableRetries);
    retryOptionsBuilder.setEnableRetries(enableRetries);

    String retryCodes = configuration.get(ADDITIONAL_RETRY_CODES, "");
    String[] codes = retryCodes.split(",");
    for (String stringCode : codes) {
        String trimmed = stringCode.trim();
        if (trimmed.isEmpty()) {
            continue;
        }
        Status.Code code = Status.Code.valueOf(trimmed);
        Preconditions.checkArgument(code != null, "Code " + stringCode + " not found.");
        LOG.debug("gRPC retry on: %s", stringCode);
        retryOptionsBuilder.addStatusToRetryOn(code);
    }

    boolean retryOnDeadlineExceeded = configuration.getBoolean(ENABLE_GRPC_RETRY_DEADLINEEXCEEDED_KEY, true);
    LOG.debug("gRPC retry on deadline exceeded enabled: %s", retryOnDeadlineExceeded);
    retryOptionsBuilder.setRetryOnDeadlineExceeded(retryOnDeadlineExceeded);

    int maxElapsedBackoffMillis = configuration.getInt(MAX_ELAPSED_BACKOFF_MILLIS_KEY,
            RetryOptions.DEFAULT_MAX_ELAPSED_BACKOFF_MILLIS);
    LOG.debug("gRPC retry maxElapsedBackoffMillis: %d", maxElapsedBackoffMillis);
    retryOptionsBuilder.setMaxElapsedBackoffMillis(maxElapsedBackoffMillis);

    int readPartialRowTimeoutMillis = configuration.getInt(READ_PARTIAL_ROW_TIMEOUT_MS,
            RetryOptions.DEFAULT_READ_PARTIAL_ROW_TIMEOUT_MS);
    LOG.debug("gRPC read partial row timeout (millis): %d", readPartialRowTimeoutMillis);
    retryOptionsBuilder.setReadPartialRowTimeoutMillis(readPartialRowTimeoutMillis);

    int streamingBufferSize = configuration.getInt(READ_BUFFER_SIZE,
            RetryOptions.DEFAULT_STREAMING_BUFFER_SIZE);
    LOG.debug("gRPC read buffer size (count): %d", streamingBufferSize);
    retryOptionsBuilder.setStreamingBufferSize(streamingBufferSize);

    int streamingBatchSize = configuration.getInt(READ_BATCH_SIZE, RetryOptions.DEFAULT_STREAMING_BATCH_SIZE);
    LOG.debug("gRPC read batch size (count): %d", streamingBatchSize);
    retryOptionsBuilder.setStreamingBatchSize(streamingBatchSize);

    int maxScanTimeoutRetries = configuration.getInt(MAX_SCAN_TIMEOUT_RETRIES,
            RetryOptions.DEFAULT_MAX_SCAN_TIMEOUT_RETRIES);
    LOG.debug("gRPC max scan timeout retries (count): %d", maxScanTimeoutRetries);
    retryOptionsBuilder.setMaxScanTimeoutRetries(maxScanTimeoutRetries);

    return retryOptionsBuilder.build();
}

From source file:com.google.cloud.bigtable.mapreduce.Export.java

License:Apache License

private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
    Scan s = new Scan();
    // Optional arguments.
    // Set Scan Versions
    int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
    s.setMaxVersions(versions);
    // Set Scan Range
    long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
    long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
    s.setTimeRange(startTime, endTime);
    // Set cache blocks
    s.setCacheBlocks(false);
    // set Start and Stop row
    if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
        s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
    }
    if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
        s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
    }
    // Set Scan Column Family
    boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
    if (raw) {
        s.setRaw(raw);
    }

    if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
        s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
    }
    // Add additional comma-separated families
    for (String family : conf.getStrings(SCAN_COLUMN_FAMILIES, new String[0])) {
        s.addFamily(Bytes.toBytes(family));
    }
    // Set RowFilter or Prefix Filter if applicable.
    Filter exportFilter = getExportFilter(args);
    if (exportFilter != null) {
        LOG.info("Setting Scan Filter for Export.");
        s.setFilter(exportFilter);
    }

    int batching = conf.getInt(EXPORT_BATCHING, -1);
    if (batching != -1) {
        try {
            s.setBatch(batching);
        } catch (IncompatibleFilterException e) {
            LOG.error("Batching could not be set", e);
        }
    }
    LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime
            + ", keepDeletedCells=" + raw);
    return s;
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.java

License:Open Source License

/**
 * Configures GHFS using the supplied configuration.
 *
 * @param config Hadoop configuration object.
 */
private synchronized void configure(Configuration config) throws IOException {
    LOG.debug("GHFS.configure");
    LOG.debug("GHFS_ID = {}", GHFS_ID);

    if (gcsfs == null) {

        copyDeprecatedConfigurationOptions(config);

        Credential credential;
        try {
            credential = HadoopCredentialConfiguration.newBuilder().withConfiguration(config)
                    .withOverridePrefix(AUTHENTICATION_PREFIX).build()
                    .getCredential(CredentialFactory.GCS_SCOPES);
        } catch (GeneralSecurityException gse) {
            throw new IOException(gse);
        }

        GoogleCloudStorageFileSystemOptions.Builder optionsBuilder = createOptionsBuilderFromConfig(config);

        PathCodec pathCodec;
        String specifiedPathCodec = config.get(PATH_CODEC_KEY, PATH_CODEC_DEFAULT).toLowerCase();
        LOG.debug("{} = {}", PATH_CODEC_KEY, specifiedPathCodec);
        if (specifiedPathCodec.equals(PATH_CODEC_USE_LEGACY_ENCODING)) {
            pathCodec = GoogleCloudStorageFileSystem.LEGACY_PATH_CODEC;
        } else if (specifiedPathCodec.equals(PATH_CODEC_USE_URI_ENCODING)) {
            pathCodec = GoogleCloudStorageFileSystem.URI_ENCODED_PATH_CODEC;
        } else {
            pathCodec = GoogleCloudStorageFileSystem.LEGACY_PATH_CODEC;
            LOG.warn("Unknwon path codec specified {}. Using default / legacy.", specifiedPathCodec);
        }
        optionsBuilder.setPathCodec(pathCodec);
        gcsfs = new GoogleCloudStorageFileSystem(credential, optionsBuilder.build());
    }

    bufferSizeOverride = config.getInt(BUFFERSIZE_KEY, BUFFERSIZE_DEFAULT);
    LOG.debug("{} = {}", BUFFERSIZE_KEY, bufferSizeOverride);

    defaultBlockSize = config.getLong(BLOCK_SIZE_KEY, BLOCK_SIZE_DEFAULT);
    LOG.debug("{} = {}", BLOCK_SIZE_KEY, defaultBlockSize);

    String systemBucketName = config.get(GCS_SYSTEM_BUCKET_KEY, null);
    LOG.debug("{} = {}", GCS_SYSTEM_BUCKET_KEY, systemBucketName);

    boolean createSystemBucket = config.getBoolean(GCS_CREATE_SYSTEM_BUCKET_KEY,
            GCS_CREATE_SYSTEM_BUCKET_DEFAULT);
    LOG.debug("{} = {}", GCS_CREATE_SYSTEM_BUCKET_KEY, createSystemBucket);

    reportedPermissions = new FsPermission(
            config.get(PERMISSIONS_TO_REPORT_KEY, PERMISSIONS_TO_REPORT_DEFAULT));
    LOG.debug("{} = {}", PERMISSIONS_TO_REPORT_KEY, reportedPermissions);

    configureBuckets(systemBucketName, createSystemBucket);

    // Set initial working directory to root so that any configured value gets resolved
    // against file system root.
    workingDirectory = getFileSystemRoot();

    Path newWorkingDirectory;
    String configWorkingDirectory = config.get(GCS_WORKING_DIRECTORY_KEY);
    if (Strings.isNullOrEmpty(configWorkingDirectory)) {
        newWorkingDirectory = getDefaultWorkingDirectory();
        LOG.warn("No working directory configured, using default: '{}'", newWorkingDirectory);
    } else {
        newWorkingDirectory = new Path(configWorkingDirectory);
    }

    // Use the public method to ensure proper behavior of normalizing and resolving the new
    // working directory relative to the initial filesystem-root directory.
    setWorkingDirectory(newWorkingDirectory);
    LOG.debug("{} = {}", GCS_WORKING_DIRECTORY_KEY, getWorkingDirectory());

    // Set this configuration as the default config for this instance.
    setConf(config);

    LOG.debug("GHFS.configure: done");
}
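
A hypothetical override of the getInt-backed read buffer size before the file system is initialized (it assumes BUFFERSIZE_KEY is a public constant of GoogleHadoopFileSystemBase; the literal key name is defined by the connector):

Configuration conf = new Configuration();
conf.setInt(GoogleHadoopFileSystemBase.BUFFERSIZE_KEY, 8 * 1024 * 1024);
FileSystem fs = FileSystem.get(URI.create("gs://my-bucket/"), conf);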

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.java

License:Open Source License

@VisibleForTesting
GoogleCloudStorageFileSystemOptions.Builder createOptionsBuilderFromConfig(Configuration config)
        throws IOException {
    GoogleCloudStorageFileSystemOptions.Builder optionsBuilder = GoogleCloudStorageFileSystemOptions
            .newBuilder();

    boolean enableMetadataCache = config.getBoolean(GCS_ENABLE_METADATA_CACHE_KEY,
            GCS_ENABLE_METADATA_CACHE_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_METADATA_CACHE_KEY, enableMetadataCache);
    optionsBuilder.setIsMetadataCacheEnabled(enableMetadataCache);

    boolean enableBucketDelete = config.getBoolean(GCE_BUCKET_DELETE_ENABLE_KEY,
            GCE_BUCKET_DELETE_ENABLE_DEFAULT);
    LOG.debug("{} = {}", GCE_BUCKET_DELETE_ENABLE_KEY, enableBucketDelete);
    optionsBuilder.setEnableBucketDelete(enableBucketDelete);

    DirectoryListCache.Type cacheType = DirectoryListCache.Type
            .valueOf(config.get(GCS_METADATA_CACHE_TYPE_KEY, GCS_METADATA_CACHE_TYPE_DEFAULT));
    LOG.debug("{} = {}", GCS_METADATA_CACHE_TYPE_KEY, cacheType);
    optionsBuilder.setCacheType(cacheType);

    String cacheBasePath = config.get(GCS_METADATA_CACHE_DIRECTORY_KEY, GCS_METADATA_CACHE_DIRECTORY_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_DIRECTORY_KEY, cacheBasePath);
    optionsBuilder.setCacheBasePath(cacheBasePath);

    long cacheMaxEntryAgeMillis = config.getLong(GCS_METADATA_CACHE_MAX_ENTRY_AGE_KEY,
            GCS_METADATA_CACHE_MAX_ENTRY_AGE_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_MAX_ENTRY_AGE_KEY, cacheMaxEntryAgeMillis);
    optionsBuilder.setCacheMaxEntryAgeMillis(cacheMaxEntryAgeMillis);

    long cacheMaxInfoAgeMillis = config.getLong(GCS_METADATA_CACHE_MAX_INFO_AGE_KEY,
            GCS_METADATA_CACHE_MAX_INFO_AGE_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_MAX_INFO_AGE_KEY, cacheMaxInfoAgeMillis);
    optionsBuilder.setCacheMaxInfoAgeMillis(cacheMaxInfoAgeMillis);

    GoogleCloudStorageFileSystemOptions.TimestampUpdatePredicate updatePredicate = ParentTimestampUpdateIncludePredicate
            .create(config);
    optionsBuilder.setShouldIncludeInTimestampUpdatesPredicate(updatePredicate);

    enableAutoRepairImplicitDirectories = config.getBoolean(GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_KEY,
            GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_KEY, enableAutoRepairImplicitDirectories);

    enableInferImplicitDirectories = config.getBoolean(GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_KEY,
            GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_KEY, enableInferImplicitDirectories);

    enableFlatGlob = config.getBoolean(GCS_ENABLE_FLAT_GLOB_KEY, GCS_ENABLE_FLAT_GLOB_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_FLAT_GLOB_KEY, enableFlatGlob);

    optionsBuilder.getCloudStorageOptionsBuilder()
            .setAutoRepairImplicitDirectoriesEnabled(enableAutoRepairImplicitDirectories)
            .setInferImplicitDirectoriesEnabled(enableInferImplicitDirectories);

    boolean enableMarkerFileCreation = config.getBoolean(GCS_ENABLE_MARKER_FILE_CREATION_KEY,
            GCS_ENABLE_MARKER_FILE_CREATION_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_MARKER_FILE_CREATION_KEY, enableMarkerFileCreation);

    optionsBuilder.getCloudStorageOptionsBuilder().setCreateMarkerObjects(enableMarkerFileCreation);

    String transportTypeString = config.get(GCS_HTTP_TRANSPORT_KEY, GCS_HTTP_TRANSPORT_DEFAULT);
    String proxyAddress = config.get(GCS_PROXY_ADDRESS_KEY, GCS_PROXY_ADDRESS_DEFAULT);
    HttpTransportFactory.HttpTransportType transportType = HttpTransportFactory
            .getTransportTypeOf(transportTypeString);

    optionsBuilder.getCloudStorageOptionsBuilder().setTransportType(transportType)
            .setProxyAddress(proxyAddress);

    String projectId = ConfigurationUtil.getMandatoryConfig(config, GCS_PROJECT_ID_KEY);

    optionsBuilder.getCloudStorageOptionsBuilder().setProjectId(projectId);

    long maxListItemsPerCall = config.getLong(GCS_MAX_LIST_ITEMS_PER_CALL, GCS_MAX_LIST_ITEMS_PER_CALL_DEFAULT);

    optionsBuilder.getCloudStorageOptionsBuilder().setMaxListItemsPerCall(maxListItemsPerCall);

    // Configuration for setting 250GB upper limit on file size to gain higher write throughput.
    boolean limitFileSizeTo250Gb = config.getBoolean(GCS_FILE_SIZE_LIMIT_250GB,
            GCS_FILE_SIZE_LIMIT_250GB_DEFAULT);

    optionsBuilder.getCloudStorageOptionsBuilder().getWriteChannelOptionsBuilder()
            .setFileSizeLimitedTo250Gb(limitFileSizeTo250Gb);

    // Configuration for setting GoogleCloudStorageWriteChannel upload buffer size.
    int uploadBufferSize = config.getInt(WRITE_BUFFERSIZE_KEY, WRITE_BUFFERSIZE_DEFAULT);
    LOG.debug("{} = {}", WRITE_BUFFERSIZE_KEY, uploadBufferSize);

    optionsBuilder.getCloudStorageOptionsBuilder().getWriteChannelOptionsBuilder()
            .setUploadBufferSize(uploadBufferSize);

    String applicationNameSuffix = config.get(GCS_APPLICATION_NAME_SUFFIX_KEY,
            GCS_APPLICATION_NAME_SUFFIX_DEFAULT);
    LOG.debug("{} = {}", GCS_APPLICATION_NAME_SUFFIX_KEY, applicationNameSuffix);

    String applicationName = GHFS_ID;
    if (!Strings.isNullOrEmpty(applicationNameSuffix)) {
        applicationName = applicationName + applicationNameSuffix;
    }

    LOG.debug("Setting GCS application name to {}", applicationName);
    optionsBuilder.getCloudStorageOptionsBuilder().setAppName(applicationName);

    boolean enablePerformanceCache = config.getBoolean(GCS_ENABLE_PERFORMANCE_CACHE_KEY,
            GCS_ENABLE_PERFORMANCE_CACHE_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_PERFORMANCE_CACHE_KEY, enablePerformanceCache);
    optionsBuilder.setIsPerformanceCacheEnabled(enablePerformanceCache);

    long performanceCacheMaxEntryAgeMillis = config.getLong(GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_KEY,
            GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_DEFAULT);
    LOG.debug("{} = {}", GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_KEY, performanceCacheMaxEntryAgeMillis);

    boolean listCachingEnabled = config.getBoolean(GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_KEY,
            GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_DEFAULT);
    LOG.debug("{} = {}", GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_KEY, listCachingEnabled);
    optionsBuilder.getPerformanceCachingOptionsBuilder().setMaxEntryAgeMillis(performanceCacheMaxEntryAgeMillis)
            .setInferImplicitDirectoriesEnabled(enableInferImplicitDirectories)
            .setListCachingEnabled(listCachingEnabled);

    return optionsBuilder;
}

From source file:com.hadoop.mapreduce.FourMcLineRecordReader.java

License:BSD License

@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {
    FileSplit split = (FileSplit) genericSplit;
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    Configuration job = HadoopUtils.getConfiguration(context);
    maxLineLen = job.getInt(MAX_LINE_LEN_CONF, Integer.MAX_VALUE);

    FileSystem fs = file.getFileSystem(job);
    CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(job);
    final CompressionCodec codec = compressionCodecs.getCodec(file);
    if (codec == null) {
        throw new IOException("Codec for file " + file + " not found, cannot run");
    }

    // open the file and seek to the start of the split
    fileIn = fs.open(split.getPath());

    // creates input stream and also reads the file header
    in = new LineReader(codec.createInputStream(fileIn), job);

    if (start != 0) {
        fileIn.seek(start);

        // read and ignore the first line
        in.readLine(new Text());
        start = fileIn.getPos();
    }

    this.pos = start;
}
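
A job that wants to cap the line length this reader accepts can set the corresponding key before submission (a sketch; it assumes the MAX_LINE_LEN_CONF constant is accessible, otherwise the literal key defined by the 4mc library must be used):

Configuration conf = job.getConfiguration();
// Cap lines at 1 MiB instead of the Integer.MAX_VALUE default used above.
conf.setInt(FourMcLineRecordReader.MAX_LINE_LEN_CONF, 1024 * 1024);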

From source file:com.hive_unit.HiveServicePing.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    for (String arg : args) {
        if (arg.contains("=")) {
            String vname = arg.substring(0, arg.indexOf('='));
            String vval = arg.substring(arg.indexOf('=') + 1);
            conf.set(vname, vval.replace("\"", ""));
        }
    }
    System.out.println(conf.get("service.host"));
    System.out.println(conf.get("service.port"));
    ServiceHive sh = new ServiceHive(conf.get("service.host"), conf.getInt("service.port", 10000));
    List<String> tables = sh.client.get_all_tables("default");
    for (String table : tables) {
        System.out.println(table);
    }
    return 0;
}
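
Note that each name=value command-line argument is copied into the configuration before the client is constructed, so a launch that passes, for example, service.host=hiveserver service.port=11000 (assuming the class is run through ToolRunner) overrides the getInt default of 10000.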

From source file:com.hortonworks.hbase.replication.bridge.HBaseClient.java

License:Apache License

/**
 * Get the ping interval from the configuration;
 * if it is not set, return the default value.
 *
 * @param conf Configuration
 * @return the ping interval
 */
static int getPingInterval(Configuration conf) {
    return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL);
}

From source file:com.hortonworks.hbase.replication.bridge.HBaseClient.java

License:Apache License

/**
 * @return the socket timeout
 */
static int getSocketTimeout(Configuration conf) {
    return conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT);
}