Example usage for org.apache.hadoop.conf Configuration getLong

Introduction

This page collects example usages of the org.apache.hadoop.conf.Configuration method getLong.

Prototype

public long getLong(String name, long defaultValue) 

Document

Get the value of the name property as a long. If no such property exists, defaultValue is returned.
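
A minimal sketch of the call (the property name and values are illustrative, and org.apache.hadoop.conf.Configuration is assumed to be on the classpath):

Configuration conf = new Configuration();
// Falls back to 30000 because "my.example.timeout.ms" is not set yet.
long timeoutMs = conf.getLong("my.example.timeout.ms", 30000L);
conf.setLong("my.example.timeout.ms", 5000L);
timeoutMs = conf.getLong("my.example.timeout.ms", 30000L); // now 5000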

Usage

From source file: com.cloudera.impala.security.DelegationTokenManager.java

License: Apache License

protected DelegationTokenManager(Configuration conf, boolean generatesTokens, ZooKeeperSession zkSession)
        throws IOException {
    long secretKeyInterval = conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    long tokenGcInterval = conf.getLong(DELEGATION_TOKEN_GC_INTERVAL, DELEGATION_TOKEN_GC_INTERVAL_DEFAULT);

    generatesTokens_ = generatesTokens;
    if (zkSession != null) {
        ZooKeeperTokenStore store = new ZooKeeperTokenStore(zkSession);
        mgr_ = new PersistedDelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime,
                tokenRenewInterval, tokenGcInterval, store);
    } else {
        mgr_ = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
                tokenGcInterval);
    }
    mgr_.startThreads();
}
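
All four intervals above are read from the configuration, so a default can be overridden by setting the corresponding property before the manager is constructed (the constructor is protected, so in practice this happens in a subclass or factory). A hypothetical override reusing the key constant from the snippet; the value is illustrative, and the units follow whatever the compiled-in default uses:

Configuration conf = new Configuration();
conf.setLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY, 24L * 60 * 60 * 1000);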

From source file: com.cloudera.llama.am.impl.PhasingOutRMConnector.java

License: Apache License

public PhasingOutRMConnector(Configuration conf, ScheduledExecutorService stp,
        RmConnectorCreator newConnectorCreator) throws LlamaException {
    this.conf = conf;
    this.newConnectorCreator = newConnectorCreator;
    this.active = newConnectorCreator.create();

    long interval = conf.getLong(LlamaAM.RM_CONNECTOR_RECYCLE_INTERVAL_KEY,
            LlamaAM.RM_CONNECTOR_RECYCLE_INTERVAL_DEFAULT);
    this.future = stp.scheduleAtFixedRate(this, interval, interval, TimeUnit.MINUTES);
}
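
Note that the interval is passed to scheduleAtFixedRate with TimeUnit.MINUTES, so the property is interpreted in minutes rather than milliseconds. A hypothetical override using the key constant from the snippet:

conf.setLong(LlamaAM.RM_CONNECTOR_RECYCLE_INTERVAL_KEY, 15L); // recycle every 15 minutes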

From source file: com.cloudera.recordservice.examples.terasort.TeraInputFormat.java

License: Apache License

/**
 * Use the input splits to take samples of the input and generate sample
 * keys. By default reads 100,000 keys from 10 locations in the input, sorts
 * them and picks N-1 keys to generate N equally sized partitions.
 * @param job the job to sample
 * @param partFile where to write the output file to
 * @throws Throwable if something goes wrong
 */
public static void writePartitionFile(final JobContext job, Path partFile) throws Throwable {
    long t1 = System.currentTimeMillis();
    Configuration conf = job.getConfiguration();
    final TeraInputFormat inFormat = new TeraInputFormat();
    final TextSampler sampler = new TextSampler();
    int partitions = job.getNumReduceTasks();
    long sampleSize = conf.getLong(SAMPLE_SIZE, 100000);
    final List<InputSplit> splits = inFormat.getSplits(job);
    long t2 = System.currentTimeMillis();
    System.out.println("Computing input splits took " + (t2 - t1) + "ms");
    int samples = Math.min(conf.getInt(NUM_PARTITIONS, 10), splits.size());
    System.out.println("Sampling " + samples + " splits of " + splits.size());
    final long recordsPerSample = sampleSize / samples;
    final int sampleStep = splits.size() / samples;
    Thread[] samplerReader = new Thread[samples];
    SamplerThreadGroup threadGroup = new SamplerThreadGroup("Sampler Reader Thread Group");
    // take N samples from different parts of the input
    for (int i = 0; i < samples; ++i) {
        final int idx = i;
        samplerReader[i] = new Thread(threadGroup, "Sampler Reader " + idx) {
            {
                setDaemon(true);
            }

            @Override
            public void run() {
                long records = 0;
                try {
                    TaskAttemptContext context = new TaskAttemptContextImpl(job.getConfiguration(),
                            new TaskAttemptID());
                    RecordReader<Text, Text> reader = inFormat.createRecordReader(splits.get(sampleStep * idx),
                            context);
                    reader.initialize(splits.get(sampleStep * idx), context);
                    while (reader.nextKeyValue()) {
                        sampler.addKey(new Text(reader.getCurrentKey()));
                        records += 1;
                        if (recordsPerSample <= records) {
                            break;
                        }
                    }
                } catch (IOException ie) {
                    System.err.println(
                            "Got an exception while reading splits " + StringUtils.stringifyException(ie));
                    throw new RuntimeException(ie);
                } catch (InterruptedException e) {
                    // Interrupted while reading; treat it as the end of sampling for this split.
                }
            }
        };
        samplerReader[i].start();
    }
    FileSystem outFs = partFile.getFileSystem(conf);
    DataOutputStream writer = outFs.create(partFile, true, 64 * 1024, (short) 10,
            outFs.getDefaultBlockSize(partFile));
    for (int i = 0; i < samples; i++) {
        try {
            samplerReader[i].join();
            if (threadGroup.getThrowable() != null) {
                throw threadGroup.getThrowable();
            }
        } catch (InterruptedException e) {
            // Interrupted while joining a sampler thread; move on to the next one.
        }
    }
    for (Text split : sampler.createPartitions(partitions)) {
        split.write(writer);
    }
    writer.close();
    long t3 = System.currentTimeMillis();
    System.out.println("Computing parititions took " + (t3 - t2) + "ms");
}
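
Both sampling knobs read above fall back to hard-coded defaults (100,000 sampled keys drawn from at most 10 splits). A hypothetical job setup that shrinks the sample, assuming the SAMPLE_SIZE and NUM_PARTITIONS constants are visible to the caller:

Configuration conf = job.getConfiguration();
conf.setLong(TeraInputFormat.SAMPLE_SIZE, 10000L); // sample 10,000 keys in total
conf.setInt(TeraInputFormat.NUM_PARTITIONS, 5);    // draw them from 5 splits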

From source file: com.cloudera.recordservice.mr.WorkerUtil.java

License: Apache License

/**
 * Creates a builder for RecordService worker client from the configuration and
 * the delegation token.
 * @param jobConf the hadoop configuration
 * @param delegationToken the delegation token that the worker client should use to
 *                        talk to the RS worker process.
 */
public static Builder getBuilder(Configuration jobConf, DelegationToken delegationToken) {
    // Try to get the delegation token from the credentials. If it is there, use it.
    RecordServiceWorkerClient.Builder builder = new RecordServiceWorkerClient.Builder();
    int fetchSize = jobConf.getInt(ConfVars.FETCH_SIZE_CONF.name, DEFAULT_FETCH_SIZE);
    long memLimit = jobConf.getLong(ConfVars.MEM_LIMIT_CONF.name, -1);
    long limit = jobConf.getLong(ConfVars.RECORDS_LIMIT_CONF.name, -1);
    int maxAttempts = jobConf.getInt(ConfVars.WORKER_RETRY_ATTEMPTS_CONF.name, -1);
    int taskSleepMs = jobConf.getInt(ConfVars.WORKER_RETRY_SLEEP_MS_CONF.name, -1);
    int connectionTimeoutMs = jobConf.getInt(ConfVars.WORKER_CONNECTION_TIMEOUT_MS_CONF.name, -1);
    int rpcTimeoutMs = jobConf.getInt(ConfVars.WORKER_RPC_TIMEOUT_MS_CONF.name, -1);
    boolean enableLogging = jobConf.getBoolean(ConfVars.WORKER_ENABLE_SERVER_LOGGING_CONF.name, false);

    if (fetchSize != -1)
        builder.setFetchSize(fetchSize);
    if (memLimit != -1)
        builder.setMemLimit(memLimit);
    if (limit != -1)
        builder.setLimit(limit);
    if (maxAttempts != -1)
        builder.setMaxAttempts(maxAttempts);
    if (taskSleepMs != -1)
        builder.setSleepDurationMs(taskSleepMs);
    if (connectionTimeoutMs != -1)
        builder.setConnectionTimeoutMs(connectionTimeoutMs);
    if (rpcTimeoutMs != -1)
        builder.setRpcTimeoutMs(rpcTimeoutMs);
    if (enableLogging)
        builder.setLoggingLevel(LOG);
    if (delegationToken != null)
        builder.setDelegationToken(delegationToken);

    return builder;
}
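
Every numeric option here uses -1 as a "not configured" sentinel, so only values explicitly present in the configuration reach the builder. A hypothetical caller that caps worker memory before building; the key comes from the ConfVars constant used above and the value is illustrative:

Configuration jobConf = new Configuration();
jobConf.setLong(ConfVars.MEM_LIMIT_CONF.name, 512L * 1024 * 1024); // 512 MB
RecordServiceWorkerClient.Builder builder = WorkerUtil.getBuilder(jobConf, null);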

From source file: com.datatorrent.benchmark.memsql.MemsqlInputBenchmarkTest.java

License: Open Source License

@Test
public void testMethod() throws SQLException, IOException {
    Configuration conf = new Configuration();
    InputStream inputStream = new FileInputStream("src/site/conf/dt-site-memsql.xml");
    conf.addResource(inputStream);

    MemsqlStore memsqlStore = new MemsqlStore();
    memsqlStore.setDbUrl(conf.get("dt.rootDbUrl"));
    memsqlStore.setConnectionProperties(conf.get(
            "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));

    AbstractMemsqlOutputOperatorTest.memsqlInitializeDatabase(memsqlStore);

    MemsqlOutputOperator outputOperator = new MemsqlOutputOperator();
    outputOperator.getStore()
            .setDbUrl(conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.dbUrl"));
    outputOperator.getStore().setConnectionProperties(conf.get(
            "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));
    outputOperator.setBatchSize(BATCH_SIZE);

    Random random = new Random();
    com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
    attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
    attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
    attributeMap.put(DAG.APPLICATION_ID, APP_ID);
    OperatorContextTestHelper.TestIdOperatorContext context = new OperatorContextTestHelper.TestIdOperatorContext(
            OPERATOR_ID, attributeMap);

    long seedSize = conf.getLong("dt.seedSize", SEED_SIZE);

    outputOperator.setup(context);
    outputOperator.beginWindow(0);

    for (long valueCounter = 0; valueCounter < seedSize; valueCounter++) {
        outputOperator.input.put(random.nextInt());
    }

    outputOperator.endWindow();
    outputOperator.teardown();

    MemsqlInputBenchmark app = new MemsqlInputBenchmark();
    LocalMode lm = LocalMode.newInstance();

    try {
        lm.prepareDAG(app, conf);
        LocalMode.Controller lc = lm.getController();
        lc.run(20000);
    } catch (Exception ex) {
        DTThrowable.rethrow(ex);
    }

    IOUtils.closeQuietly(inputStream);
}

From source file: com.elex.dmp.lda.CachingCVB0Mapper.java

License: Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    log.info("Retrieving configuration");
    Configuration conf = context.getConfiguration();
    float eta = conf.getFloat(CVB0Driver.TERM_TOPIC_SMOOTHING, Float.NaN);
    float alpha = conf.getFloat(CVB0Driver.DOC_TOPIC_SMOOTHING, Float.NaN);
    long seed = conf.getLong(CVB0Driver.RANDOM_SEED, 1234L);
    numTopics = conf.getInt(CVB0Driver.NUM_TOPICS, -1);
    int numTerms = conf.getInt(CVB0Driver.NUM_TERMS, -1);
    int numUpdateThreads = conf.getInt(CVB0Driver.NUM_UPDATE_THREADS, 1);
    int numTrainThreads = conf.getInt(CVB0Driver.NUM_TRAIN_THREADS, 4);
    maxIters = conf.getInt(CVB0Driver.MAX_ITERATIONS_PER_DOC, 10);
    float modelWeight = conf.getFloat(CVB0Driver.MODEL_WEIGHT, 1.0f);

    log.info("Initializing read model");
    TopicModel readModel;
    Path[] modelPaths = CVB0Driver.getModelPaths(conf);
    if (modelPaths != null && modelPaths.length > 0) {
        readModel = new TopicModel(conf, eta, alpha, null, numUpdateThreads, modelWeight, modelPaths);
    } else {
        log.info("No model files found");
        readModel = new TopicModel(numTopics, numTerms, eta, alpha, RandomUtils.getRandom(seed), null,
                numTrainThreads, modelWeight);
    }

    log.info("Initializing write model");
    TopicModel writeModel = modelWeight == 1
            ? new TopicModel(numTopics, numTerms, eta, alpha, null, numUpdateThreads)
            : readModel;

    log.info("Initializing model trainer");
    modelTrainer = new ModelTrainer(readModel, writeModel, numTrainThreads, numTopics, numTerms);
    modelTrainer.start();
}
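
The seed read above defaults to 1234L, which keeps training reproducible across runs unless CVB0Driver.RANDOM_SEED is set explicitly. A hypothetical way to vary it per run:

conf.setLong(CVB0Driver.RANDOM_SEED, System.currentTimeMillis()); // deliberately non-reproducible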

From source file: com.elex.dmp.lda.CachingCVB0PerplexityMapper.java

License: Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    MemoryUtil.startMemoryLogger(5000);

    log.info("Retrieving configuration");
    Configuration conf = context.getConfiguration();
    float eta = conf.getFloat(CVB0Driver.TERM_TOPIC_SMOOTHING, Float.NaN);
    float alpha = conf.getFloat(CVB0Driver.DOC_TOPIC_SMOOTHING, Float.NaN);
    long seed = conf.getLong(CVB0Driver.RANDOM_SEED, 1234L);
    random = RandomUtils.getRandom(seed);
    numTopics = conf.getInt(CVB0Driver.NUM_TOPICS, -1);
    int numTerms = conf.getInt(CVB0Driver.NUM_TERMS, -1);
    int numUpdateThreads = conf.getInt(CVB0Driver.NUM_UPDATE_THREADS, 1);
    int numTrainThreads = conf.getInt(CVB0Driver.NUM_TRAIN_THREADS, 4);
    maxIters = conf.getInt(CVB0Driver.MAX_ITERATIONS_PER_DOC, 10);
    float modelWeight = conf.getFloat(CVB0Driver.MODEL_WEIGHT, 1.0f);
    testFraction = conf.getFloat(CVB0Driver.TEST_SET_FRACTION, 0.1f);

    log.info("Initializing read model");
    TopicModel readModel;
    Path[] modelPaths = CVB0Driver.getModelPaths(conf);
    if (modelPaths != null && modelPaths.length > 0) {
        readModel = new TopicModel(conf, eta, alpha, null, numUpdateThreads, modelWeight, modelPaths);
    } else {
        log.info("No model files found");
        readModel = new TopicModel(numTopics, numTerms, eta, alpha, RandomUtils.getRandom(seed), null,
                numTrainThreads, modelWeight);
    }

    log.info("Initializing model trainer");
    modelTrainer = new ModelTrainer(readModel, null, numTrainThreads, numTopics, numTerms);

    log.info("Initializing topic vector");
    topicVector = new DenseVector(new double[numTopics]);
}

From source file: com.facebook.hive.orc.OrcConf.java

License: Open Source License

public static long getLongVar(Configuration conf, ConfVars var) {
    return conf.getLong(var.varname, var.defaultLongVal);
}
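
This is the typed-wrapper pattern: each ConfVars entry pairs a property name with its default, so call sites repeat neither. A minimal sketch of what such an enum could look like; the entry, property name, and default value are assumptions for illustration, not OrcConf's actual definitions (only the varname and defaultLongVal fields are taken from the snippet):

enum ConfVars {
    EXAMPLE_STRIDE("example.orc.row.index.stride", 10000L);

    final String varname;
    final long defaultLongVal;

    ConfVars(String varname, long defaultLongVal) {
        this.varname = varname;
        this.defaultLongVal = defaultLongVal;
    }
}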

From source file: com.facebook.hiveio.conf.LongConfOption.java

License: Apache License

/**
 * Lookup value
 * @param conf Configuration
 * @return value set for key, or defaultValue
 */
public long get(Configuration conf) {
    return conf.getLong(getKey(), defaultValue);
}
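
Wrapping the key and default in a LongConfOption keeps the default defined in exactly one place. A hypothetical usage, assuming the class exposes a (key, defaultValue) constructor matching the getKey() and defaultValue members used above:

LongConfOption MAX_SPLIT_BYTES = new LongConfOption("hiveio.example.max.split.bytes", 1L << 20);
Configuration conf = new Configuration();
long maxSplitBytes = MAX_SPLIT_BYTES.get(conf); // 1048576 unless the property is set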

From source file: com.facebook.presto.hive.s3.PrestoS3FileSystem.java

License: Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}
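
The two getLong reads here feed the S3 transfer manager's multipart thresholds. A hypothetical override reusing the property-name constants from the snippet; the byte values are illustrative:

Configuration conf = new Configuration();
conf.setLong(S3_MULTIPART_MIN_FILE_SIZE, 64L * 1024 * 1024); // start multipart uploads at 64 MB
conf.setLong(S3_MULTIPART_MIN_PART_SIZE, 16L * 1024 * 1024); // 16 MB minimum part size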