Example usage for org.apache.hadoop.conf Configuration getInt

Introduction

This page lists example usages of the org.apache.hadoop.conf.Configuration#getInt method, drawn from open source projects.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int. If no such property exists, the provided defaultValue is returned; if the stored value is not a valid int, an error is thrown.
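
Before the project examples below, here is a minimal, self-contained sketch of the call. The property names ("example.retry.count", "example.timeout.seconds") are invented for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Set a property programmatically (it could equally come from a *-site.xml resource).
        conf.setInt("example.retry.count", 5);

        // The property is present, so its stored value (5) is returned.
        int retries = conf.getInt("example.retry.count", 3);

        // The property is absent, so the supplied default (30) is returned.
        int timeoutSeconds = conf.getInt("example.timeout.seconds", 30);

        System.out.println(retries + " / " + timeoutSeconds);
    }
}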

Usage

From source file:com.scaleoutsoftware.soss.hserver.GridInputFormat.java

License:Apache License

/**
 * Gets the number of input splits. First, tries the corresponding property,
 * then falls back to the number of available slots.
 *
 * @param context job context
 * @return number of input splits
 */
private int getSuggestedNumberOfSplits(JobContext context) throws IOException {
    int numberOfSplits;
    Configuration conf = context.getConfiguration();
    numberOfSplits = conf.getInt(inputNumberOfSplitsProperty, -1);
    if (numberOfSplits > 0)
        return numberOfSplits;
    if (HServerParameters.isHServerJob(context.getConfiguration())) { //We are running a hServer job, not a Hadoop job
        return HSERVER_JOB_DEFAULT_NUMBER_OF_SPLITS;
    }
    try {
        ClusterStatus status = (new JobClient((JobConf) context.getConfiguration())).getClusterStatus();
        numberOfSplits = status.getMaxMapTasks() - status.getMapTasks();
        if (numberOfSplits > 0)
            return numberOfSplits;
    } catch (Throwable t) {
        //Do nothing, will fall back to default;
    }
    return DEFAULT_NUMBER_OF_SPLITS;
}

From source file:com.scaleoutsoftware.soss.hserver.GridOutputFormat.java

License:Apache License

@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {

    Configuration configuration = taskAttemptContext.getConfiguration();

    if (configuration.getBoolean(outputIsNamedMapProperty, false)) { //This is a NamedMap
        String mapName = configuration.get(outputNamedMapProperty);
        Class<CustomSerializer<K>> keySerializerClass = (Class<CustomSerializer<K>>) configuration
                .getClass(outputNamedMapKeySerializerProperty, null);
        Class<CustomSerializer<V>> valueSerializerClass = (Class<CustomSerializer<V>>) configuration
                .getClass(outputNamedMapValueSerializerProperty, null);
        int smOrdinal = configuration.getInt(SERIALIZATION_MODE, SerializationMode.DEFAULT.ordinal());
        int amOrdinal = configuration.getInt(AVAILABILITY_MODE, AvailabilityMode.USE_REPLICAS.ordinal());
        SerializationMode serializationMode = SerializationMode.values()[smOrdinal];
        AvailabilityMode availabilityMode = AvailabilityMode.values()[amOrdinal];

        if (mapName == null || mapName.length() == 0 || keySerializerClass == null
                || valueSerializerClass == null) {
            throw new IOException("Input format is not configured with a valid NamedMap.");
        }

        CustomSerializer<K> keySerializer = ReflectionUtils.newInstance(keySerializerClass, configuration);
        keySerializer.setObjectClass((Class<K>) configuration.getClass(outputNamedMapKeyProperty, null));
        CustomSerializer<V> valueSerializer = ReflectionUtils.newInstance(valueSerializerClass, configuration);
        valueSerializer.setObjectClass((Class<V>) configuration.getClass(outputNamedMapValueProperty, null));
        NamedMap<K, V> namedMap = NamedMapFactory.getMap(mapName, keySerializer, valueSerializer);
        namedMap.setSerializationMode(serializationMode);
        namedMap.setAvailabilityMode(availabilityMode);
        return new NamedMapRecordWriter<K, V>(namedMap);
    } else { //This is a NamedCache
        String cacheName = configuration.get(outputNamedCacheProperty);
        if (cacheName == null || cacheName.length() == 0)
            throw new IOException("Output NamedCache not specified.");

        NamedCache cache;

        try {
            cache = CacheFactory.getCache(cacheName);
        } catch (NamedCacheException e) {
            throw new IOException("Cannot initialize NamedCache.", e);
        }

        Class valueClass = taskAttemptContext.getOutputValueClass();
        if (Writable.class.isAssignableFrom(valueClass)) {
            cache.setCustomSerialization(new WritableSerializer(valueClass));
        }

        return new NamedCacheRecordWriter<K, V>(cache);
    }
}
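
The two getInt calls above store enum choices as their ordinals and recover them with SerializationMode.values()[...] and AvailabilityMode.values()[...]. A minimal sketch of that round trip, using a made-up Mode enum in place of those types:

import org.apache.hadoop.conf.Configuration;

public class EnumOrdinalExample {
    // Hypothetical enum standing in for SerializationMode or AvailabilityMode.
    enum Mode { DEFAULT, COMPACT }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("example.mode", Mode.COMPACT.ordinal());   // store the enum as its ordinal

        int ordinal = conf.getInt("example.mode", Mode.DEFAULT.ordinal());
        Mode mode = Mode.values()[ordinal];                     // map the ordinal back to a constant

        System.out.println(mode);                               // prints COMPACT
    }
}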

From source file:com.scaleoutsoftware.soss.hserver.HServerParameters.java

License:Apache License

/**
 * Gets the value for the given parameter.
 *
 * @param name          name of the parameter
 * @param configuration configuration object
 * @return value of the parameter
 */
public static int getSetting(String name, Configuration configuration) {
    if (!_defaults.containsKey(name)) {
        throw new RuntimeException("Cannot find default value for property " + name);
    }
    if (configuration == null)
        return (Integer) _defaults.get(name);
    return configuration.getInt(name, (Integer) _defaults.get(name));
}

From source file:com.scaleoutsoftware.soss.hserver.JobParameter.java

License:Apache License

/**
 * This method can be called from the mapper or the reducer to retrieve the job parameter
 * object. This job parameter object is set at the job invocation time by calling {@link HServerJob#setJobParameter(Object)},
 * and is distributed to all worker nodes running mappers and reducers.
 * This method is thread safe, so it can be called by multiple instances of the mapper concurrently.
 *
 * @param configuration configuration (from context)
 * @return parameter object
 * @throws IOException if a ScaleOut hServer access error occurred
 */
public static Object get(Configuration configuration) throws IOException {
    return HServerInvocationParameters
            .retrieveFromCache(configuration.getInt(HServerParameters.INVOCATION_ID, 0)).getJobParameter();
}

From source file:com.scaleoutsoftware.soss.hserver.NamedMapInputFormat.java

License:Apache License

@Override
@SuppressWarnings("unchecked")
public RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    Configuration configuration = taskAttemptContext.getConfiguration();
    int mapId = configuration.getInt(inputAppIdProperty, 0);
    Class<CustomSerializer<K>> keySerializerClass = (Class<CustomSerializer<K>>) configuration
            .getClass(inputNamedMapKeySerializerProperty, null);
    Class<CustomSerializer<V>> valueSerializerClass = (Class<CustomSerializer<V>>) configuration
            .getClass(inputNamedMapValueSerializerProperty, null);

    if (mapId == 0 || keySerializerClass == null || valueSerializerClass == null) {
        throw new IOException("Input format is not configured with a valid NamedMap.");
    }

    CustomSerializer<K> keySerializer = ReflectionUtils.newInstance(keySerializerClass, configuration);
    keySerializer.setObjectClass((Class<K>) configuration.getClass(inputNamedMapKeyProperty, null));
    CustomSerializer<V> valueSerializer = ReflectionUtils.newInstance(valueSerializerClass, configuration);
    valueSerializer.setObjectClass((Class<V>) configuration.getClass(inputNamedMapValueProperty, null));
    int smOrdinal = configuration.getInt(SERIALIZATION_MODE, SerializationMode.DEFAULT.ordinal());
    SerializationMode serializationMode = SerializationMode.values()[smOrdinal];
    return new NamedMapReader<K, V>(configuration, mapId, keySerializer, valueSerializer, serializationMode);
}

From source file:com.sensei.indexing.hadoop.reduce.ShardWriter.java

License:Apache License

private void setParameters(Configuration conf) {
    int maxFieldLength = conf.getInt(SenseiJobConfig.MAX_FIELD_LENGTH, -1);
    if (maxFieldLength > 0) {
        writer.setMaxFieldLength(maxFieldLength);
    }
    writer.setUseCompoundFile(conf.getBoolean(SenseiJobConfig.USE_COMPOUND_FILE, false));
    maxNumSegments = conf.getInt(SenseiJobConfig.MAX_NUM_SEGMENTS, -1);

    if (maxFieldLength > 0) {
        logger.info(SenseiJobConfig.MAX_FIELD_LENGTH + " = " + writer.getMaxFieldLength());
    }
    logger.info(SenseiJobConfig.USE_COMPOUND_FILE + " = " + writer.getUseCompoundFile());
    logger.info(SenseiJobConfig.MAX_NUM_SEGMENTS + " = " + maxNumSegments);
}

From source file:com.skp.experiment.cf.als.hadoop.SolveImplicitFeedbackMultithreadedMapper.java

License:Apache License

@Override
protected void setup(Context ctx) throws IOException, InterruptedException {
    /** parse parameters from configuration */
    Configuration conf = ctx.getConfiguration();
    double lambda = Double.parseDouble(ctx.getConfiguration().get(LAMBDA));
    double alpha = Double.parseDouble(ctx.getConfiguration().get(ALPHA));
    int numFeatures = ctx.getConfiguration().getInt(NUM_FEATURES, -1);
    int numRows = ctx.getConfiguration().getInt(NUM_ROWS, -1);
    Path YPath = new Path(ctx.getConfiguration().get(FEATURE_MATRIX));
    Path YtransposeYPath = new Path(ctx.getConfiguration().get(FEATURE_MATRIX_TRANSPOSE));

    /** set file lock if necessary */
    lockPath = conf.get(LOCK_FILE);
    lockNums = conf.getInt(LOCK_FILE_NUMS, 1);
    if (lockPath != null) {
        checkLock(ctx, lockNums);
    }
    /** load necessary matrix U/M into memory */
    Y = ALSMatrixUtil.readMatrixByRowsMultithred(YPath, ctx.getConfiguration(), numRows, numFeatures);
    YtransposeY = ALSMatrixUtil.readDistributedRowMatrix(YtransposeYPath, numFeatures, numFeatures);
    /** initiate linear solver */
    solver = new ImplicitFeedbackAlternatingLeastSquaresSolver(numFeatures, lambda, alpha, Y, YtransposeY);
    LOG.info("Matrix dimension in memory " + Y.rowSize() + "," + Y.columnSize());
    Preconditions.checkArgument(numFeatures > 0, "numFeatures was not set correctly!");
}

From source file:com.soteradefense.dga.louvain.giraph.LouvainMasterCompute.java

License:Apache License

/**
 * Determine if progress is still being made or if the
 * computation should halt.
 *
 * @param history change counts recorded for recent passes
 * @param conf    Hadoop configuration holding the halting thresholds
 * @return true if the computation should halt, false otherwise
 */
protected static boolean decideToHalt(List<Long> history, Configuration conf) {
    int minProgress = conf.getInt("minimum.progress", 0);
    int tries = conf.getInt("progress.tries", 1);

    // Halt if the most recent change was 0
    if (0 == history.get(history.size() - 1)) {
        return true;
    }

    //Halt if the change count has increased 4 times
    long previous = history.get(0);
    int count = 0;
    for (long current : history) {
        if (current >= previous - minProgress) {
            count++;
        }
        previous = current;
    }
    return (count > tries);
}
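
For context, a driver would typically seed these thresholds on the job configuration before the computation starts. A minimal fragment with illustrative values; the property names match the getInt calls above:

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
conf.setInt("minimum.progress", 100); // changes smaller than this count as no progress
conf.setInt("progress.tries", 3);     // low-progress rounds tolerated before halting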

From source file:com.splicemachine.fs.s3.PrestoS3FileSystem.java

License:Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}

From source file:com.splicemachine.orc.OrcConf.java

License:Open Source License

public static int getIntVar(Configuration conf, OrcConf.ConfVars var) {
    return conf.getInt(var.varname, var.defaultIntVal);
}