Example usage for org.apache.hadoop.conf.Configuration.getBoolean

Introduction

On this page you can find example usage of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property exists, or if the stored value cannot be parsed as a boolean, the supplied defaultValue is returned.
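
As a quick illustration of that contract, here is a minimal sketch (the property names are invented for the example): getBoolean parses the stored string when the property is present and falls back to defaultValue otherwise.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("demo.feature.enabled", "true"); // hypothetical property

        // Property present: the stored string is parsed as a boolean.
        boolean enabled = conf.getBoolean("demo.feature.enabled", false); // -> true

        // Property absent: the supplied default is returned instead.
        boolean fallback = conf.getBoolean("demo.feature.missing", true); // -> true

        System.out.println(enabled + " " + fallback);
    }
}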

Usage

From source file: org.apache.flume.sink.customhdfs.HDFSDataStream.java

License: Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}

From source file: org.apache.flume.sink.customhdfs.HDFSSequenceFile.java

License: Apache License

protected void open(Path dstPath, CompressionCodec codeC, CompressionType compType, Configuration conf,
        FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
    } else {
        outStream = hdfs.create(dstPath);
    }
    writer = SequenceFile.createWriter(conf, outStream, serializer.getKeyClass(), serializer.getValueClass(),
            compType, codeC);

    registerCurrentStream(outStream, hdfs, dstPath);
}

From source file: org.apache.flume.sink.hdfs.HDFSCompressedDataStream.java

License: Apache License

@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        fsOut = hdfs.append(dstPath);
        appending = true;
    } else {
        fsOut = hdfs.create(dstPath);
    }
    cmpOut = codec.createOutputStream(fsOut);
    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, cmpOut);
    if (appending && !serializer.supportsReopen()) {
        cmpOut.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    registerCurrentStream(fsOut, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
    isFinished = false;
}

From source file: org.apache.giraph.graph.GiraphJob.java

License: Apache License

/**
 * Check if the configuration is local.  If it is local, do additional
 * checks due to the restrictions of LocalJobRunner.
 *
 * @param conf Configuration
 */
private static void checkLocalJobRunnerConfiguration(Configuration conf) {
    String jobTracker = conf.get("mapred.job.tracker", null);
    if (!"local".equals(jobTracker)) { // null-safe: the property may be unset
        // Nothing to check
        return;
    }

    int maxWorkers = conf.getInt(MAX_WORKERS, -1);
    if (maxWorkers != 1) {
        throw new IllegalArgumentException("checkLocalJobRunnerConfiguration: When using "
                + "LocalJobRunner, there must be exactly one worker, since "
                + "only one task can run at a time!");
    }
    if (conf.getBoolean(SPLIT_MASTER_WORKER, SPLIT_MASTER_WORKER_DEFAULT)) {
        throw new IllegalArgumentException("checkLocalJobRunnerConfiguration: When using "
                + "LocalJobRunner, you cannot run in split master / worker "
                + "mode since there is only 1 task at a time!");
    }
}
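
For context, here is a minimal sketch of a configuration that would pass this check from within GiraphJob (the string keys standing in for MAX_WORKERS and SPLIT_MASTER_WORKER are assumptions made for illustration):

Configuration conf = new Configuration();
conf.set("mapred.job.tracker", "local");            // triggers the LocalJobRunner checks
conf.setInt("giraph.maxWorkers", 1);                // assumed MAX_WORKERS key: exactly one worker
conf.setBoolean("giraph.SplitMasterWorker", false); // assumed SPLIT_MASTER_WORKER key
checkLocalJobRunnerConfiguration(conf);             // returns without throwing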

From source file: org.apache.giraph.hive.jython.HiveJythonUtils.java

License: Apache License

/**
 * Set arbitrary option of unknown type in Configuration
 *
 * @param conf Configuration
 * @param key String key
 * @param value Object to set
 */
private static void setOption(Configuration conf, String key, Object value) {
    if (value instanceof Boolean) {
        conf.setBoolean(key, (Boolean) value);
    } else if (value instanceof Byte || value instanceof Short || value instanceof Integer) {
        conf.setInt(key, ((Number) value).intValue());
    } else if (value instanceof Long) {
        conf.setLong(key, (Long) value);
    } else if (value instanceof Float || value instanceof Double) {
        conf.setFloat(key, ((Number) value).floatValue());
    } else if (value instanceof String) {
        conf.set(key, value.toString());
    } else if (value instanceof Class) {
        conf.set(key, ((Class) value).getName());
    } else {
        throw new IllegalArgumentException("Don't know how to handle option key: " + key + ", value: " + value
                + ", value type: " + value.getClass());
    }
}
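
For reference, a hedged sketch of the Configuration calls this dispatcher reduces to for a few value types (the keys are invented for illustration):

Configuration conf = new Configuration();
conf.setBoolean("demo.enabled", true);   // value instanceof Boolean
conf.setInt("demo.workers", 4);          // Byte, Short, or Integer
conf.setLong("demo.timeout.ms", 5000L);  // Long
conf.setFloat("demo.rate", 0.5f);        // Float or Double (stored via floatValue())
conf.set("demo.name", "graph-job");      // String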

From source file: org.apache.giraph.utils.MasterLoggingAggregator.java

License: Apache License

/**
 * Check if master logging aggregator is used.
 *
 * @param conf Configuration
 * @return True iff master logging aggregator is used
 */
public static boolean useMasterLoggingAggregator(Configuration conf) {
    return conf.getBoolean(USE_MASTER_LOGGING_AGGREGATOR, USE_MASTER_LOGGING_AGGREGATOR_DEFAULT);
}

From source file: org.apache.gobblin.runtime.mapreduce.MRJobLauncher.java

License: Apache License

@VisibleForTesting
static void serializeJobState(FileSystem fs, Path mrJobDir, Configuration conf, JobState jobState, Job job)
        throws IOException {
    Path jobStateFilePath = new Path(mrJobDir, JOB_STATE_FILE_NAME);
    // Write the job state with an empty task set (work units are read by the mapper from a different file)
    try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath))) {
        jobState.write(dataOutputStream, false, conf.getBoolean(SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY,
                DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES));
    }

    job.getConfiguration().set(ConfigurationKeys.JOB_STATE_FILE_PATH_KEY, jobStateFilePath.toString());

    DistributedCache.addCacheFile(jobStateFilePath.toUri(), job.getConfiguration());
    job.getConfiguration().set(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME, jobStateFilePath.getName());
}

From source file: org.apache.hama.bsp.message.io.DualChannelByteBufferStream.java

License: Apache License

public void init(Configuration conf) {

    boolean directAlloc = conf.getBoolean(Constants.BYTEBUFFER_DIRECT, Constants.BYTEBUFFER_DIRECT_DEFAULT);
    int size = conf.getInt(Constants.BYTEBUFFER_SIZE, Constants.BUFFER_DEFAULT_SIZE);
    if (directAlloc) {
        buffer = ByteBuffer.allocateDirect(size);
    } else {
        // use a heap buffer when direct allocation is disabled
        buffer = ByteBuffer.allocate(size);
    }
    fileName = conf.get(Constants.DATA_SPILL_PATH) + File.separatorChar
            + new BigInteger(128, new SecureRandom()).toString(32);
    outputMode = true;
    outputStream = new SyncFlushByteBufferOutputStream(fileName);
    outputBuffer = new DirectByteBufferOutputStream(outputStream);
    outputStream.setBuffer(buffer);

}

From source file: org.apache.hama.bsp.message.queue.SpillingQueue.java

License: Apache License

@SuppressWarnings("unchecked")
@Override
public void init(Configuration conf, TaskAttemptID arg1) {

    bufferCount = conf.getInt(SPILLBUFFER_COUNT, 3);
    bufferSize = conf.getInt(SPILLBUFFER_SIZE, Constants.BUFFER_DEFAULT_SIZE);
    direct = conf.getBoolean(SPILLBUFFER_DIRECT, true);
    threshold = conf.getInt(SPILLBUFFER_THRESHOLD, Constants.BUFFER_DEFAULT_SIZE);
    fileName = conf.get(SPILLBUFFER_FILENAME, System.getProperty("java.io.tmpdir") + File.separatorChar
            + new BigInteger(128, new SecureRandom()).toString(32));

    messageClass = (Class<M>) conf.getClass(Constants.MESSAGE_CLASS, null);
    objectWritableMode = messageClass == null;

    SpilledDataProcessor processor;
    try {
        processor = new CombineSpilledDataProcessor<M>(fileName);
        processor.init(conf);
    } catch (FileNotFoundException e) {
        LOG.error("Error initializing spilled data stream.", e);
        throw new RuntimeException(e);
    }
    spillOutputBuffer = new SpillingDataOutputBuffer(bufferCount, bufferSize, threshold, direct, processor);
    objectWritable = new ObjectWritable();
    objectWritable.setConf(conf);
    this.conf = conf;
}

From source file: org.apache.hama.ipc.AsyncClient.java

License: Apache License

/**
 * The time after which a RPC will timeout. If ping is not enabled (via
 * ipc.client.ping), then the timeout value is the same as the pingInterval.
 * If ping is enabled, then there is no timeout value.
 *
 * @param conf Configuration
 * @return the timeout period in milliseconds. -1 if no timeout value is set
 */
final public static int getTimeout(Configuration conf) {
    if (!conf.getBoolean("ipc.client.ping", true)) {
        return getPingInterval(conf);
    }
    return -1;
}
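
A short usage sketch of the rule above (ipc.client.ping is a real Hadoop IPC property; the calling code is illustrative):

Configuration conf = new Configuration();
conf.setBoolean("ipc.client.ping", false);    // pings disabled
int timeout = AsyncClient.getTimeout(conf);   // the ping interval doubles as the timeout

conf.setBoolean("ipc.client.ping", true);     // pings enabled (the default)
int noTimeout = AsyncClient.getTimeout(conf); // -1: no timeout; liveness comes from pings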