Example usage for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration#getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property exists, or the stored value is not a valid boolean, the given defaultValue is returned.
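
A minimal, self-contained behavior sketch (assumes only hadoop-common on the classpath; the property name demo.flag is illustrative):

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Key not set: the supplied default is returned.
        boolean unset = conf.getBoolean("demo.flag", true); // true

        // Key set: the stored string is parsed case-insensitively.
        conf.set("demo.flag", "FALSE");
        boolean parsed = conf.getBoolean("demo.flag", true); // false

        // Unparseable value: falls back to the default.
        conf.set("demo.flag", "not-a-boolean");
        boolean fallback = conf.getBoolean("demo.flag", true); // true

        System.out.println(unset + " " + parsed + " " + fallback);
    }
}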

Usage

From source file:com.netflix.bdp.s3mper.metastore.impl.DynamoDBMetastore.java

License:Apache License

/**
 * Creates the metastore table in DynamoDB with the configured read and write
 * units if it doesn't already exist.
 * 
 * @param uri  filesystem URI whose scheme selects the credential properties
 * @param conf configuration supplying credentials, capacity units, and table settings
 * @throws Exception 
 */
@Override
public void initalize(URI uri, Configuration conf) throws Exception {
    scheme = uri.getScheme();

    String keyId = conf.get("fs." + uri.getScheme() + ".awsAccessKeyId");
    String keySecret = conf.get("fs." + uri.getScheme() + ".awsSecretAccessKey");

    //An override option for accessing across accounts
    keyId = conf.get("s3mper.override.awsAccessKeyId", keyId);
    keySecret = conf.get("s3mper.override.awsSecretAccessKey", keySecret);

    db = new AmazonDynamoDBClient(new BasicAWSCredentials(keyId, keySecret));

    readUnits = conf.getLong("s3mper.metastore.read.units", readUnits);
    writeUnits = conf.getLong("s3mper.metastore.write.units", writeUnits);

    retryCount = conf.getInt("s3mper.metastore.retry", retryCount);
    timeout = conf.getInt("s3mper.metastore.timeout", timeout);

    tableName = conf.get("s3mper.metastore.name", tableName);

    deleteMarkerEnabled = conf.getBoolean("s3mper.metastore.deleteMarker.enabled", false);

    boolean checkTableExists = conf.getBoolean("s3mper.metastore.create", false);

    if (checkTableExists) {
        ListTablesResult tables = db.listTables();

        if (!tables.getTableNames().contains(tableName)) {
            createTable();
        }
    }
}
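
The boolean keys read above can be pre-set on the configuration before initalize(uri, conf) runs. A minimal, hedged sketch using only key names visible in the snippet:

Configuration conf = new Configuration();
// Enable delete markers and table auto-creation (both default to false above).
conf.setBoolean("s3mper.metastore.deleteMarker.enabled", true);
conf.setBoolean("s3mper.metastore.create", true);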

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormat2.java

License:Apache License

static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter(
        final TaskAttemptContext context) throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, V>() {
        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, V cell) throws IOException {
            KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory exists
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /* Create a new StoreFile.Writer.
         * @param family
         * @return A WriterLength, containing a new StoreFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
                    .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
                    .build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormatBase.java

License:Apache License

// Adapted from HFileOutputFormat2.createRecordWriter(): the type parameter V extends Cell
// is fixed to KeyValue, and delete-marked cells are diverted to a side file of row keys.
public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(
        final TaskAttemptContext context) throws IOException, InterruptedException {

    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Path ignoreOutputPath = getDeleteRowKeyFile(outputPath); // side file for row keys of ignored (deleted) cells

    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add to hbase-*.xml if other than default
    // compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() { // V fixed to KeyValue

        // Map of families to writers and how much has been output on the
        // writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private final FSDataOutputStream dos = fs.create(ignoreOutputPath);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) // was (ImmutableBytesWritable row, V cell)
                throws IOException {
            // KeyValue kv = KeyValueUtil.ensureKeyValue(cell); -- unnecessary: kv already arrives as a KeyValue

            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();

            if (ignore(kv)) { // deleted cell: record its row key in the side file and skip output
                byte[] readBuf = rowKey;
                dos.write(readBuf, 0, readBuf.length);
                dos.write(Bytes.toBytes("\n"));
                return;
            }

            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory
            // exists
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /*
         * Create a new StoreFile.Writer.
         * 
         * @param family
         * 
         * @return A WriterLength, containing a new StoreFile.Writer.
         * 
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
                    .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
                    .build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            dos.flush(); // flush and close the deleted-row-key side file
            dos.close();
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }

    };
}

From source file:com.ngdata.hbaseindexer.HBaseIndexerConfiguration.java

License:Apache License

private static void checkDefaultsVersion(Configuration conf) {
    if (conf.getBoolean("hbaseindexer.defaults.for.version.skip", Boolean.FALSE))
        return;
    String defaultsVersion = conf.get("hbaseindexer.defaults.for.version");
    String thisVersion = VersionInfo.getVersion();
    if (!thisVersion.equals(defaultsVersion)) {
        throw new RuntimeException(
                "hbase-indexer-default.xml file seems to be for and old version of HBase Indexer ("
                        + defaultsVersion + "), this version is " + thisVersion);
    }
}
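
Deployments that deliberately run against mismatched defaults can disable this check. A minimal sketch using the skip key from the snippet:

Configuration conf = new Configuration();
// Bypass the defaults-version sanity check (default: false).
conf.setBoolean("hbaseindexer.defaults.for.version.skip", true);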

From source file:com.ngdata.hbaseindexer.mr.HBaseIndexerMapper.java

License:Apache License

private Indexer createIndexer(String indexName, Context context, IndexerConf indexerConf, String tableName,
        ResultToSolrMapper mapper, Map<String, String> indexConnectionParams)
        throws IOException, SharderException {
    Configuration conf = context.getConfiguration();
    if (conf.getBoolean(INDEX_DIRECT_WRITE_CONF_KEY, false)) {
        String solrMode = getSolrMode(indexConnectionParams);
        if (solrMode.equals("cloud")) {
            DirectSolrInputDocumentWriter writer = createCloudSolrWriter(context, indexConnectionParams);
            solrDocWriter = wrapInBufferedWriter(context, writer);
            return Indexer.createIndexer(indexName, indexerConf, tableName, mapper, null, null, solrDocWriter);
        } else if (solrMode.equals("classic")) {
            DirectSolrClassicInputDocumentWriter classicSolrWriter = createClassicSolrWriter(context,
                    indexConnectionParams);
            Sharder sharder = createSharder(indexConnectionParams, classicSolrWriter.getNumServers());
            solrDocWriter = wrapInBufferedWriter(context, classicSolrWriter);
            return Indexer.createIndexer(indexName, indexerConf, tableName, mapper, null, sharder,
                    solrDocWriter);
        } else {
            throw new RuntimeException(
                    "Only 'cloud' and 'classic' are valid values for solr.mode, but got " + solrMode);
        }
    } else {
        solrDocWriter = new MapReduceSolrInputDocumentWriter(context);
        return Indexer.createIndexer(indexName, indexerConf, tableName, mapper, null, null, solrDocWriter);
    }
}

From source file:com.odiago.flumebase.exec.BucketedAggregationElement.java

License:Apache License

public BucketedAggregationElement(FlowElementContext ctxt, AggregateNode aggregateNode) {
    super(ctxt, (Schema) aggregateNode.getAttr(PlanNode.OUTPUT_SCHEMA_ATTR));

    Configuration conf = aggregateNode.getConf();
    assert null != conf;
    mNumBuckets = conf.getInt(NUM_BUCKETS_KEY, DEFAULT_NUM_BUCKETS);
    mContinuousOutput = conf.getBoolean(CONTINUOUS_OUTPUT_KEY, DEFAULT_CONTINUOUS_OUTPUT);
    mMaxPriorEmitInterval = conf.getLong(MAX_PRIOR_EMIT_INTERVAL_KEY, DEFAULT_MAX_PRIOR_EMIT_INTERVAL);
    int slackTime = conf.getInt(SLACK_INTERVAL_KEY, DEFAULT_SLACK_INTERVAL);
    if (slackTime < 0) {
        mSlackTime = DEFAULT_SLACK_INTERVAL;
    } else {
        mSlackTime = slackTime;
    }

    assert mMaxPriorEmitInterval > 0;
    assert mMaxPriorEmitInterval > mSlackTime;

    List<TypedField> groupByFields = aggregateNode.getGroupByFields();
    if (null == groupByFields) {
        mGroupByFields = Collections.emptyList();
    } else {
        mGroupByFields = groupByFields;
    }

    mAggregateExprs = aggregateNode.getAggregateExprs();
    assert mAggregateExprs != null;
    mPropagateFields = aggregateNode.getPropagateFields();

    Expr windowExpr = aggregateNode.getWindowExpr();
    assert windowExpr.isConstant();
    try {
        mWindowSpec = (WindowSpec) windowExpr.eval(new EmptyEventWrapper());
        assert mWindowSpec.getRangeSpec().isConstant();
        mTimeSpan = (TimeSpan) mWindowSpec.getRangeSpec().eval(new EmptyEventWrapper());
    } catch (IOException ioe) {
        // The only way this can be thrown is if the window expr isn't actually constant.
        // This should not happen due to the assert above.
        LOG.error("Got IOException when calculating window width: " + ioe);
        throw new RuntimeException(ioe);
    }

    mBucketMap = new HashMap<Pair<Long, HashedEvent>, List<Bucket>>(mNumBuckets);
    mBucketsByGroup = new HashMap<HashedEvent, List<Pair<Long, List<Bucket>>>>();

    // Calculate the width of each bucket.
    mTimeModulus = mTimeSpan.getWidth() / mNumBuckets;
    if (mTimeModulus * mNumBuckets != mTimeSpan.getWidth()) {
        LOG.warn("Aggregation time step does not cleanly divide the time interval; "
                + "results may be inaccurate. Set " + NUM_BUCKETS_KEY + " to a better divisor.");
    }
}

From source file:com.osohm.nutch.parse.html.filter.DOMContentUtils.java

License:Apache License

public void setConf(Configuration conf) {
    // forceTags is used to override configurable tag ignoring, later on
    Collection<String> forceTags = new ArrayList<String>(1);

    this.conf = conf;
    linkParams.clear();
    linkParams.put("a", new LinkParams("a", "href", 1));
    linkParams.put("area", new LinkParams("area", "href", 0));

    if (conf.getBoolean("parser.html.form.use_action", true)) {
        linkParams.put("form", new LinkParams("form", "action", 1));
        if (conf.get("parser.html.form.use_action") != null)
            forceTags.add("form");
    }

    linkParams.put("frame", new LinkParams("frame", "src", 0));
    linkParams.put("iframe", new LinkParams("iframe", "src", 0));
    linkParams.put("script", new LinkParams("script", "src", 0));
    linkParams.put("link", new LinkParams("link", "href", 0));
    linkParams.put("img", new LinkParams("img", "src", 0));

    // remove unwanted link tags from the linkParams map
    String[] ignoreTags = conf.getStrings("parser.html.outlinks.ignore_tags");
    for (int i = 0; ignoreTags != null && i < ignoreTags.length; i++) {
        if (!forceTags.contains(ignoreTags[i]))
            linkParams.remove(ignoreTags[i]);
    }
}
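
The link extraction above is driven entirely by configuration. A hedged sketch of tuning it (keys taken from the snippet, values illustrative; assumes an existing DOMContentUtils instance):

Configuration conf = new Configuration();
// Stop treating <form action=...> as an outlink source (default: true).
conf.setBoolean("parser.html.form.use_action", false);
// Drop <script> and <img> outlinks; "form" would be force-retained if use_action were set.
conf.setStrings("parser.html.outlinks.ignore_tags", "script", "img");
domContentUtils.setConf(conf);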

From source file:com.practicalHadoop.outputformat.MultpleDirectories.FileOutputCommitter.java

License:Apache License

private static boolean shouldMarkOutputDir(Configuration conf) {
    return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, true);
}

From source file:com.qubole.rubix.spi.CacheConfig.java

License:Apache License

public static boolean isStrictMode(Configuration c) {
    return c.getBoolean(DATA_CACHE_STRICT_MODE, false);
}

From source file:com.qubole.rubix.spi.CacheConfig.java

License:Apache License

static boolean isCacheDataEnabled(Configuration c) {
    return c.getBoolean(DATA_CACHE_ENABLED, true);
}
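
Guards like these two are flipped with the matching setter. A hedged sketch (assumes the key constants are accessible from the caller; their string values are not shown in the snippets):

Configuration conf = new Configuration();
conf.setBoolean(DATA_CACHE_ENABLED, false);    // turn data caching off (default: on)
conf.setBoolean(DATA_CACHE_STRICT_MODE, true); // enable strict mode (default: off)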