Example usage for org.apache.hadoop.conf Configuration get

List of usage examples for org.apache.hadoop.conf Configuration get

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration get.

Prototype

public String get(String name, String defaultValue) 

Document

Get the value of the name property. If no such property exists, the supplied defaultValue is returned.
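
A minimal, self-contained sketch of the lookup-with-default behavior (the property names below are invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.host", "node1.example.com");

        // The key exists, so the stored value wins over the default.
        String host = conf.get("example.host", "localhost"); // "node1.example.com"

        // The key is absent, so the supplied default is returned.
        String port = conf.get("example.port", "8080"); // "8080"

        System.out.println(host + ":" + port);
    }
}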

Usage

From source file:com.chinamobile.bcbsp.http.HttpServer.java

License:Apache License

/**
 * Configure an ssl listener on the server.
 * @param addr
 *        address to listen on
 * @param sslConf
 *        conf to retrieve ssl options
 * @param needClientAuth
 *        whether client authentication is required
 */
public void addSslListener(InetSocketAddress addr, Configuration sslConf, boolean needClientAuth)
        throws IOException {
    if (webServer.isStarted()) {
        throw new IOException("Failed to add ssl listener");
    }
    if (needClientAuth) {
        // Set up SSL truststore for authenticating clients
        System.setProperty("javax.net.ssl.trustStore", sslConf.get("ssl.server.truststore.location", ""));
        System.setProperty("javax.net.ssl.trustStorePassword",
                sslConf.get("ssl.server.truststore.password", ""));
        System.setProperty("javax.net.ssl.trustStoreType", sslConf.get("ssl.server.truststore.type", "jks"));
    }
    SslSocketConnector sslListener = new SslSocketConnector();
    sslListener.setHost(addr.getHostName());
    sslListener.setPort(addr.getPort());
    sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
    sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
    sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
    sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
    sslListener.setNeedClientAuth(needClientAuth);
    webServer.addConnector(sslListener);
}
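
A hedged sketch of wiring this up: the ssl-server.xml resource name, address, and port are assumptions for illustration, and httpServer stands for an already-constructed HttpServer instance.

// Load the ssl.server.* keys this method reads via sslConf.get(...).
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");

// Listen on all interfaces on port 8443 without client authentication.
httpServer.addSslListener(new InetSocketAddress("0.0.0.0", 8443), sslConf, false);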

From source file:com.chinamobile.bcbsp.io.titan.TitanInputFormat.java

License:Apache License

@Override
public void initialize(Configuration configuration) {
    /* The configuration is initialized from "titan.xml". */
    conf = new Configuration(configuration);
    conf.set("TITAN_SERVER_ADDRESS", configuration.get("titan.server.address", "localhost"));
    conf.set("TITAN_INPUT_TABLE_NAME", configuration.get("titan.input.table.name", "graph"));
    conf.set("HBASE_MASTER_ADDRESS", configuration.get("hbase.master.address", "localhost"));
    conf.set("HBASE_INPUT_TABLE_NAME", configuration.get("hbase.input.table.name", "titan"));
    try {
        client = RexsterClientFactory.open(conf.get("TITAN_SERVER_ADDRESS"),
                conf.get("TITAN_INPUT_TABLE_NAME"));
    } catch (Exception e) {
        LOG.error("The client of database can not obtain!");
        return;
    }
    conf = HBaseConfiguration.create(conf);
    try {
        hTable = new HTable(conf, conf.get("HBASE_INPUT_TABLE_NAME"));
    } catch (IOException e) {
        LOG.error("The table in HBase can not obtain");
        return;
    }
}

From source file:com.chinamobile.bcbsp.io.titan.TitanOutputFormat.java

License:Apache License

@Override
public void initialize(Configuration configuration) {
    conf = new Configuration(configuration);
    conf.set("TITAN_SERVER_ADDRESS", configuration.get("titan.server.address", "localhost"));
    conf.set("TITAN_OUTPUT_TABLE_NAME", configuration.get("titan.output.table.name", "graph"));
    conf.set("HBASE_MASTER_ADDRESS", configuration.get("hbase.master.address", "localhost"));
    conf.set("HBASE_OUTPUT_TABLE_NAME", configuration.get("hbase.output.table.name", "titan"));
    try {
        client = RexsterClientFactory.open(conf.get("TITAN_SERVER_ADDRESS"),
                conf.get("TITAN_OUTPUT_TABLE_NAME"));
    } catch (Exception e) {
        LOG.error("The client of database can not obtain!");
        return;
    }
}

From source file:com.chinamobile.bcbsp.workermanager.WorkerAgentForJob.java

License:Apache License

/**
 * Constructor.
 * 
 * @param conf
 *        Configuration
 * @param jobId
 *        BSPJobID
 * @param jobConf
 *        BSPJob
 * @param workerManager
 *        WorkerManager
 */
public WorkerAgentForJob(Configuration conf, BSPJobID jobId, BSPJob jobConf, WorkerManager workerManager)
        throws IOException {
    this.jobId = jobId;
    this.jobConf = jobConf;
    this.workerManager = workerManager;
    this.workerManagerName = conf.get(Constants.BC_BSP_WORKERAGENT_HOST,
            Constants.DEFAULT_BC_BSP_WORKERAGENT_HOST);
    this.wssc = new WorkerSSController(jobId, this.workerManagerName);
    this.conf = conf;
    String bindAddress = conf.get(Constants.BC_BSP_WORKERAGENT_HOST, Constants.DEFAULT_BC_BSP_WORKERAGENT_HOST);
    int bindPort = conf.getInt(Constants.BC_BSP_WORKERAGENT_PORT, Constants.DEFAULT_BC_BSP_WORKERAGENT_PORT);
    bindPort = bindPort + Integer.parseInt(jobId.toString().substring(17));
    portForJob = bindPort;
    workAddress = new InetSocketAddress(bindAddress, bindPort);
    reinitialize();
    // For Aggregation
    loadAggregators();
}

From source file:com.ci.backports.hadoop.hbase.ZHFileOutputFormat.java

License:Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong("hbase.hregion.max.filesize", HConstants.DEFAULT_MAX_FILE_SIZE);
    final int blocksize = conf.getInt("hfile.min.blocksize.size", HFile.DEFAULT_BLOCKSIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String compression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) throws IOException {
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, create its output directory
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HFile writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family);
            }

            // we now have the proper HFile writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we can detect a row transition.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /* Create a new HFile.Writer.
         * @param family
         * @return A WriterLength, containing a new HFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            wl.writer = new HFile.Writer(fs, StoreFile.getUniqueFile(fs, familydir), blocksize, compression,
                    KeyValue.KEY_COMPARATOR);
            this.writers.put(family, wl);
            return wl;
        }

        private void close(final HFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}
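
The "hfile.compression" key read above is the "invented" config the comment refers to; besides adding it to hbase-*.xml, here is a hedged sketch of overriding it programmatically before submitting the job (Compression.Algorithm.GZ is one of HBase's built-in algorithms):

Configuration conf = HBaseConfiguration.create();
// Produce GZ-compressed HFiles instead of the NONE default read above.
conf.set("hfile.compression", Compression.Algorithm.GZ.getName());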

From source file:com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

License:Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
        throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Path ignoreOutputPath = new Path(outputPath + "_ignore");

    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add to hbase-*.xml if other than default
    // compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    if (fs.exists(ignoreOutputPath)) {
        fs.delete(ignoreOutputPath, true);
        LOG.info("Deleted " + ignoreOutputPath.toString() + " successfully.");
    }

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
        // Map of families to writers and how much has been output on the
        // writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private final FSDataOutputStream dos = fs.create(ignoreOutputPath);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) throws IOException {
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();

            if (ignore(kv)) {
                byte[] readBuf = rowKey;
                dos.write(readBuf, 0, readBuf.length);
                dos.write(Bytes.toBytes("\n"));
                return;
            }
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, create its output directory
            if (wl == null) {
                Path path = new Path(outputdir, Bytes.toString(family));
                fs.mkdirs(path);
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new StoreFile writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper StoreFile writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we can detect a row transition.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /*
         * Create a new StoreFile.Writer.
         * @param family
         * @return A WriterLength, containing a new StoreFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs, blockSize)
                    .withOutputDir(familydir)
                    .withCompression(AbstractHFileWriter.compressionByName(compression))
                    .withBloomType(bloomType).withComparator(KeyValue.COMPARATOR).withDataBlockEncoder(encoder)
                    .withChecksumType(HStore.getChecksumType(conf))
                    .withBytesPerChecksum(HStore.getBytesPerChecksum(conf)).build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            dos.flush();
            dos.close();
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file:com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

License:Apache License

/**
 * Run inside the task to deserialize column family to given conf value map.
 *
 * @param conf
 * @param confName
 * @return a map of column family to the given configuration value
 */
private static Map<byte[], String> createFamilyConfValueMap(Configuration conf, String confName) {
    Map<byte[], String> confValMap = new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    String confVal = conf.get(confName, "");
    for (String familyConf : confVal.split("&")) {
        String[] familySplit = familyConf.split("=");
        if (familySplit.length != 2) {
            continue;
        }
        try {
            confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8").getBytes(),
                    URLDecoder.decode(familySplit[1], "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            // will not happen with UTF-8 encoding
            throw new AssertionError(e);
        }
    }
    return confValMap;
}
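
A hedged sketch of the serialized format this method expects: URL-encoded "family=value" pairs joined with "&" (the key name below is hypothetical):

Configuration conf = new Configuration();
// Two column families: cf1 uses GZ compression, cf2 uses none.
conf.set("families.compression", "cf1=GZ&cf2=NONE");

Map<byte[], String> compressionMap = createFamilyConfValueMap(conf, "families.compression");
// compressionMap now maps bytes("cf1") -> "GZ" and bytes("cf2") -> "NONE".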

From source file:com.cloudera.branchreduce.onezero.ImplicitEnumerationSolver.java

License:Open Source License

@Override
public void initialize(BranchReduceContext<PartialSolution, CurrentBestSolution> context) {
    if (objective == null) {
        Configuration conf = context.getConfiguration();
        String lpProblem = conf.get(LP_PROBLEM, "");
        if (lpProblem.isEmpty()) {
            throw new IllegalArgumentException("No branchreduce.lp.problem config value specified, exiting");
        }
        SimplifiedLpParser parser = new SimplifiedLpParser(lpProblem);
        parser.parse();
        init(parser.getObjective(), parser.getConstraints());
    }
}

From source file:com.cloudera.hadoop.hdfs.nfs.nfs4.WritableFileFileHandleStore.java

License:Apache License

@Override
protected synchronized void initialize() throws IOException {

    Configuration configuration = getConf();

    mFileHandleStoreFile = new File(
            configuration.get(NFS_FILEHANDLE_STORE_FILE, DEFAULT_NFS_FILEHANDLE_STORE_FILE));

    Pair<List<FileHandleStoreEntry>, Boolean> pair = readFile();
    boolean fileHandleStoreIsBad = pair.getSecond();

    try {
        mFileHandleStoreFile.delete();
        FileOutputStream fos = new FileOutputStream(mFileHandleStoreFile);
        mFileHandleStoreChannel = fos.getChannel();
        mFileHandleStore = new DataOutputStream(fos);
        List<FileHandleStoreEntry> entryList = pair.getFirst();
        Collections.sort(entryList);
        for (FileHandleStoreEntry entry : entryList) {
            storeFileHandle(entry);
        }
        if (fileHandleStoreIsBad) {
            LOGGER.info("FileHandleStore fixed");
        }
    } catch (IOException ex) {
        throw new IOException("Unable to create filehandle store file: " + mFileHandleStoreFile, ex);
    }

}

From source file:com.cloudera.impala.authorization.AuthorizationConfig.java

License:Apache License

/**
 * Creates a new authorization configuration object.
 * Should only be called when loading all configurations from sentry-site.xml.
 * @param sentryConfigFile - Absolute path and file name of the Sentry service config file.
 */
public AuthorizationConfig(String sentryConfigFile) {
    sentryConfig_ = new SentryConfig(sentryConfigFile);
    if (!Strings.isNullOrEmpty(sentryConfigFile))
        sentryConfig_.loadConfig();
    Configuration conf = sentryConfig_.getConfig();
    serverName_ = conf.get(SentryConfig.AUTHZ_SERVER_NAME, "");
    policyFile_ = conf.get(SentryConfig.AUTHZ_PROVIDER_RESOURCE, "");
    policyProviderClassName_ = conf.get(SentryConfig.AUTHZ_PROVIDER, "");
    loadAllFromSentrySite_ = true;
}