Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

This page lists usage examples of org.apache.hadoop.conf.Configuration#addResource, collected from open-source projects.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
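
The prototype shown above is the overload that takes another Configuration; the examples on this page also use the String, Path, and InputStream overloads. As a minimal, hedged sketch of the common variants (the file names are placeholders and are assumed to exist; "my.key" is an illustrative property):

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Named resource, resolved against the classpath (assumed to exist).
        conf.addResource("my-app-site.xml");

        // Local file referenced by Path (assumed to exist).
        conf.addResource(new Path("/etc/myapp/conf/myapp-site.xml"));

        // In-memory XML supplied as an InputStream.
        String xml = "<?xml version=\"1.0\"?><configuration>"
                + "<property><name>my.key</name><value>my-value</value></property>"
                + "</configuration>";
        conf.addResource(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        // Resources added later override earlier ones, unless a property is marked final.
        System.out.println(conf.get("my.key")); // prints "my-value"
    }
}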

Usage

From source file:com.marklogic.mapreduce.examples.WikiLoader.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    if (args.length < 2) {
        System.err.println("Usage: WikiLoader configFile inputDir");
        System.exit(2);
    }
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

    Job job = Job.getInstance(conf, "wiki loader");
    job.setJarByClass(WikiLoader.class);
    job.setInputFormatClass(WikiInputFormat.class);
    job.setMapperClass(ArticleMapper.class);
    job.setMapOutputKeyClass(DocumentURI.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputFormatClass(ContentOutputFormat.class);

    ContentInputFormat.setInputPaths(job, new Path(otherArgs[1]));

    conf = job.getConfiguration();
    conf.addResource(otherArgs[0]);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:com.marklogic.mapreduce.examples.ZipContentLoader.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
        System.err.println("Usage: ZipContentLoader configFile inputDir");
        System.exit(2);
    }

    Job job = Job.getInstance(conf, "zip content loader");
    job.setJarByClass(ZipContentLoader.class);
    job.setInputFormatClass(ZipContentInputFormat.class);
    job.setMapperClass(ZipContentMapper.class);
    job.setMapOutputKeyClass(DocumentURI.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputFormatClass(ContentOutputFormat.class);

    ZipContentInputFormat.setInputPaths(job, new Path(otherArgs[1]));

    conf = job.getConfiguration();
    conf.addResource(otherArgs[0]);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:com.marklogic.mapreduce.test.CustomQuery.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    if (args.length < 1) {
        System.err.println("Usage: CustomQuery configFile");
        System.exit(2);
    }
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

    Job job = Job.getInstance(conf, "custom query");
    job.setJarByClass(CustomQuery.class);

    job.setInputFormatClass(NodeInputFormat.class);
    job.setMapperClass(QueryMapper.class);
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(Text.class);

    job.setReducerClass(QueryReducer.class);
    job.setOutputFormatClass(KeyValueOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    conf = job.getConfiguration();
    conf.addResource(otherArgs[0]);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // For the minicluster, the default initial delay for block reports (BRs) is 0.
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}

From source file:com.metlife.paymenthistory.dao.PaymentDaoHbaseGet.java

License:Apache License

public static ResponseDto getAllData() {
    try {
        Configuration config = HBaseConfiguration.create();
        config.addResource(new Path("/etc/hbase/conf/core-site.xml"));
        config.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

        // config.set("hbase.zookeeper.quorum", "192.168.1.114");
        config.set("hbase.zookeeper.quorum", "10.20.0.199");
        config.set("hbase.zookeeper.property.clientPort", "2181");

        Map<String, BigDecimal> data = new HashMap<String, BigDecimal>();
        List<AggregateDto> aggregates = new ArrayList<AggregateDto>();

        HTable tableBatch = new HTable(config, "state_total_batch");
        Scan s = new Scan();

        long startTimeBatch = System.nanoTime();
        ResultScanner sstableBatch = tableBatch.getScanner(s);
        long endTimeBatch = System.nanoTime();
        for (Result r : sstableBatch) {
            for (KeyValue kv : r.raw()) {
                if ((new String(kv.getFamily())).equals("totalPremium")
                        && (new String(kv.getQualifier())).equals("amount")) {
                    BigDecimal d = new BigDecimal(new String(kv.getValue()));
                    data.put(new String(kv.getRow()), d);
                }
            }
        }

        HTable tableStream = new HTable(config, "state_total_stream");
        long startTimeStream = System.nanoTime();
        ResultScanner ssStream = tableStream.getScanner(s);
        long endTimeStream = System.nanoTime();
        for (Result r : ssStream) {
            for (KeyValue kv : r.raw()) {
                if ((new String(kv.getFamily())).equals("totalPremium")
                        && (new String(kv.getQualifier())).equals("totalPremium")) {
                    BigDecimal d = new BigDecimal(new String(kv.getValue()));
                    BigDecimal sum = data.get(new String(kv.getRow())).add(d);
                    // System.out.println(Math.round(sum.divide(new
                    // BigDecimal(10000))));
                    data.put(new String(kv.getRow()), sum);
                }
            }
        }

        for (String key : data.keySet()) {
            AggregateDto agg = new AggregateDto();
            agg.setAggregationKey(key);
            agg.setValue(data.get(key).longValue());
            aggregates.add(agg);
        }

        ResponseDto response = new ResponseDto();
        response.setAggregates(aggregates);
        response.setQueryExecutionTime(
                (endTimeBatch + endTimeStream - startTimeBatch - startTimeStream) / 1000000);
        tableBatch.close();
        tableStream.close();
        return response;

    } catch (Exception e) {
        e.printStackTrace();
        return new ResponseDto();
    }
}

From source file:com.metlife.paymenthistory.dao.PaymentDaoHbaseGet.java

License:Apache License

public static ResponseDto getFilteredData(final String id /* , final String state */) {
    try {

        Configuration config = HBaseConfiguration.create();
        config.addResource(new Path("/etc/hbase/conf/core-site.xml"));
        config.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

        // config.set("hbase.zookeeper.quorum", "192.168.1.114");
        config.set("hbase.zookeeper.quorum", "10.20.0.199");
        config.set("hbase.zookeeper.property.clientPort", "2181");

        Filter filterId = new PrefixFilter(Bytes.toBytes(id + "_"));
        ResponseDto response = new ResponseDto();
        UserDto user = new UserDto();
        // user.setState(state);
        List<PaymentHistoryDto> payments = new ArrayList<PaymentHistoryDto>();

        /************** START BATCH ***************************/
        HTable tableBatch = new HTable(config, "payment_history_batch1");
        Get get = new Get(id.getBytes());
        // get.addFamily(Bytes.toBytes("transactioninfo"));
        // get.addFamily(Bytes.toBytes("personalinfo"));
        get.setMaxVersions(20);

        long startTimeBatch = System.nanoTime();
        Result rs = tableBatch.get(get);
        long endTimeBatch = System.nanoTime();

        if (rs.size() != 0) {
            String[][] aryStrings = new String[3][(rs.size() - 8) / 3];

            int i = 0;
            int j = 0;
            int k = 0;
            int setUser = 0;

            for (KeyValue kv : rs.raw()) {
                //  if (setUser == 0) {
                if ("Age".equals(new String(kv.getQualifier()))) {
                    user.setAge(new String(kv.getValue()));
                } else if ("Country".equals(new String(kv.getQualifier()))) {
                    user.setCountry(new String(kv.getValue()));
                } else if ("Fname".equals(new String(kv.getQualifier()))) {
                    user.setFirstName(new String(kv.getValue()).toUpperCase());
                } else if ("Gender".equals(new String(kv.getQualifier()))) {
                    user.setGender(new String(kv.getValue()));
                } else if ("Lname".equals(new String(kv.getQualifier()))) {
                    user.setLastName(new String(kv.getValue()).toUpperCase());
                } else if ("PolicyNo".equals(new String(kv.getQualifier()))) {
                    user.setPolicyNo(new String(kv.getValue()));
                } else if ("State".equals(new String(kv.getQualifier()))) {
                    user.setState(new String(kv.getValue()).toUpperCase());
                } else if ("UserID".equals(new String(kv.getQualifier()))) {
                    user.setId(new String(kv.getValue()));
                    //  setUser = 1;
                }
                // }

                if ("PaymentDate".equals(new String(kv.getQualifier()))) {
                    aryStrings[0][i] = new String(kv.getQualifier()) + ":" + new String(kv.getValue());
                    i++;
                } else if ("PremiumAmount".equals(new String(kv.getQualifier()))) {
                    aryStrings[1][j] = new String(kv.getQualifier()) + ":" + new String(kv.getValue());
                    j++;
                } else if ("TransactionType".equals(new String(kv.getQualifier()))) {
                    aryStrings[2][k] = new String(kv.getQualifier()) + ":" + new String(kv.getValue());
                    k++;
                }
            }

            int row = 3;
            int col = (rs.size() - 8) / 3;
            int i1, j1;
            for (i1 = 0; i1 < col; i1++) {
                PaymentHistoryDto py = new PaymentHistoryDto();
                for (j1 = 0; j1 < row; j1++) {
                    String[] split = aryStrings[j1][i1].split(":");
                    if (split[0].equalsIgnoreCase("PaymentDate")) {
                        py.setPaymentDate(getFormattedDate(split[1]));
                    } else if (split[0].equalsIgnoreCase("PremiumAmount")) {
                        int int1 = new Integer(split[1]) / 10;
                        py.setAmount(String.valueOf(int1));
                    } else if (split[0].equalsIgnoreCase("TransactionType")) {
                        py.setTransactionType(split[1]);
                    }
                }
                payments.add(py);
            }
        }
        /**************************** END BATCH ***************************************/

        /**************************** START STREAM ***************************************/
        HTable tableStream = new HTable(config, "payment_history_stream");
        Scan sStream = new Scan();
        sStream.setFilter(filterId);

        long startTimeStream = System.nanoTime();
        ResultScanner sstableStream = tableStream.getScanner(sStream);
        long endTimeStream = System.nanoTime();
        for (Result r : sstableStream) {
            PaymentHistoryDto py = new PaymentHistoryDto();
            for (KeyValue kv : r.raw()) {
                String q = new String(kv.getQualifier());
                if ("personalinfo".equals(new String(kv.getFamily()))) {
                    if (q.equalsIgnoreCase("Fname")) {
                        user.setFirstName(new String(kv.getValue()).toUpperCase());
                    } else if (q.equalsIgnoreCase("Lname")) {
                        user.setLastName(new String(kv.getValue()).toUpperCase());
                    } else if (q.equalsIgnoreCase("Age")) {
                        user.setAge(new String(kv.getValue()));
                    } else if (q.equalsIgnoreCase("Gender")) {
                        user.setGender(new String(kv.getValue()));
                    } else if (q.equalsIgnoreCase("State")) {
                        user.setState(new String(kv.getValue()).toUpperCase());
                    } else if (q.equalsIgnoreCase("Country")) {
                        user.setCountry(new String(kv.getValue()));
                    } else if (q.equalsIgnoreCase("PolicyNo")) {
                        user.setPolicyNo(new String(kv.getValue()));
                    } else if (q.equalsIgnoreCase("UserID")) {
                        user.setId(new String(kv.getValue()));
                    }
                } else if ("transactioninfo".equals(new String(kv.getFamily()))) {
                    if (q.equalsIgnoreCase("premiumamount")) {
                        // py.setAmount(new String(kv.getValue()));
                        int int1 = new Integer(new String(kv.getValue())) / 10;
                        // System.out.println(String.valueOf(int1));
                        py.setAmount(String.valueOf(int1));
                    } else if (q.equalsIgnoreCase("transactiondate")) {
                        py.setPaymentDate(getFormattedDate(new String(kv.getValue())));
                    } else if (q.equalsIgnoreCase("transactiontype")) {
                        py.setTransactionType(new String(kv.getValue()));
                    }
                }
            }
            payments.add(py);
        }

        /**************************** END STREAM ***************************************/

        response.setUser(user);
        response.setPayments(payments);
        /*
         * long endTimeBatch = 0; long startTimeBatch = 0;
         */
        tableBatch.close();
        tableStream.close();
        response.setQueryExecutionTime(
                (endTimeBatch + endTimeStream - startTimeBatch - startTimeStream) / 1000000);

        return response;

    } catch (Exception e) {
        e.printStackTrace();
        return new ResponseDto();
    }
}

From source file:com.moz.fiji.mapreduce.IntegrationTestJobHistoryFijiTable.java

License:Apache License

/**
 * Test of all the basic information recorded by a mapper.
 */
@Test
public void testMappers() throws Exception {
    createAndPopulateFooTable();
    final Configuration jobConf = getConf();
    // Set a value in the configuration. We'll check to be sure we can retrieve it later.
    jobConf.set("conf.test.animal.string", "squirrel");
    final Fiji fiji = Fiji.Factory.open(getFijiURI());
    try {
        final FijiURI fooTableURI = FijiURI.newBuilder(getFijiURI()).withTableName("foo").build();
        final JobHistoryFijiTable jobHistory = JobHistoryFijiTable.open(fiji);

        try {
            // Construct a Producer for this table.
            final FijiProduceJobBuilder builder = FijiProduceJobBuilder.create().withConf(jobConf)
                    .withInputTable(fooTableURI).withProducer(EmailDomainProducer.class)
                    .withOutput(MapReduceJobOutputs.newDirectFijiTableMapReduceJobOutput(fooTableURI));
            FijiMapReduceJob mrJob = builder.build();

            // Record the jobId and run the job.
            String jobName = mrJob.getHadoopJob().getJobName();
            LOG.info("About to run job: " + jobName);
            assertTrue(mrJob.run());
            String jobId = mrJob.getHadoopJob().getJobID().toString();
            LOG.info("Job was run with id: " + jobId);

            // Retrieve the recorded values and sanity test them.
            JobHistoryEntry jobEntry = jobHistory.getJobDetails(jobId);
            assertEquals(jobEntry.getJobName(), jobName);
            assertEquals(jobEntry.getJobId(), jobId);
            assertTrue(jobEntry.getJobStartTime() < jobEntry.getJobEndTime());
            assertEquals("SUCCEEDED", jobEntry.getJobEndStatus());

            // Check counters. We don't know the exact number of rows in the foo table, so just check if
            // it's greater than 0.
            final String countersString = jobEntry.getJobCounters();
            final Pattern countersPattern = Pattern.compile("PRODUCER_ROWS_PROCESSED=(\\d+)");
            final Matcher countersMatcher = countersPattern.matcher(countersString);
            assertTrue(countersMatcher.find());
            assertTrue(Integer.parseInt(countersMatcher.group(1)) > 0);

            // Test to make sure the Configuration has the correct producer class, and records the value
            // we set previously.
            final String configString = jobEntry.getJobConfiguration();
            final Configuration config = new Configuration();
            config.addResource(new ByteArrayInputStream(configString.getBytes()));
            assertTrue(EmailDomainProducer.class == config.getClass(FijiConfKeys.FIJI_PRODUCER_CLASS, null));
            assertEquals("Couldn't retrieve configuration field from deserialized configuration.", "squirrel",
                    config.get("conf.test.animal.string"));
        } finally {
            jobHistory.close();
        }
    } finally {
        fiji.release();
    }
}

From source file:com.moz.fiji.mapreduce.kvstore.impl.XmlKeyValueStoreParser.java

License:Apache License

/**
 * Given a DOM Node object that represents a &lt;configuration&gt; block
 * within a &lt;store&gt; object, reformat this as an xml document that can be parsed
 * by {@link org.apache.hadoop.conf.Configuration}, and then return a
 * Configuration instance to pass into a KeyValueStore object to instantiate.
 *
 * @param configNode a node representing a &lt;configuration&gt; element
 *     in the DOM that is the root of the KeyValueStore's configuration.
 * @return a new Configuration containing the key-value pairs associated
 *     with this node.
 * @throws IOException if there's an error processing the XML data.
 */
private Configuration parseConfiguration(Node configNode) throws IOException {
    if (null == configNode) {
        return null;
    } else if (!configNode.getNodeName().equals("configuration")) {
        throw new IOException("Expected <configuration> node, got " + configNode.getNodeName());
    }

    try {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        DocumentBuilder builder = factory.newDocumentBuilder();
        Document document = builder.newDocument();
        Element root = document.createElement("configuration");
        document.appendChild(root);
        copyConfigNodes(root, configNode, document);

        TransformerFactory tf = TransformerFactory.newInstance();
        Transformer transformer = tf.newTransformer();
        transformer.setOutputProperty(OutputKeys.METHOD, "xml");
        transformer.setOutputProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
        transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");

        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        transformer.transform(new DOMSource(document), new StreamResult(outStream));

        String confXmlText = outStream.toString("UTF-8");

        // This only contains entries from the XML file component for this store; no defaults.
        Configuration conf = new Configuration(false);
        conf.addResource(new ByteArrayInputStream(confXmlText.getBytes("UTF-8")));

        // Use KeyValueStoreConfiguration.fromConf() to remap these nodes into a namespace
        // for this individual key-value store, but return the underlying Configuration object.
        return KeyValueStoreConfiguration.fromConf(conf).getDelegate();
    } catch (TransformerConfigurationException e) {
        throw new RuntimeException(e);
    } catch (TransformerException e) {
        throw new RuntimeException(e);
    } catch (ParserConfigurationException e) {
        throw new IOException(e);
    }
}

From source file:com.ngdata.hbaseindexer.HBaseIndexerConfiguration.java

License:Apache License

public static Configuration addHbaseIndexerResources(Configuration conf) {
    conf.addResource("hbase-default.xml");
    conf.addResource("hbase-site.xml");
    conf.addResource("hbase-indexer-default.xml");
    conf.addResource("hbase-indexer-site.xml");

    checkDefaultsVersion(conf);
    return conf;
}
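
A hedged usage sketch of the helper above (assuming the hbase-* and hbase-indexer-* XML resources are available on the classpath): because resources added later override earlier ones unless a property is marked final, values in hbase-indexer-site.xml take precedence over hbase-indexer-default.xml and the hbase-* resources.

import org.apache.hadoop.conf.Configuration;

import com.ngdata.hbaseindexer.HBaseIndexerConfiguration;

public class IndexerConfSketch {
    public static void main(String[] args) {
        // Layer the hbase-indexer resources on top of a plain Configuration.
        Configuration conf = HBaseIndexerConfiguration.addHbaseIndexerResources(new Configuration());
        // The effective value comes from the last resource that defines it
        // ("hbase.zookeeper.quorum" is just an example key).
        System.out.println(conf.get("hbase.zookeeper.quorum"));
    }
}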

From source file:com.ngdata.sep.tools.monitoring.ReplicationStatusRetriever.java

License:Apache License

private Configuration getHBaseConf(ZooKeeperItf zk, int hbaseMasterPort)
        throws KeeperException, InterruptedException, IOException {
    // Read the HBase/Hadoop configuration via the master web ui
    // This is debatable, but it avoids any pitfalls with conf dirs and also works with launch-test-lily
    byte[] masterServerName = removeMetaData(zk.getData("/hbase/master", false, new Stat()));
    String hbaseMasterHostName = getServerName(masterServerName).getHostname();

    String url = String.format("http://%s:%d/conf", hbaseMasterHostName, hbaseMasterPort);
    System.out.println("Reading HBase configuration from " + url);
    byte[] data = readUrl(url);

    Configuration conf = new Configuration();
    conf.addResource(new ByteArrayInputStream(data));

    return conf;
}