Example usage for org.apache.hadoop.conf Configuration writeXml

Introduction

This page collects usage examples of org.apache.hadoop.conf.Configuration.writeXml from open-source projects.

Prototype

public void writeXml(Writer out) throws IOException
public void writeXml(OutputStream out) throws IOException

Note: Configuration provides both overloads; most of the examples below pass an OutputStream.
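
Before the project examples, here is a minimal, self-contained sketch of the call. The key my.example.key and the file conf-snapshot.xml are illustrative placeholders, not taken from any project below.

import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.hadoop.conf.Configuration;

public class WriteXmlSketch {
    public static void main(String[] args) throws IOException {
        // false: start empty instead of loading core-default.xml and core-site.xml
        Configuration conf = new Configuration(false);
        conf.set("my.example.key", "my-value");

        // writeXml serializes every set property as a <configuration> XML document
        try (Writer out = Files.newBufferedWriter(Paths.get("conf-snapshot.xml"), StandardCharsets.UTF_8)) {
            conf.writeXml(out);
        }
    }
}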

Usage

From source file: org.apache.accumulo.test.RewriteTabletDirectoriesIT.java

License: Apache License

@Test
public void test() throws Exception {
    Connector c = getConnector();
    c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    // Write some data to a table and add some splits
    BatchWriter bw = c.createBatchWriter(tableName, null);
    final SortedSet<Text> splits = new TreeSet<>();
    for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
        splits.add(new Text(split));
        Mutation m = new Mutation(new Text(split));
        m.put(new byte[] {}, new byte[] {}, new byte[] {});
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().addSplits(tableName, splits);

    BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
    DIRECTORY_COLUMN.fetch(scanner);
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    assertNotNull("TableID for " + tableName + " was null", tableId);
    scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
    // verify the directory entries are all on v1, make a few entries relative
    bw = c.createBatchWriter(MetadataTable.NAME, null);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                entry.getValue().toString().contains(v1.toString()));
        count++;
        if (count % 2 == 0) {
            String[] parts = entry.getValue().toString().split("/");
            Key key = entry.getKey();
            Mutation m = new Mutation(key.getRow());
            m.put(key.getColumnFamily(), key.getColumnQualifier(),
                    new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
            bw.addMutation(m);
        }
    }
    bw.close();
    assertEquals(splits.size() + 1, count);

    // This should fail: only one volume
    assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i",
            c.getInstance().getInstanceName(), "-t", tableName).waitFor());

    cluster.stop();

    // add the 2nd volume
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
    cluster.start();
    c = getConnector();

    // change the directory entries
    assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());

    // verify a more equal sharing
    int v1Count = 0, v2Count = 0;
    for (Entry<Key, Value> entry : scanner) {
        if (entry.getValue().toString().contains(v1.toString())) {
            v1Count++;
        }
        if (entry.getValue().toString().contains(v2.toString())) {
            v2Count++;
        }
    }

    log.info("Count for volume1: " + v1Count);
    log.info("Count for volume2: " + v2Count);

    assertEquals(splits.size() + 1, v1Count + v2Count);
    // a fair chooser will differ by less than count(volumes)
    assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count + " "
            + v2Count, Math.abs(v1Count - v2Count) < 2);
    // verify we can read the old data
    count = 0;
    for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
        assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
        count++;
    }
    assertEquals(splits.size(), count);
}

From source file: org.apache.accumulo.test.VolumeIT.java

License: Apache License

@Test
public void testAddVolumes() throws Exception {

    String[] tableNames = getUniqueNames(2);

    // grab this before shutting down cluster
    String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    File v3f = new File(volDirBase, "v3");
    assertTrue(v3f.mkdir() || v3f.isDirectory());
    Path v3 = new Path("file://" + v3f.getAbsolutePath());

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString() + "," + v3.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());

    // check that all volumes are initialized
    for (Path volumePath : Arrays.asList(v1, v2, v3)) {
        FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
        Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
        FileStatus[] iids = fs.listStatus(vp);
        Assert.assertEquals(1, iids.length);
        Assert.assertEquals(uuid, iids[0].getPath().getName());
    }

    // start cluster and verify that new volume is used
    cluster.start();

    verifyVolumesUsed(tableNames[1], false, v1, v2, v3);
}

From source file: org.apache.accumulo.test.VolumeIT.java

License: Apache License

@Test
public void testNonConfiguredVolumes() throws Exception {

    String[] tableNames = getUniqueNames(2);

    // grab this before shutting down cluster
    String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    File v3f = new File(volDirBase, "v3");
    assertTrue(v3f.mkdir() || v3f.isDirectory());
    Path v3 = new Path("file://" + v3f.getAbsolutePath());

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString() + "," + v3.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());

    // check that all volumes are initialized
    for (Path volumePath : Arrays.asList(v1, v2, v3)) {
        FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
        Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
        FileStatus[] iids = fs.listStatus(vp);
        Assert.assertEquals(1, iids.length);
        Assert.assertEquals(uuid, iids[0].getPath().getName());
    }

    // start cluster and verify that new volume is used
    cluster.start();

    // Make sure we can still read the tables (tableNames[0] is very likely to have a file still on v1)
    List<String> expected = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        String row = String.format("%06d", i * 100 + 3);
        expected.add(row + ":cf1:cq1:1");
    }

    verifyData(expected, getConnector().createScanner(tableNames[0], Authorizations.EMPTY));

    // v1 should not have any data for tableNames[1]
    verifyVolumesUsed(tableNames[1], false, v2, v3);
}

From source file: org.apache.accumulo.test.VolumeIT.java

License: Apache License

@Test
public void testRemoveVolumes() throws Exception {
    String[] tableNames = getUniqueNames(2);

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // start cluster and verify that volume was decommissioned
    cluster.start();

    Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    conn.tableOperations().compact(tableNames[0], null, null, true, true);

    verifyVolumesUsed(tableNames[0], true, v2);

    // check that root tablet is not on volume 1
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig()))
            + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));

    conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<String, String>(),
            new HashSet<String>());

    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(tableNames[0], true, v2);
    verifyVolumesUsed(tableNames[1], true, v2);

}

From source file: org.apache.accumulo.test.VolumeIT.java

License: Apache License

private void testReplaceVolume(boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));

    if (cleanShutdown)
        Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());

    cluster.stop();

    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());

    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // start cluster and verify that volumes were replaced
    cluster.start();

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);

    // verify writes to new dir
    getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
    getConnector().tableOperations().compact(tableNames[1], null, null, true, true);

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);

    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig()))
            + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));

    getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<String, String>(),
            new HashSet<String>());

    getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
    getConnector().tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);
    verifyVolumesUsed(tableNames[2], true, v8, v9);
}

From source file: org.apache.atlas.security.SecureClientUtils.java

License: Apache License

public static void persistSSLClientConfiguration(org.apache.commons.configuration.Configuration clientConfig)
        throws AtlasException, IOException {
    //trust settings
    Configuration configuration = new Configuration(false);
    File sslClientFile = getSSLClientFile();
    if (!sslClientFile.exists()) {
        configuration.set("ssl.client.truststore.type", "jks");
        configuration.set("ssl.client.truststore.location", clientConfig.getString(TRUSTSTORE_FILE_KEY));
        if (clientConfig.getBoolean(CLIENT_AUTH_KEY, false)) {
            // need to get client key properties
            configuration.set("ssl.client.keystore.location", clientConfig.getString(KEYSTORE_FILE_KEY));
            configuration.set("ssl.client.keystore.type", "jks");
        }
        // add the configured credential provider
        configuration.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
                clientConfig.getString(CERT_STORES_CREDENTIAL_PROVIDER_PATH));
        String hostnameVerifier = clientConfig.getString(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
        if (hostnameVerifier != null) {
            configuration.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, hostnameVerifier);
        }

        try (FileWriter sslClientWriter = new FileWriter(sslClientFile)) {
            configuration.writeXml(sslClientWriter);
        }
    }
}

From source file: org.apache.blur.hive.BlurSerDeTest.java

License: Apache License

private void writeFile(File file, Configuration configuration) throws FileNotFoundException, IOException {
    FileOutputStream outputStream = new FileOutputStream(file);
    configuration.writeXml(outputStream);
    outputStream.close();
}

From source file: org.apache.blur.mapreduce.lib.update.BulkTableUpdateCommand.java

License: Apache License

public void addExtraConfig(Configuration configuration) throws IOException {
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    configuration.writeXml(outputStream);
    outputStream.close();
    extraConfigs.add(new String(outputStream.toByteArray()));
}
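
The example above captures a Configuration as an in-memory XML string. The reverse direction works through Configuration.addResource(InputStream); here is a minimal round-trip sketch (the class and method names are illustrative, not from the Blur source):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

public class ConfigurationRoundTrip {
    // Serialize a Configuration to XML bytes, then load them into a fresh instance.
    public static Configuration roundTrip(Configuration original) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        original.writeXml(out);
        Configuration copy = new Configuration(false);
        copy.addResource(new ByteArrayInputStream(out.toByteArray()));
        return copy;
    }
}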

From source file: org.apache.falcon.converter.AbstractOozieEntityMapper.java

License: Apache License

protected void createHiveConf(FileSystem fs, Path confPath, String metastoreUrl, String prefix)
        throws IOException {
    Configuration hiveConf = new Configuration(false);
    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, metastoreUrl);
    hiveConf.set("hive.metastore.local", "false");

    OutputStream out = null;
    try {
        out = fs.create(new Path(confPath, prefix + "hive-site.xml"));
        hiveConf.writeXml(out);
    } finally {
        IOUtils.closeQuietly(out);
    }
}

From source file: org.apache.falcon.oozie.feed.FeedReplicationCoordinatorBuilder.java

License: Apache License

protected void persistHiveConfiguration(FileSystem fs, Path confPath, Cluster cluster, String prefix)
        throws IOException {
    Configuration hiveConf = getHiveCredentialsAsConf(cluster);
    OutputStream out = null;
    try {
        out = fs.create(new Path(confPath, prefix + "hive-site.xml"));
        hiveConf.writeXml(out);
    } finally {
        IOUtils.closeQuietly(out);
    }
}