Example usage for org.apache.hadoop.conf Configuration set

Introduction

This page collects usage examples for the org.apache.hadoop.conf.Configuration#set method, drawn from open-source projects.

Prototype

public void set(String name, String value) 

Document

Set the value of the name property.
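
Before the project examples below, here is a minimal, self-contained sketch of set together with the typed convenience setters (setInt, setBoolean, setStrings, and friends) that recur in those examples. All property names in the sketch are hypothetical placeholders, not real Hadoop keys.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // set(String name, String value) stores the property as a plain string.
        // "example.service.host" is a made-up key used only for illustration.
        conf.set("example.service.host", "localhost");

        // The typed setters convert their argument to a string internally.
        conf.setInt("example.service.port", 2181);
        conf.setBoolean("example.feature.enabled", true);
        conf.setStrings("example.codec.classes", "CodecA", "CodecB");

        // Values are read back with get() or a typed getter that takes a default.
        System.out.println(conf.get("example.service.host"));        // localhost
        System.out.println(conf.getInt("example.service.port", -1)); // 2181
    }
}

Note that clear(), which several examples below call before repopulating the configuration, also discards the values loaded from default resources such as core-default.xml.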

Usage

From source file: co.cask.hydrator.plugin.db.batch.source.DBSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    sourceConfig.substituteMacros(context);
    LOG.debug(
            "pluginType = {}; pluginName = {}; connectionString = {}; importQuery = {}; "
                    + "boundingQuery = {}",
            sourceConfig.jdbcPluginType, sourceConfig.jdbcPluginName, sourceConfig.connectionString,
            sourceConfig.getImportQuery(), sourceConfig.getBoundingQuery());
    Configuration hConf = new Configuration();
    hConf.clear();

    // Load the plugin class to make sure it is available.
    Class<? extends Driver> driverClass = context.loadPluginClass(getJDBCPluginId());
    if (sourceConfig.user == null && sourceConfig.password == null) {
        DBConfiguration.configureDB(hConf, driverClass.getName(), sourceConfig.connectionString);
    } else {
        DBConfiguration.configureDB(hConf, driverClass.getName(), sourceConfig.connectionString,
                sourceConfig.user, sourceConfig.password);
    }
    DataDrivenETLDBInputFormat.setInput(hConf, DBRecord.class, sourceConfig.getImportQuery(),
            sourceConfig.getBoundingQuery(), sourceConfig.getEnableAutoCommit());
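    // the splitBy column is stored as the order-by property and is what the
    // data-driven input format splits on; a single split does not need it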
    if (sourceConfig.numSplits == null || sourceConfig.numSplits != 1) {
        hConf.set(DBConfiguration.INPUT_ORDER_BY_PROPERTY, sourceConfig.splitBy);
    }
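    // an explicit numSplits fixes the number of map tasks, and hence splits, for the job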
    if (sourceConfig.numSplits != null) {
        hConf.setInt(MRJobConfig.NUM_MAPS, sourceConfig.numSplits);
    }
    context.setInput(Input.of(sourceConfig.referenceName,
            new SourceInputFormatProvider(DataDrivenETLDBInputFormat.class, hConf)));
}

From source file: co.cask.hydrator.plugin.HDFSSinkTest.java

License: Apache License

@Before
public void beforeTest() throws Exception {
    // Setup Hadoop Minicluster
    File baseDir = temporaryFolder.newFolder();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    dfsCluster = builder.build();
    dfsCluster.waitActive();
    fileSystem = FileSystem.get(conf);
}

From source file: co.cask.hydrator.plugin.source.HBaseSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    Configuration conf = new Configuration();
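    // remember the default serializations so they can be restored after clear() wipes them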
    String ioSerializations = conf.get("io.serializations");
    conf.clear();

    conf.set(TableInputFormat.INPUT_TABLE, config.tableName);
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, config.columnFamily);
    String zkQuorum = !Strings.isNullOrEmpty(config.zkQuorum) ? config.zkQuorum : "localhost";
    String zkClientPort = !Strings.isNullOrEmpty(config.zkClientPort) ? config.zkClientPort : "2181";
    conf.set("hbase.zookeeper.quorum", zkQuorum);
    conf.set("hbase.zookeeper.property.clientPort", zkClientPort);
    // restore the saved default serializations together with the HBase-specific ones
    conf.setStrings("io.serializations", ioSerializations, MutationSerialization.class.getName(),
            ResultSerialization.class.getName(), KeyValueSerialization.class.getName());
    context.setInput(Input.of(config.referenceName, new SourceInputFormatProvider(TableInputFormat.class, conf))
            .alias(config.columnFamily));
}

From source file: co.cask.tephra.distributed.PooledClientProviderTest.java

License: Apache License

@Test
public void testClientConnectionPoolMaximumNumberOfClients() throws Exception {
    // We need a server for the client to connect to
    InMemoryZKServer zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    try {
        Configuration conf = new Configuration();
        conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
        conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());
        conf.set("data.tx.client.count", Integer.toString(MAX_CLIENT_COUNT));
        conf.set("data.tx.client.obtain.timeout", Long.toString(CLIENT_OBTAIN_TIMEOUT));

        final TransactionServiceMain main = new TransactionServiceMain(conf);
        final CountDownLatch latch = new CountDownLatch(1);
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    main.start();
                    latch.countDown();
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
            }
        };

        try {
            t.start();
            // Wait for service to startup
            latch.await();

            startClientAndTestPool(conf);
        } finally {
            main.stop();
            t.join();
        }
    } finally {
        zkServer.stopAndWait();
    }
}

From source file: co.cask.tephra.distributed.ThriftTransactionServerTest.java

License: Apache License

@BeforeClass
public static void start() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

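    // transaction state is kept in memory only, and clients get a single connection attempt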
    Configuration conf = new Configuration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
    conf.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_COUNT, NUM_CLIENTS);
    conf.setLong(TxConstants.Service.CFG_DATA_TX_CLIENT_TIMEOUT, TimeUnit.HOURS.toMillis(1));
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_IO_THREADS, 2);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_THREADS, 4);

    injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(SlowTransactionStorage.class).in(Scopes.SINGLETON);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    storage = injector.getInstance(TransactionStateStorage.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
    }
}

From source file: co.cask.tephra.examples.BalanceBooksTest.java

License: Apache License

@BeforeClass
public static void setup() throws Exception {
    testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, "/tx.snapshot");

    // Tune down the connection thread pool size
    conf.setInt("hbase.hconnection.threads.core", 5);
    conf.setInt("hbase.hconnection.threads.max", 10);
    // Tune down handler threads in the regionserver
    conf.setInt("hbase.regionserver.handler.count", 10);

    // Set to random port
    conf.setInt("hbase.master.port", 0);
    conf.setInt("hbase.master.info.port", 0);
    conf.setInt("hbase.regionserver.port", 0);
    conf.setInt("hbase.regionserver.info.port", 0);

    testUtil.startMiniCluster();

    String zkClusterKey = testUtil.getClusterKey(); // hostname:clientPort:parentZnode
    String zkQuorum = zkClusterKey.substring(0, zkClusterKey.lastIndexOf(':'));
    LOG.info("Zookeeper Quorum is running at {}", zkQuorum);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkQuorum);

    Injector injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(InMemoryTransactionStateStorage.class)
                            .in(Scopes.SINGLETON);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
    }
}

From source file: co.cask.tephra.hbase10.coprocessor.TransactionProcessorTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    String rootDir = tmpFolder.newFolder().getAbsolutePath();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, rootDir);
    hConf.set(HConstants.HBASE_DIR, rootDir + "/hbase");

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitActive();
    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    String localTestDir = tmpFolder.newFolder().getAbsolutePath();
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    // write an initial transaction snapshot
    TransactionSnapshot txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7],
            invalidSet,
            // this will set visibility upper bound to V[6]
            Maps.newTreeMap(ImmutableSortedMap.of(V[6],
                    new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE, TransactionType.SHORT))),
            new HashMap<Long, Set<ChangeId>>(), new TreeMap<Long, Set<ChangeId>>());
    txVisibilityState = new TransactionSnapshot(txSnapshot.getTimestamp(), txSnapshot.getReadPointer(),
            txSnapshot.getWritePointer(), txSnapshot.getInvalid(), txSnapshot.getInProgress());
    HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(conf,
            new SnapshotCodecProvider(conf), new TxMetricsCollector());
    tmpStorage.startAndWait();
    tmpStorage.writeSnapshot(txSnapshot);
    tmpStorage.stopAndWait();
}

From source file: co.cask.tephra.hbase94.coprocessor.TransactionProcessorTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitActive();
    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    String localTestDir = "/tmp/transactionDataJanitorTest";
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    // write an initial transaction snapshot
    txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7], invalidSet,
            // this will set visibility upper bound to V[6]
            Maps.newTreeMap(ImmutableSortedMap.of(V[6],
                    new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE, TransactionType.SHORT))),
            new HashMap<Long, Set<ChangeId>>(), new TreeMap<Long, Set<ChangeId>>());
    HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(conf,
            new SnapshotCodecProvider(conf));
    tmpStorage.startAndWait();
    tmpStorage.writeSnapshot(txSnapshot);
    tmpStorage.stopAndWait();
}

From source file: co.cask.tephra.persist.CommitMarkerCodecTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
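    // bring up a single-datanode, in-process HDFS cluster backed by a temporary folder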
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TMP_FOLDER.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    conf = new Configuration(dfsCluster.getFileSystem().getConf());
    fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);
}

From source file: co.cask.tephra.persist.HDFSTransactionLogTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
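    // a one-datanode MiniDFSCluster provides the file system that backs the transaction log under test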
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TMP_FOLDER.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    conf = new Configuration(dfsCluster.getFileSystem().getConf());
    metricsCollector = new TxMetricsCollector();
}