Example usage for org.apache.hadoop.conf Configuration get

List of usage examples for org.apache.hadoop.conf Configuration get

Introduction

On this page you can find example usages of org.apache.hadoop.conf Configuration get.

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
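A minimal sketch of the lookup behavior (the property name example.greeting is only an illustration, not a real Hadoop key):

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.greeting", "hello");

        // The property was set, so get returns its value.
        String greeting = conf.get("example.greeting");    // "hello"

        // No such property exists, so get returns null.
        String missing = conf.get("example.no.such.key");  // null

        System.out.println(greeting + " / " + missing);
    }
}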

Usage

From source file: co.cask.hydrator.plugin.db.batch.source.DataDrivenETLDBInputFormat.java

License: Apache License
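Here, get retrieves the JDBC URL, driver class name, username, and password from the job configuration to lazily open a connection, registering a shim driver when DriverManager does not already know a suitable one for the URL.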

@Override
public Connection getConnection() {
    if (this.connection == null) {
        Configuration conf = getConf();
        try {
            String url = conf.get(DBConfiguration.URL_PROPERTY);
            try {
                // throws SQLException if no suitable driver is found
                DriverManager.getDriver(url);
            } catch (SQLException e) {
                if (driverShim == null) {
                    if (driver == null) {
                        ClassLoader classLoader = conf.getClassLoader();
                        @SuppressWarnings("unchecked")
                        Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                                .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
                        driver = driverClass.newInstance();

                        // De-register the default driver that gets registered when driver class is loaded.
                        DBUtils.deregisterAllDrivers(driverClass);
                    }
                    driverShim = new JDBCDriverShim(driver);
                    DriverManager.registerDriver(driverShim);
                    LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
                }
            }
            if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
                this.connection = DriverManager.getConnection(url);
            } else {
                this.connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
                        conf.get(DBConfiguration.PASSWORD_PROPERTY));
            }

            boolean autoCommitEnabled = conf.getBoolean(AUTO_COMMIT_ENABLED, false);
            if (autoCommitEnabled) {
                // hack to work around jdbc drivers like the hive driver that throw exceptions on commit
                this.connection = new NoOpCommitConnection(this.connection);
            } else {
                this.connection.setAutoCommit(false);
            }
            this.connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
    return this.connection;
}

From source file: co.cask.hydrator.plugin.source.HBaseSource.java

License: Apache License
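This source reads the current io.serializations value with get before clearing the configuration, so it can be restored alongside the HBase serialization classes when the TableInputFormat input is configured.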

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    Configuration conf = new Configuration();
    String ioSerializations = conf.get("io.serializations");
    conf.clear();

    conf.set(TableInputFormat.INPUT_TABLE, config.tableName);
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, config.columnFamily);
    String zkQuorum = !Strings.isNullOrEmpty(config.zkQuorum) ? config.zkQuorum : "localhost";
    String zkClientPort = !Strings.isNullOrEmpty(config.zkClientPort) ? config.zkClientPort : "2181";
    conf.set("hbase.zookeeper.quorum", zkQuorum);
    conf.set("hbase.zookeeper.property.clientPort", zkClientPort);
    // restore the saved io.serializations value alongside the HBase serialization classes
    conf.setStrings("io.serializations", ioSerializations, MutationSerialization.class.getName(),
            ResultSerialization.class.getName(), KeyValueSerialization.class.getName());
    context.setInput(Input.of(config.referenceName, new SourceInputFormatProvider(TableInputFormat.class, conf))
            .alias(config.columnFamily));
}

From source file: co.cask.tephra.persist.HDFSTransactionLogTest.java

License: Apache License
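The test helper looks up the configured transaction snapshot directory with get and uses it to build the path of a new HDFS transaction log.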

private HDFSTransactionLog getHDFSTransactionLog(Configuration conf, FileSystem fs, long timeInMillis)
        throws Exception {
    String snapshotDir = conf.get(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR);
    Path newLog = new Path(snapshotDir, LOG_FILE_PREFIX + timeInMillis);
    return new HDFSTransactionLog(fs, conf, newLog, timeInMillis, metricsCollector);
}

From source file: co.cask.tephra.persist.HDFSTransactionLogTest.java

License: Apache License
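A similar helper reads the snapshot directory from the configuration to create a SequenceFile writer for transaction edits, optionally stamping the file's metadata with a version marker.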

private SequenceFile.Writer getSequenceFileWriter(Configuration configuration, FileSystem fs, long timeInMillis,
        boolean withMarker) throws IOException {
    String snapshotDir = configuration.get(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR);
    Path newLog = new Path(snapshotDir, LOG_FILE_PREFIX + timeInMillis);
    SequenceFile.Metadata metadata = new SequenceFile.Metadata();
    if (withMarker) {
        metadata.set(new Text(TxConstants.TransactionLog.VERSION_KEY),
                new Text(Byte.toString(TxConstants.TransactionLog.CURRENT_VERSION)));
    }
    return SequenceFile.createWriter(fs, configuration, newLog, LongWritable.class, TransactionEdit.class,
            SequenceFile.CompressionType.NONE, null, null, metadata);
}

From source file: co.cask.tephra.persist.HDFSTransactionStateStorage.java

License: Apache License
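The constructor captures the configured snapshot directory once, at injection time, rather than re-reading it on every storage operation.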

@Inject
public HDFSTransactionStateStorage(Configuration hConf, SnapshotCodecProvider codecProvider,
        MetricsCollector metricsCollector) {
    super(codecProvider);
    this.hConf = hConf;
    this.configuredSnapshotDir = hConf.get(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR);
    this.metricsCollector = metricsCollector;
}

From source file: co.cask.tephra.persist.LocalFileTransactionStateStorage.java

License: Apache License
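The local-file variant follows the same pattern as the HDFS one, but reads the local snapshot directory key instead.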

@Inject
public LocalFileTransactionStateStorage(Configuration conf, SnapshotCodecProvider codecProvider,
        MetricsCollector metricsCollector) {
    super(codecProvider);
    this.configuredSnapshotDir = conf.get(TxConstants.Manager.CFG_TX_SNAPSHOT_LOCAL_DIR);
    this.metricsCollector = metricsCollector;
}

From source file: co.cask.tephra.persist.LocalTransactionStateStorageTest.java

License: Apache License
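This test saves the current snapshot codec setting with get, temporarily switches to the version-1 codec to write data in the old format, and later restores the saved value before starting the transaction manager to verify backwards compatibility.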

@SuppressWarnings("deprecation")
@Test
public void testLongTxnBackwardsCompatibility() throws Exception {
    Configuration conf = getConfiguration("testLongTxnBackwardsCompatibility");

    // Use SnapshotCodec version 1
    String latestSnapshotCodec = conf.get(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    TransactionStateStorage storage = null;
    try {
        storage = getStorage(conf);
        storage.startAndWait();

        // Create transaction snapshot and transaction edits with version when long running txns had -1 expiration.
        Collection<Long> invalid = Lists.newArrayList();
        NavigableMap<Long, TransactionManager.InProgressTx> inProgress = Maps.newTreeMap();
        long time1 = System.currentTimeMillis();
        long wp1 = time1 * TxConstants.MAX_TX_PER_MS;
        inProgress.put(wp1, new TransactionManager.InProgressTx(wp1 - 5, -1L));
        long time2 = time1 + 100;
        long wp2 = time2 * TxConstants.MAX_TX_PER_MS;
        inProgress.put(wp2, new TransactionManager.InProgressTx(wp2 - 50, time2 + 1000));
        Map<Long, Set<ChangeId>> committing = Maps.newHashMap();
        Map<Long, Set<ChangeId>> committed = Maps.newHashMap();
        TransactionSnapshot snapshot = new TransactionSnapshot(time2, 0, wp2, invalid, inProgress, committing,
                committed);
        long time3 = time1 + 200;
        long wp3 = time3 * TxConstants.MAX_TX_PER_MS;
        TransactionEdit edit1 = new TransactionEditV2(wp3, wp3 - 10, TransactionEdit.State.INPROGRESS, -1L,
                null, 0L, false, null);
        long time4 = time1 + 300;
        long wp4 = time4 * TxConstants.MAX_TX_PER_MS;
        TransactionEdit edit2 = new TransactionEditV2(wp4, wp4 - 10, TransactionEdit.State.INPROGRESS,
                time4 + 1000, null, 0L, false, null);

        // write snapshot and transaction edit
        storage.writeSnapshot(snapshot);
        TransactionLog log = storage.createLog(time2);
        log.append(edit1);
        log.append(edit2);
        log.close();

        // Start transaction manager
        conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, latestSnapshotCodec);
        long longTimeout = TimeUnit.SECONDS.toMillis(conf.getLong(TxConstants.Manager.CFG_TX_LONG_TIMEOUT,
                TxConstants.Manager.DEFAULT_TX_LONG_TIMEOUT));
        TransactionManager txm = new TransactionManager(conf, storage, new TxMetricsCollector());
        txm.startAndWait();
        try {
            // Verify that the txns in old format were read correctly.
            // There should be four in-progress transactions, and no invalid transactions
            TransactionSnapshot snapshot1 = txm.getCurrentState();
            Assert.assertEquals(ImmutableSortedSet.of(wp1, wp2, wp3, wp4), snapshot1.getInProgress().keySet());
            verifyInProgress(snapshot1.getInProgress().get(wp1), TransactionType.LONG, time1 + longTimeout);
            verifyInProgress(snapshot1.getInProgress().get(wp2), TransactionType.SHORT, time2 + 1000);
            verifyInProgress(snapshot1.getInProgress().get(wp3), TransactionType.LONG, time3 + longTimeout);
            verifyInProgress(snapshot1.getInProgress().get(wp4), TransactionType.SHORT, time4 + 1000);
            Assert.assertEquals(0, snapshot1.getInvalid().size());
        } finally {
            txm.stopAndWait();
        }
    } finally {
        if (storage != null) {
            storage.stopAndWait();
        }
    }
}

From source file: co.cask.tephra.runtime.ZKModule.java

License: Apache License
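When get returns null for the Tephra-specific ZooKeeper quorum key, the provider falls back to the HBase quorum setting.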

@Provides
@Singleton
private ZKClientService provideZKClientService(Configuration conf) {
    String zkStr = conf.get(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM);
    if (zkStr == null) {
        // Default to HBase one.
        zkStr = conf.get(TxConstants.HBase.ZOOKEEPER_QUORUM);
    }

    int timeOut = conf.getInt(TxConstants.HBase.ZK_SESSION_TIMEOUT,
            TxConstants.HBase.DEFAULT_ZK_SESSION_TIMEOUT);
    ZKClientService zkClientService = new TephraZKClientService(zkStr, timeOut, null,
            ArrayListMultimap.<String, byte[]>create());
    return ZKClientServices.delegate(ZKClients.reWatchOnExpire(ZKClients.retryOnFailure(zkClientService,
            RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))));
}

From source file: co.cask.tephra.util.AbstractConfigurationProviderTest.java

License: Apache License
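The test asserts that a property set on the original Configuration is visible through get on the copy produced by ConfigurationFactory.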

@Test
public void testConfigurationProvider() {
    Configuration conf = new Configuration();
    conf.set("foo", "bar");
    Configuration newConf = new ConfigurationFactory().get(conf);
    assertNotNull(newConf);
    assertEquals("bar", newConf.get("foo"));
}

From source file: co.nubetech.apache.hadoop.mapred.DBQueryInputFormat.java

License: Apache License
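Here, get is used both to check whether query parameters were stored in the configuration and to fetch the serialized parameter list for deserialization via DefaultStringifier, before choosing a record reader based on the database product.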

@Override
protected RecordReader<LongWritable, GenericDBWritable> createDBRecordReader(
        org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit split, Configuration conf)
        throws IOException {

    org.apache.hadoop.mapreduce.lib.db.DBConfiguration dbConf = getDBConf();
    // Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
    String dbProductName = getDBProductName();

    logger.debug("Creating db record reader for db product: " + dbProductName);
    ArrayList params = null;
    try {
        if (conf.get(HIHOConf.QUERY_PARAMS) != null) {
            logger.debug("creating stringifier in DBQueryInputFormat");
            DefaultStringifier<ArrayList> stringifier = new DefaultStringifier<ArrayList>(conf,
                    ArrayList.class);
            logger.debug("created stringifier");

            params = stringifier.fromString(conf.get(HIHOConf.QUERY_PARAMS));
            logger.debug("created params");
        }
        // use database product name to determine appropriate record reader.
        if (dbProductName.startsWith("MYSQL")) {
            // use MySQL-specific db reader.
            return new MySQLQueryRecordReader(split, conf, getConnection(), dbConf, dbConf.getInputConditions(),
                    dbConf.getInputFieldNames(), dbConf.getInputTableName(), params);
        } else {
            // Generic reader.
            return new DBQueryRecordReader(split, conf, getConnection(), dbConf, dbConf.getInputConditions(),
                    dbConf.getInputFieldNames(), dbConf.getInputTableName(), dbProductName, params);
        }
    } catch (SQLException ex) {
        // wrap the SQLException so the original cause is preserved, not just its message
        throw new IOException(ex);
    }
}