Example usage for org.apache.hadoop.conf Configuration get

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration#get(String).

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
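
For orientation, here is a minimal self-contained sketch of the single-argument get; the property names are made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.greeting", "hello");          // hypothetical property

        String present = conf.get("example.greeting");  // "hello"
        String absent = conf.get("example.absent");     // null: no such property

        System.out.println(present + " / " + absent);
    }
}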

Usage

From source file: co.cask.cdap.replication.StatusUtils.java

License: Apache License

public static String getReplicationStateTableName(Configuration conf) throws IOException {
    String name = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAME);
    String ns = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAMESPACE);
    String nsPrefix = conf.get(Constants.Dataset.TABLE_PREFIX);
    String tableName = (nsPrefix != null) ? nsPrefix
            : "cdap" + "_" + (ns != null ? ns
                    : ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAMESPACE)
                    + ":" + (name != null ? name
                            : ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAME);
    return tableName;
}
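
The null checks above can also be collapsed with Configuration's two-argument overload, get(String name, String defaultValue), which returns the default instead of null; a sketch reusing the same constants:

String ns = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAMESPACE,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAMESPACE);
String name = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAME,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAME);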

From source file: co.cask.cdap.replication.TableUpdater.java

License: Apache License

public TableUpdater(String rowType, final Configuration conf) {
    this.columnFamily = Bytes.toBytes(ReplicationConstants.ReplicationStatusTool.TIME_FAMILY);
    this.rowType = rowType;
    this.rsID = UUID.randomUUID();
    this.conf = conf;

    String configuredDelay = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_DELAY);
    String configuredPeriod = conf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_PERIOD);
    long delay = (configuredDelay != null) ? Long.parseLong(configuredDelay)
            : ReplicationConstants.ReplicationStatusTool.REPLICATION_DELAY_DEFAULT;
    long period = (configuredPeriod != null) ? Long.parseLong(configuredPeriod)
            : ReplicationConstants.ReplicationStatusTool.REPLICATION_PERIOD_DEFAULT;

    cachedUpdates = new HashMap<>();
    setupTimer(delay, period);
}
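
Since both properties are numeric, the typed accessor Configuration.getLong(String name, long defaultValue) expresses the same lookup without manual parsing; a sketch, assuming the default constants are longs as their usage above suggests:

long delay = conf.getLong(ReplicationConstants.ReplicationStatusTool.REPLICATION_DELAY,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_DELAY_DEFAULT);
long period = conf.getLong(ReplicationConstants.ReplicationStatusTool.REPLICATION_PERIOD,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_PERIOD_DEFAULT);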

From source file: co.cask.cdap.security.store.KMSSecureStore.java

License: Apache License

/**
 * Sets up the key provider. It reads the KMS URI from Hadoop conf to initialize the provider.
 * @param conf Hadoop configuration. core-site.xml contains the key provider URI.
 * @param namespaceQueryAdmin For querying namespace.
 * @throws IllegalArgumentException If the key provider URI is not set.
 * @throws URISyntaxException If the key provider path is not a valid URI.
 * @throws IOException If the authority or the port could not be read from the provider URI.
 */
@Inject
KMSSecureStore(Configuration conf, NamespaceQueryAdmin namespaceQueryAdmin)
        throws IOException, URISyntaxException {
    this.conf = conf;
    this.namespaceQueryAdmin = namespaceQueryAdmin;
    try {
        String keyProviderPath = conf.get(KeyProviderFactory.KEY_PROVIDER_PATH);
        if (Strings.isNullOrEmpty(keyProviderPath)) {
            throw new IllegalArgumentException("Could not find the key provider URI. Please make sure that "
                    + "hadoop.security.key.provider.path is set to the KMS URI in your " + "core-site.xml.");
        }
        URI providerUri = new URI(keyProviderPath);
        provider = KMSClientProvider.Factory.get(providerUri, conf);
    } catch (URISyntaxException e) {
        throw new URISyntaxException(
                "Secure store could not be loaded. The value for hadoop.security.key.provider.path"
                        + "in core-site.xml is not a valid URI.",
                e.getReason());
    } catch (IOException e) {
        throw new IOException("Secure store could not be loaded. KMS KeyProvider failed to initialize", e);
    }
    LOG.debug("KMS backed secure store initialized successfully.");
}
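
For context, the property read through KeyProviderFactory.KEY_PROVIDER_PATH is hadoop.security.key.provider.path, as the error message above indicates. A minimal sketch of supplying it programmatically instead of via core-site.xml; the KMS endpoint is a placeholder:

Configuration conf = new Configuration();
// Placeholder endpoint; in a real deployment this value lives in core-site.xml.
conf.set("hadoop.security.key.provider.path", "kms://http@kms-host:9600/kms");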

From source file: co.cask.cdap.template.etl.common.ETLDBInputFormat.java

License: Apache License

@Override
public Connection getConnection() {
    if (this.connection == null) {
        Configuration conf = getConf();
        try {
            String url = conf.get(DBConfiguration.URL_PROPERTY);
            try {
                // throws SQLException if no suitable driver is found
                DriverManager.getDriver(url);
            } catch (SQLException e) {
                if (driverShim == null) {
                    if (driver == null) {
                        ClassLoader classLoader = conf.getClassLoader();
                        @SuppressWarnings("unchecked")
                        Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                                .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
                        driver = driverClass.newInstance();

                        // De-register the default driver that gets registered when driver class is loaded.
                        DBUtils.deregisterAllDrivers(driverClass);
                    }
                    driverShim = new JDBCDriverShim(driver);
                    DriverManager.registerDriver(driverShim);
                    LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
                }
            }
            if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
                this.connection = DriverManager.getConnection(url);
            } else {
                this.connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
                        conf.get(DBConfiguration.PASSWORD_PROPERTY));
            }
            this.connection.setAutoCommit(false);
            this.connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
    return this.connection;
}
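
The DBConfiguration properties read here are normally populated on the job configuration up front, for example with DBConfiguration.configureDB from org.apache.hadoop.mapreduce.lib.db; a sketch with placeholder connection details:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;

Configuration conf = new Configuration();
// Sets DRIVER_CLASS_PROPERTY, URL_PROPERTY, USERNAME_PROPERTY and
// PASSWORD_PROPERTY on conf; driver, URL and credentials are placeholders.
DBConfiguration.configureDB(conf, "org.postgresql.Driver",
        "jdbc:postgresql://db-host:5432/mydb", "user", "secret");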

From source file: co.cask.cdap.template.etl.common.ETLDBOutputFormat.java

License: Apache License

private Connection getConnection(Configuration conf) {
    Connection connection;
    try {
        String url = conf.get(DBConfiguration.URL_PROPERTY);
        try {
            // throws SQLException if no suitable driver is found
            DriverManager.getDriver(url);
        } catch (SQLException e) {
            if (driverShim == null) {
                if (driver == null) {
                    ClassLoader classLoader = conf.getClassLoader();
                    @SuppressWarnings("unchecked")
                    Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                            .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
                    driver = driverClass.newInstance();

                    // De-register the default driver that gets registered when driver class is loaded.
                    DBUtils.deregisterAllDrivers(driverClass);
                }

                driverShim = new JDBCDriverShim(driver);
                DriverManager.registerDriver(driverShim);
                LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
            }
        }

        if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
            connection = DriverManager.getConnection(url);
        } else {
            connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
                    conf.get(DBConfiguration.PASSWORD_PROPERTY));
        }
        connection.setAutoCommit(false);
        connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    return connection;
}

From source file: co.cask.hydrator.plugin.batch.CopybookInputFormat.java

License: Apache License

@Override
protected boolean isSplitable(JobContext context, Path file) {
    Configuration conf = context.getConfiguration();
    Path path = new Path(conf.get(COPYBOOK_INPUTFORMAT_DATA_HDFS_PATH));
    final CompressionCodec codec = new CompressionCodecFactory(context.getConfiguration()).getCodec(path);
    return (null == codec) || codec instanceof SplittableCompressionCodec;
}
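
Of the codecs Hadoop registers by default, bzip2 is the splittable one; GzipCodec, for instance, does not implement SplittableCompressionCodec, so a .gz input stays in a single split. A small illustration (the path is hypothetical; codec lookup goes by file extension):

Configuration conf = new Configuration();
CompressionCodecFactory factory = new CompressionCodecFactory(conf);
CompressionCodec codec = factory.getCodec(new Path("/data/input.gz")); // resolves to GzipCodec
boolean splittable = codec instanceof SplittableCompressionCodec;      // false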

From source file: co.cask.hydrator.plugin.batch.CopybookRecordReader.java

License: Apache License

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    // Get configuration
    Configuration conf = context.getConfiguration();
    int fileStructure = net.sf.JRecord.Common.Constants.IO_FIXED_LENGTH;
    Path path = new Path(conf.get(CopybookInputFormat.COPYBOOK_INPUTFORMAT_DATA_HDFS_PATH));
    FileSystem fs = FileSystem.get(path.toUri(), conf);
    // Create input stream for the COBOL copybook contents
    InputStream inputStream = IOUtils
            .toInputStream(conf.get(CopybookInputFormat.COPYBOOK_INPUTFORMAT_CBL_CONTENTS), "UTF-8");
    BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
    try {
        externalRecord = CopybookIOUtils.getExternalRecord(bufferedInputStream);
        recordByteLength = CopybookIOUtils.getRecordLength(externalRecord, fileStructure);

        LineProvider lineProvider = LineIOProvider.getInstance().getLineProvider(fileStructure,
                CopybookIOUtils.FONT);
        reader = LineIOProvider.getInstance().getLineReader(fileStructure, lineProvider);
        LayoutDetail copybook = CopybookIOUtils.getLayoutDetail(externalRecord);

        org.apache.hadoop.mapreduce.lib.input.FileSplit fileSplit = (org.apache.hadoop.mapreduce.lib.input.FileSplit) split;

        start = fileSplit.getStart();
        end = start + fileSplit.getLength();

        BufferedInputStream fileIn = new BufferedInputStream(fs.open(fileSplit.getPath()));
        // Jump to the point in the split at which the first complete record of the split starts,
        // if not the first InputSplit
        if (start != 0) {
            position = start - (start % recordByteLength) + recordByteLength;
            fileIn.skip(position);
        }
        reader.open(fileIn, copybook);

    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
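
The seek arithmetic above skips any partial record at the head of the split: the start offset is rounded down to a record boundary and then advanced by one record length. A worked illustration with assumed numbers:

// Assumed values for illustration only.
long recordByteLength = 100;
long start = 250;                                    // split begins mid-record
long position = start - (start % recordByteLength) + recordByteLength;
// 250 - 50 + 100 = 300: the first record boundary after byte 250.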

From source file: co.cask.hydrator.plugin.batch.source.ExcelReaderRegexFilter.java

License: Apache License

@Override
public void setConf(@Nullable Configuration conf) {
    this.conf = conf;
    if (conf == null) {
        return;
    }
    pattern = Pattern.compile(conf.get(FILE_PATTERN));
    String processedFiles = conf.get(PROCESSED_FILES);
    if (!Strings.isNullOrEmpty(processedFiles)) {
        preProcessedFileList = GSON.fromJson(processedFiles, ARRAYLIST_PREPROCESSED_FILES);
    }
}

From source file: co.cask.hydrator.plugin.batch.source.XMLRecordReader.java

License: Apache License

public XMLRecordReader(FileSplit split, Configuration conf) throws IOException {
    file = split.getPath();
    fileName = file.toUri().toString();
    fs = file.getFileSystem(conf);
    XMLInputFactory factory = XMLInputFactory.newInstance();
    FSDataInputStream fdDataInputStream = fs.open(file);
    inputStream = new TrackingInputStream(fdDataInputStream);
    availableBytes = inputStream.available();
    try {
        reader = factory.createXMLStreamReader(inputStream);
    } catch (XMLStreamException exception) {
        throw new RuntimeException("XMLStreamException exception : ", exception);
    }
    //Set required node path details.
    String nodePath = conf.get(XMLInputFormat.XML_INPUTFORMAT_NODE_PATH);
    //Remove preceding '/' in node path to avoid first unwanted element after split('/')
    if (nodePath.indexOf("/") == 0) {
        nodePath = nodePath.substring(1, nodePath.length());
    }
    nodes = nodePath.split("/");

    currentNodeLevelMap = new HashMap<Integer, String>();

    tempFilePath = conf.get(XMLInputFormat.XML_INPUTFORMAT_PROCESSED_DATA_TEMP_FOLDER);
    fileAction = conf.get(XMLInputFormat.XML_INPUTFORMAT_FILE_ACTION);
    targetFolder = conf.get(XMLInputFormat.XML_INPUTFORMAT_TARGET_FOLDER);
}
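
Note that each of these conf.get calls returns null when its property is unset; nodePath in particular would then fail with a NullPointerException at the indexOf call. A defensive variant using the two-argument overload (a sketch only):

String nodePath = conf.get(XMLInputFormat.XML_INPUTFORMAT_NODE_PATH, ""); // default to empty
if (nodePath.startsWith("/")) {
    nodePath = nodePath.substring(1);
}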

From source file: co.cask.hydrator.plugin.db.batch.sink.ETLDBOutputFormat.java

License: Apache License

private Connection getConnection(Configuration conf) {
    Connection connection;
    try {
        String url = conf.get(DBConfiguration.URL_PROPERTY);
        try {
            // throws SQLException if no suitable driver is found
            DriverManager.getDriver(url);
        } catch (SQLException e) {
            if (driverShim == null) {
                if (driver == null) {
                    ClassLoader classLoader = conf.getClassLoader();
                    @SuppressWarnings("unchecked")
                    Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                            .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
                    driver = driverClass.newInstance();

                    // De-register the default driver that gets registered when driver class is loaded.
                    DBUtils.deregisterAllDrivers(driverClass);
                }

                driverShim = new JDBCDriverShim(driver);
                DriverManager.registerDriver(driverShim);
                LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
            }
        }

        if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
            connection = DriverManager.getConnection(url);
        } else {
            connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
                    conf.get(DBConfiguration.PASSWORD_PROPERTY));
        }

        boolean autoCommitEnabled = conf.getBoolean(AUTO_COMMIT_ENABLED, false);
        if (autoCommitEnabled) {
            // hack to work around jdbc drivers like the hive driver that throw exceptions on commit
            connection = new NoOpCommitConnection(connection);
        } else {
            connection.setAutoCommit(false);
        }
        connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    return connection;
}