List of usage examples for org.apache.hadoop.conf.Configuration#getBoolean
public boolean getBoolean(String name, boolean defaultValue)
Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
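Before the real-world examples below, a minimal self-contained sketch of the call itself. The property names here ("my.feature.enabled", "my.feature.unset") are hypothetical, chosen only for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "my.feature.enabled" is a hypothetical property used only for this sketch.
    conf.setBoolean("my.feature.enabled", true);
    boolean enabled = conf.getBoolean("my.feature.enabled", false); // true
    // An unset (or non-boolean) property falls back to the supplied default.
    boolean fallback = conf.getBoolean("my.feature.unset", false); // false
    System.out.println(enabled + " " + fallback);
  }
}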
From source file:co.cask.hydrator.plugin.db.batch.source.DataDrivenETLDBInputFormat.java
License:Apache License
@Override
public Connection getConnection() {
  if (this.connection == null) {
    Configuration conf = getConf();
    try {
      String url = conf.get(DBConfiguration.URL_PROPERTY);
      try {
        // throws SQLException if no suitable driver is found
        DriverManager.getDriver(url);
      } catch (SQLException e) {
        if (driverShim == null) {
          if (driver == null) {
            ClassLoader classLoader = conf.getClassLoader();
            @SuppressWarnings("unchecked")
            Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
            driver = driverClass.newInstance();
            // De-register the default driver that gets registered when the driver class is loaded.
            DBUtils.deregisterAllDrivers(driverClass);
          }
          driverShim = new JDBCDriverShim(driver);
          DriverManager.registerDriver(driverShim);
          LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
        }
      }
      if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
        this.connection = DriverManager.getConnection(url);
      } else {
        this.connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
            conf.get(DBConfiguration.PASSWORD_PROPERTY));
      }
      boolean autoCommitEnabled = conf.getBoolean(AUTO_COMMIT_ENABLED, false);
      if (autoCommitEnabled) {
        // hack to work around JDBC drivers, like the Hive driver, that throw exceptions on commit
        this.connection = new NoOpCommitConnection(this.connection);
      } else {
        this.connection.setAutoCommit(false);
      }
      this.connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
  return this.connection;
}
From source file:colossal.pipe.ColAvroSerialization.java
License:Apache License
/** Returns the specified output serializer. */
public Serializer<AvroWrapper<T>> getSerializer(Class<AvroWrapper<T>> c) {
  Configuration conf = getConf();
  // Here we must rely on mapred.task.is.map to tell whether the map output
  // or final output is needed.
  boolean isMap = conf.getBoolean("mapred.task.is.map", false);
  Schema schema = !isMap ? AvroJob.getOutputSchema(conf)
      : Schema.parse(AvroKey.class.isAssignableFrom(c) ? conf.get(ColPhase.MAP_OUT_KEY_SCHEMA)
          : conf.get(ColPhase.MAP_OUT_VALUE_SCHEMA));
  return new AvroWrapperSerializer(new SpecificDatumWriter<T>(schema));
}
From source file:com.ailk.oci.ocnosql.tools.load.csvbulkload.PhoenixCsvToKeyValueMapper.java
License:Apache License
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  String jdbcUrl = getJdbcUrl(conf);
  // This statement also ensures that the driver class is loaded
  LOG.info("Connection with driver {} with url {}", PhoenixDriver.class.getName(), jdbcUrl);
  try {
    conn = (PhoenixConnection) DriverManager.getConnection(jdbcUrl);
  } catch (SQLException e) {
    throw new RuntimeException(e);
  }
  upsertListener = new MapperUpsertListener(context, conf.getBoolean(IGNORE_INVALID_ROW_CONFKEY, true));
  csvUpsertExecutor = buildUpsertExecutor(conf);
  csvLineParser = new CsvLineParser(conf.get(FIELD_DELIMITER_CONFKEY).charAt(0));
  preUpdateProcessor = loadPreUpsertProcessor(conf);
  // Build the list of column names being imported.
  List<String> importColumnList = new ArrayList<String>();
  for (ColumnInfo colInfo : buildColumnInfoList(conf)) {
    importColumnList.add(colInfo.getColumnName());
  }
  // Columns whose values are hashed to form the row key prefix.
  List<String> rowPrefixColumns = Lists
      .newArrayList(Splitter.on(",").trimResults().split(conf.get(ROW_PREFIX_COLUMNS)));
  // Resolve each prefix column to its position in the CSV line
  // (the import-list index is offset by one from the CSV field index).
  rowPrefixColIdxs = new ArrayList<Integer>();
  for (String rpCol : rowPrefixColumns) {
    rowPrefixColIdxs.add(importColumnList.indexOf(rpCol) - 1);
  }
  // Columns that make up the row key itself.
  List<String> rowColumns = Lists.newArrayList(Splitter.on(",").trimResults().split(conf.get(ROW_COLUMNS)));
  rowColIdxs = new ArrayList<Integer>();
  for (String rCol : rowColumns) {
    rowColIdxs.add(importColumnList.indexOf(rCol) - 1);
  }
  // Columns that form the unique index; the default "_allColumns" means every column.
  List<String> uniqueIndexColumns = Lists
      .newArrayList(Splitter.on(",").trimResults().split(conf.get(UNIQUE_INDEX_COLUMNS, "_allColumns")));
  if (uniqueIndexColumns.size() == 1 && uniqueIndexColumns.get(0).equals("_allColumns")) {
    unqIdxColIdxs = null;
  } else {
    unqIdxColIdxs = new ArrayList<Integer>();
    for (String rCol : uniqueIndexColumns) {
      unqIdxColIdxs.add(importColumnList.indexOf(rCol) - 1);
    }
  }
  // Row key generator for the hash prefix (defaults to md5).
  rowKeyGenerator = buildRowKeyGenerator(conf.get(ROW_PREFIX_ALG, "md5"));
  separator = conf.get(FIELD_DELIMITER_CONFKEY);
  // Reusable buffer for assembling row strings.
  rowGentemp = new StringBuilder();
}
From source file:com.alexholmes.hadooputils.combine.common.mapreduce.SplitMetricsCombineInputFormat.java
License:Apache License
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  List<InputSplit> splits = super.getSplits(job);
  Configuration conf = HadoopCompat.getConfiguration(job);
  if (conf.getBoolean("hadooputils.combine.sink.enabled", false)) {
    writeSplitsToSink(conf, organizeSplitsByLocation(splits));
  }
  return splits;
}
From source file:com.alibaba.wasp.conf.WaspConfiguration.java
License:Apache License
private static void checkDefaultsVersion(Configuration conf) {
  if (conf.getBoolean("wasp.defaults.for.version.skip", Boolean.TRUE))
    return;
  String defaultsVersion = conf.get("wasp.defaults.for.version");
  String thisVersion = VersionInfo.getVersion();
  if (!thisVersion.equals(defaultsVersion)) {
    throw new RuntimeException("wasp-default.xml file seems to be for an old version of Wasp ("
        + defaultsVersion + "), this version is " + thisVersion);
  }
}
From source file:com.alibaba.wasp.fserver.FSDumpServlet.java
License:Apache License
private boolean isShowQueueDump(Configuration conf) {
  return conf.getBoolean("wasp.fserver.servlet.show.queuedump", true);
}
From source file:com.alibaba.wasp.LocalWaspCluster.java
License:Apache License
/**
 * @param c Configuration to check.
 * @return True if the wasp.master value is a 'local' address.
 */
public static boolean isLocal(final Configuration c) {
  boolean mode = c.getBoolean(FConstants.CLUSTER_DISTRIBUTED, FConstants.DEFAULT_CLUSTER_DISTRIBUTED);
  return (mode == FConstants.CLUSTER_IS_LOCAL);
}
From source file:com.alibaba.wasp.master.FMaster.java
License:Apache License
/**
 * Stall startup if we are designated a backup master; i.e. we want someone
 * else to become the master before proceeding.
 *
 * @param c
 * @param amm
 * @throws InterruptedException
 */
private static void stallIfBackupMaster(final Configuration c, final ActiveMasterManager amm)
    throws InterruptedException {
  // If we're a backup master, stall until the primary writes its address
  if (!c.getBoolean(FConstants.MASTER_TYPE_BACKUP, FConstants.DEFAULT_MASTER_TYPE_BACKUP)) {
    return;
  }
  LOG.debug("FMaster started in backup mode. Stalling until master znode is written.");
  // This will only be a minute or so while the cluster starts up,
  // so don't worry about setting watches on the parent znode
  while (!amm.isActiveMaster()) {
    LOG.debug("Waiting for master address ZNode to be written (Also watching cluster state node)");
    Thread.sleep(c.getInt("zookeeper.session.timeout", 180 * 1000));
  }
}
From source file:com.alibaba.wasp.zookeeper.ZKConfig.java
License:Apache License
/**
 * Parse ZooKeeper's zoo.cfg, injecting Wasp Configuration variables in.
 * This method is used for testing so we can pass our own InputStream.
 *
 * @param conf WaspConfiguration to use for injecting variables.
 * @param inputStream InputStream to read from.
 * @return Properties parsed from config stream with variables substituted.
 * @throws java.io.IOException if anything goes wrong parsing config
 */
public static Properties parseZooCfg(Configuration conf, InputStream inputStream) throws IOException {
  Properties properties = new Properties();
  try {
    properties.load(inputStream);
  } catch (IOException e) {
    final String msg = "fail to read properties from " + FConstants.ZOOKEEPER_CONFIG_NAME;
    LOG.fatal(msg);
    throw new IOException(msg, e);
  }
  for (Entry<Object, Object> entry : properties.entrySet()) {
    String value = entry.getValue().toString().trim();
    String key = entry.getKey().toString().trim();
    StringBuilder newValue = new StringBuilder();
    int varStart = value.indexOf(VARIABLE_START);
    int varEnd = 0;
    while (varStart != -1) {
      varEnd = value.indexOf(VARIABLE_END, varStart);
      if (varEnd == -1) {
        String msg = "variable at " + varStart + " has no end marker";
        LOG.fatal(msg);
        throw new IOException(msg);
      }
      String variable = value.substring(varStart + VARIABLE_START_LENGTH, varEnd);
      String substituteValue = System.getProperty(variable);
      if (substituteValue == null) {
        substituteValue = conf.get(variable);
      }
      if (substituteValue == null) {
        String msg = "variable " + variable + " not set in system property or wasp configs";
        LOG.fatal(msg);
        throw new IOException(msg);
      }
      newValue.append(substituteValue);
      varEnd += VARIABLE_END_LENGTH;
      varStart = value.indexOf(VARIABLE_START, varEnd);
    }
    // Special case for 'wasp.cluster.distributed' property being 'true'
    if (key.startsWith("server.")) {
      boolean mode = conf.getBoolean(FConstants.CLUSTER_DISTRIBUTED, FConstants.DEFAULT_CLUSTER_DISTRIBUTED);
      if (mode == FConstants.CLUSTER_IS_DISTRIBUTED && value.startsWith(FConstants.LOCALHOST)) {
        String msg = "The server in zoo.cfg cannot be set to localhost "
            + "in a fully-distributed setup because it won't be reachable. "
            + "See \"Getting Started\" for more information.";
        LOG.fatal(msg);
        throw new IOException(msg);
      }
    }
    newValue.append(value.substring(varEnd));
    properties.setProperty(key, newValue.toString());
  }
  return properties;
}
From source file:com.aliyun.fs.oss.utils.ResourceLoader.java
License:Apache License
private static List<URL> geClassLoaderURLs(Configuration conf) throws Exception {
  String dependPath = conf.get("fs.oss.core.dependency.path");
  String[] sdkDeps = null;
  Boolean runLocal = conf.getBoolean("mapreduce.job.run-local", false);
  if ((dependPath == null || dependPath.isEmpty()) && !runLocal) {
    throw new RuntimeException(
        "Job does not run locally, set \"fs.oss.core.dependency.path\" first please.");
  } else if (dependPath == null || dependPath.isEmpty()) {
    LOG.info("\"mapreduce.job.run-local\" set true.");
  } else {
    sdkDeps = dependPath.split(",");
  }
  ArrayList<URL> urls = new ArrayList<URL>();
  if (sdkDeps != null) {
    for (String dep : sdkDeps) {
      urls.add(new URL("file://" + dep));
    }
  }
  String[] cp;
  if (conf.getBoolean("mapreduce.job.run-local", false)) {
    if (SystemUtils.IS_OS_WINDOWS) {
      cp = System.getProperty("java.class.path").split(";");
    } else {
      cp = System.getProperty("java.class.path").split(":");
    }
    for (String entity : cp) {
      urls.add(new URL("file:" + entity));
    }
  }
  return urls;
}