List of usage examples for org.apache.hadoop.conf Configuration setBoolean

public void setBoolean(String name, boolean value)

Sets the value of the name property to a boolean.
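Before the project examples below, here is a minimal, self-contained sketch of the call in isolation. The property name is hypothetical; the second argument of getBoolean is the default returned when the key is unset.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a boolean under a hypothetical property name.
        conf.setBoolean("example.feature.enabled", true);
        // Read it back; the second argument is the default used when the key is absent.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println(enabled); // true
    }
}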
From source file:co.cask.hydrator.common.batch.JobUtils.java
License:Apache License
/**
 * Creates a new instance of {@link Job}. Note that the job created is not meant for actual MR
 * submission. It's just for setting up configurations.
 */
public static Job createInstance() throws IOException {
    Job job = Job.getInstance();
    Configuration conf = job.getConfiguration();
    conf.clear();

    if (UserGroupInformation.isSecurityEnabled()) {
        // If running in a secure cluster, this program runner runs inside a YARN container
        // and hence cannot authenticate with the job history server.
        conf.unset("mapreduce.jobhistory.address");
        conf.setBoolean(Job.JOB_AM_ACCESS_DISABLED, false);

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        job.getCredentials().addAll(credentials);
    }

    return job;
}
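A short sketch of how this helper might be invoked; the property key set afterwards is hypothetical and only illustrates that the returned Job exists solely to stage configuration values.

import org.apache.hadoop.mapreduce.Job;

public class JobUtilsUsage {
    public static void main(String[] args) throws Exception {
        // The returned Job carries a cleared Configuration and is never submitted.
        Job job = JobUtils.createInstance();
        job.getConfiguration().setBoolean("example.flag", true); // hypothetical key
    }
}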
From source file:co.cask.hydrator.plugin.batch.source.ExcelInputFormat.java
License:Apache License
public static void setConfigurations(Job job, String filePattern, String sheetName, boolean reprocess,
        int sheetNo, String columnList, boolean skipFirstRow, String terminateIfEmptyRow, String rowLimit,
        String ifErrorRecord, String processedFiles) {

    Configuration configuration = job.getConfiguration();
    configuration.set(FILE_PATTERN, filePattern);
    configuration.set(SHEET_NAME, sheetName);
    configuration.setBoolean(RE_PROCESS, reprocess);
    configuration.setInt(SHEET_NO, sheetNo);
    configuration.set(COLUMN_LIST, columnList);
    configuration.setBoolean(SKIP_FIRST_ROW, skipFirstRow);
    configuration.set(TERMINATE_IF_EMPTY_ROW, terminateIfEmptyRow);

    if (!Strings.isNullOrEmpty(rowLimit)) {
        configuration.set(ROWS_LIMIT, rowLimit);
    }
    configuration.set(IF_ERROR_RECORD, ifErrorRecord);
    configuration.set(PROCESSED_FILES, processedFiles);
}
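A possible call site for the helper above; every argument value here is made up for illustration and simply mirrors the parameter order of setConfigurations.

import org.apache.hadoop.mapreduce.Job;

public class ExcelSourceSetup {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        // All values below are hypothetical, in the helper's parameter order:
        // filePattern, sheetName, reprocess, sheetNo, columnList, skipFirstRow,
        // terminateIfEmptyRow, rowLimit, ifErrorRecord, processedFiles.
        ExcelInputFormat.setConfigurations(job, ".*\\.xlsx", "Sheet1", false, 0, "A,B,C", true,
                "true", "1000", "Ignore error and continue", "/tmp/processed");
    }
}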
From source file:co.cask.hydrator.plugin.db.batch.source.DataDrivenETLDBInputFormat.java
License:Apache License
public static void setInput(Configuration conf, Class<? extends DBWritable> inputClass, String inputQuery,
        String inputBoundingQuery, boolean enableAutoCommit) {
    DBConfiguration dbConf = new DBConfiguration(conf);
    dbConf.setInputClass(inputClass);
    dbConf.setInputQuery(inputQuery);
    dbConf.setInputBoundingQuery(inputBoundingQuery);
    conf.setBoolean(AUTO_COMMIT_ENABLED, enableAutoCommit);
}
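A sketch of wiring this up, assuming a DBWritable record type like the hypothetical stub below; the $CONDITIONS placeholder and the bounding query follow the usual DataDrivenDBInputFormat convention.

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;

public class DbSourceSetup {
    /** Hypothetical record type; any DBWritable implementation works here. */
    public static class UserRecord implements DBWritable {
        @Override
        public void readFields(ResultSet rs) { /* map result-set columns to fields */ }
        @Override
        public void write(PreparedStatement ps) { /* bind fields to statement parameters */ }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        DataDrivenETLDBInputFormat.setInput(conf, UserRecord.class,
                "SELECT id, name FROM users WHERE $CONDITIONS", // hypothetical query
                "SELECT MIN(id), MAX(id) FROM users",           // hypothetical bounding query
                false);                                         // auto-commit disabled
    }
}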
From source file:co.cask.tephra.distributed.ThriftTransactionServerTest.java
License:Apache License
@BeforeClass
public static void start() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    Configuration conf = new Configuration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
    conf.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_COUNT, NUM_CLIENTS);
    conf.setLong(TxConstants.Service.CFG_DATA_TX_CLIENT_TIMEOUT, TimeUnit.HOURS.toMillis(1));
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_IO_THREADS, 2);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_THREADS, 4);

    injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(SlowTransactionStorage.class).in(Scopes.SINGLETON);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    storage = injector.getInstance(TransactionStateStorage.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
    }
}
From source file:co.cask.tephra.examples.BalanceBooksTest.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, "/tx.snapshot");
    // Tune down the connection thread pool size
    conf.setInt("hbase.hconnection.threads.core", 5);
    conf.setInt("hbase.hconnection.threads.max", 10);
    // Tune down handler threads in regionserver
    conf.setInt("hbase.regionserver.handler.count", 10);

    // Set to random port
    conf.setInt("hbase.master.port", 0);
    conf.setInt("hbase.master.info.port", 0);
    conf.setInt("hbase.regionserver.port", 0);
    conf.setInt("hbase.regionserver.info.port", 0);

    testUtil.startMiniCluster();

    String zkClusterKey = testUtil.getClusterKey(); // hostname:clientPort:parentZnode
    String zkQuorum = zkClusterKey.substring(0, zkClusterKey.lastIndexOf(':'));
    LOG.info("Zookeeper Quorum is running at {}", zkQuorum);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkQuorum);

    Injector injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(InMemoryTransactionStateStorage.class)
                            .in(Scopes.SINGLETON);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
    }
}
From source file:co.cask.tephra.ThriftTransactionSystemTest.java
License:Apache License
@BeforeClass
public static void start() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    Configuration conf = new Configuration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
    conf.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);

    Injector injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(InMemoryTransactionStateStorage.class)
                            .in(Scopes.SINGLETON);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    storage = injector.getInstance(TransactionStateStorage.class);
    txClient = injector.getInstance(TransactionSystemClient.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
    }
}
From source file:co.nubetech.hiho.mapreduce.TestMySQLLoadMapper.java
License:Apache License
/**
 * @param tablename
 * @throws IOException
 * @throws SQLException
 * @throws InterruptedException
 */
private void runMapper(String tablename) throws IOException, SQLException, InterruptedException {
    Context context = mock(Context.class);
    MySQLLoadDataMapper mapper = new MySQLLoadDataMapper();
    FSDataInputStream val;
    val = new FSDataInputStream(new MyInputStream());
    Connection con = mock(Connection.class);
    com.mysql.jdbc.Statement stmt = mock(com.mysql.jdbc.Statement.class);
    mapper.setConnection(con);
    String query = "load data local infile 'abc.txt' into table tablename " + QUERY_SUFFIX
            + " (col1,col2,col3)";
    when(con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE)).thenReturn(stmt);

    Configuration conf = new Configuration();
    conf.set(HIHOConf.LOAD_QUERY_SUFFIX, QUERY_SUFFIX);
    conf.setBoolean(HIHOConf.LOAD_KEY_IS_TABLENAME, true);
    conf.setBoolean(HIHOConf.LOAD_HAS_HEADER, true);
    when(context.getConfiguration()).thenReturn(conf);
    when(stmt.executeUpdate(query)).thenReturn(10);

    Counter counter = mock(Counter.class);
    when(context.getCounter("MySQLLoadCounters", "ROWS_INSERTED_TABLE_tablename")).thenReturn(counter);
    when(context.getCounter("MySQLLoadCounters", "ROWS_INSERTED_TOTAL")).thenReturn(counter);

    mapper.map(new Text(tablename), val, context);
    verify(stmt).setLocalInfileInputStream(val);
    verify(stmt).executeUpdate(query);
    verify(counter, times(2)).increment(10);
}
From source file:com.ailk.oci.ocnosql.tools.load.csvbulkload.CsvBulkImportUtil.java
License:Apache License
/**
 * Configure a job configuration for a bulk CSV import.
 *
 * @param conf job configuration to be set up
 * @param tableName name of the table to be imported to, can include a schema name
 * @param fieldDelimiter field delimiter character for the CSV input
 * @param arrayDelimiter array delimiter character, can be null
 * @param columnInfoList list of columns to be imported
 * @param ignoreInvalidRows flag to ignore invalid input rows
 */
public static void initCsvImportJob(Configuration conf, String tableName, char fieldDelimiter,
        String arrayDelimiter, List<ColumnInfo> columnInfoList, boolean ignoreInvalidRows) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkNotNull(columnInfoList);
    Preconditions.checkArgument(!columnInfoList.isEmpty(), "Column info list is empty");
    conf.set(PhoenixCsvToKeyValueMapper.TABLE_NAME_CONFKEY, tableName);
    conf.set(PhoenixCsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY, String.valueOf(fieldDelimiter));
    if (arrayDelimiter != null) {
        conf.set(PhoenixCsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arrayDelimiter);
    }
    PhoenixCsvToKeyValueMapper.configureColumnInfoList(conf, columnInfoList);
    conf.setBoolean(PhoenixCsvToKeyValueMapper.IGNORE_INVALID_ROW_CONFKEY, ignoreInvalidRows);
}
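A sketch of one possible call, assuming the ColumnInfo type consumed here exposes a (columnName, sqlType) constructor, as Phoenix's org.apache.phoenix.util.ColumnInfo does; the table and column names are hypothetical.

import java.sql.Types;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.util.ColumnInfo;

public class CsvImportSetup {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical column list; the (name, sqlType) constructor is assumed.
        List<ColumnInfo> columns = Arrays.asList(
                new ColumnInfo("ID", Types.BIGINT),
                new ColumnInfo("NAME", Types.VARCHAR));
        // ',' field delimiter, no array delimiter, ignore invalid rows.
        CsvBulkImportUtil.initCsvImportJob(conf, "EXAMPLE_TABLE", ',', null, columns, true);
    }
}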
From source file:com.asakusafw.lang.compiler.mapreduce.testing.MapReduceRunner.java
License:Apache License
private static void configure(Configuration conf, String executionId, Map<String, String> arguments) {
    conf.set(StageConstants.PROP_EXECUTION_ID, executionId);
    conf.set(StageConstants.PROP_USER, System.getProperty("user.name")); //$NON-NLS-1$
    conf.set(StageConstants.PROP_ASAKUSA_BATCH_ARGS, serialize(arguments));
    conf.setBoolean(InProcessStageConfigurator.KEY_FORCE, true);
}
From source file:com.asakusafw.runtime.stage.launcher.ApplicationLauncher.java
License:Apache License
/**
 * Executes the launcher.
 * @param configuration the Hadoop configuration for the application
 * @param args the launcher arguments
 * @return the exit status
 */
public static int exec(Configuration configuration, String... args) {
    if (LOG.isDebugEnabled()) {
        LOG.debug(MessageFormat.format("Preparing application: {0}", //$NON-NLS-1$
                Arrays.toString(args)));
    }
    configuration.setBoolean(KEY_LAUNCHER_USED, true);
    LauncherOptions options;
    try {
        options = LauncherOptionsParser.parse(configuration, args);
    } catch (Exception e) {
        LOG.error(MessageFormat.format("Exception occurred in launcher: {0}", Arrays.toString(args)), e);
        return LAUNCH_ERROR;
    }
    try {
        Configuration conf = options.getConfiguration();
        conf.setClassLoader(options.getApplicationClassLoader());
        Tool tool;
        try {
            tool = ReflectionUtils.newInstance(options.getApplicationClass(), conf);
        } catch (Exception e) {
            LOG.error(MessageFormat.format("Exception occurred in launcher: {0}{1}",
                    options.getApplicationClass().getName(), options.getApplicationArguments()), e);
            return LAUNCH_ERROR;
        }
        try {
            return launch(conf, tool, options.getApplicationArgumentArray());
        } catch (Exception e) {
            LOG.error(MessageFormat.format("Exception occurred in launcher: {0}{1}",
                    options.getApplicationClass().getName(), options.getApplicationArguments()), e);
            return CLIENT_ERROR;
        }
    } finally {
        disposeClassLoader(options.getApplicationClassLoader());
        for (File file : options.getApplicationCacheDirectories()) {
            if (delete(file) == false) {
                LOG.warn(MessageFormat.format("Failed to delete the application cache directory: {0}", file));
            }
        }
    }
}