List of usage examples for org.apache.hadoop.conf.Configuration.addResource

The examples below use the Path and InputStream overloads:

public void addResource(Path file)
public void addResource(InputStream in)
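Configuration.addResource merges the properties of an XML resource into the configuration. Resources are parsed lazily, on the first property lookup, and later resources override earlier ones unless a property is marked final. A minimal sketch of both overloads; the file paths and the property key here are illustrative, not taken from the examples below:

import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) throws Exception {
        // new Configuration(false) skips core-default.xml/core-site.xml,
        // so only the resources added below contribute properties.
        Configuration conf = new Configuration(false);

        // Overload 1: a filesystem Path (hypothetical location).
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

        // Overload 2: an InputStream (hypothetical file). Keep the stream
        // open until the configuration has actually been read.
        InputStream in = new FileInputStream("my-site.xml");
        conf.addResource(in);

        // The first lookup triggers parsing of both resources.
        System.out.println(conf.get("fs.defaultFS"));
        in.close();
    }
}

Because resources are evaluated in the order they were added, a key defined in my-site.xml would win over the same key in core-site.xml, unless the earlier definition is marked final.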
From source file:org.apache.accumulo.test.VolumeIT.java
License:Apache License
@Test
public void testRemoveVolumes() throws Exception {
    String[] tableNames = getUniqueNames(2);

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
        new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // start cluster and verify that volume was decommissioned
    cluster.start();

    Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    conn.tableOperations().compact(tableNames[0], null, null, true, true);

    verifyVolumesUsed(tableNames[0], true, v2);

    // check that root tablet is not on volume 1
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));

    conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<String, String>(),
        new HashSet<String>());

    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(tableNames[0], true, v2);
    verifyVolumesUsed(tableNames[1], true, v2);
}
From source file:org.apache.accumulo.test.VolumeIT.java
License:Apache License
private void testReplaceVolume(boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);

    verifyVolumesUsed(tableNames[0], false, v1, v2);

    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));

    if (cleanShutdown)
        Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());

    cluster.stop();

    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());

    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));

    conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    BufferedOutputStream fos = new BufferedOutputStream(
        new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // start cluster and verify that volumes were replaced
    cluster.start();

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);

    // verify writes to new dir
    getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
    getConnector().tableOperations().compact(tableNames[1], null, null, true, true);

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);

    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));

    getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<String, String>(),
        new HashSet<String>());

    getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
    getConnector().tableOperations().flush(RootTable.NAME, null, null, true);

    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);
    verifyVolumesUsed(tableNames[2], true, v8, v9);
}
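The two tests above share a round-trip pattern that is easy to miss inside the cluster plumbing: load a single site file into an empty Configuration, override a property, and serialize the result back with writeXml. A standalone sketch of just that pattern; the file location, key, and value are hypothetical:

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class SiteFileRewrite {
    public static void main(String[] args) throws Exception {
        File siteFile = new File("conf/accumulo-site.xml"); // hypothetical path

        // Skip the Hadoop defaults so writeXml emits only the site file's
        // properties plus the override below.
        Configuration conf = new Configuration(false);
        conf.addResource(new Path(siteFile.toURI()));

        // set() forces the resource to be parsed before the override is applied
        conf.set("instance.volumes", "hdfs://namenode/accumulo"); // hypothetical value

        // Write the merged configuration back over the original file.
        BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(siteFile));
        conf.writeXml(out);
        out.close();
    }
}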
From source file:org.apache.airavata.gfac.hadoop.provider.utils.HadoopUtils.java
License:Apache License
public static Configuration createHadoopConfiguration(JobExecutionContext jobExecutionContext,
        boolean isWhirrBasedDeployment, File hadoopConfigDir) throws FileNotFoundException {
    MessageContext inMessageContext = jobExecutionContext.getInMessageContext();
    Configuration hadoopConf = new Configuration();
    if (isWhirrBasedDeployment) {
        hadoopConf.addResource(
            new FileInputStream(new File((String) inMessageContext.getParameter("HADOOP_SITE_XML"))));
    } else {
        readHadoopClusterConfigurationFromDirectory(hadoopConfigDir, hadoopConf);
    }
    return hadoopConf;
}
From source file:org.apache.airavata.gfac.hadoop.provider.utils.HadoopUtils.java
License:Apache License
private static void readHadoopClusterConfigurationFromDirectory(File localHadoopConfigurationDirectory,
        Configuration hadoopConf) throws FileNotFoundException {
    Collection hadoopConfigurationFiles = FileUtils.listFiles(localHadoopConfigurationDirectory, null, false);
    for (Object f : hadoopConfigurationFiles) {
        hadoopConf.addResource(new FileInputStream((File) f));
    }
}
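A typed, standalone version of the directory-loading idiom above, assuming commons-io 2.x (where FileUtils.listFiles returns Collection&lt;File&gt;); the directory argument is whatever holds your *-site.xml files:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.Collection;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;

public class ConfDirLoader {
    public static Configuration load(File confDir) throws FileNotFoundException {
        Configuration conf = new Configuration();
        // null extensions = all files; false = do not recurse into subdirectories
        Collection<File> files = FileUtils.listFiles(confDir, null, false);
        for (File f : files) {
            conf.addResource(new FileInputStream(f));
        }
        return conf;
    }
}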
From source file:org.apache.ambari.servicemonitor.clients.BaseClient.java
License:Apache License
private void processBaseOptions(Configuration conf, CommandLine commandLine) {
    if (commandLine.hasOption('v')) {
        out.setVerbose(true);
    }
    if (commandLine.hasOption('c')) {
        out.setColorsSet(true);
    }
    if (commandLine.hasOption('b')) {
        LOG.info("blocking");
        blockingIO = true;
    } else {
        LOG.info("non-blocking");
    }
    if (commandLine.hasOption('h')) {
        keepHistory = true;
    }
    attemptLimit = getIntOption(commandLine, "al");
    int processTimeout = getIntOption(commandLine, "processtimeout");
    if (processTimeout > 0) {
        killHungProcess = new KillHungProcess(processTimeout, "Timeout executing process");
    }
    failLimit = getIntOption(commandLine, "fl");
    successLimit = getIntOption(commandLine, "sl");
    sleepTime = getIntOption(commandLine, "st");
    if (sleepTime < 0) {
        sleepTime = DEFAULT_SLEEP;
    }
    if (commandLine.hasOption("fs")) {
        FileSystem.setDefaultUri(conf, commandLine.getOptionValue("fs"));
    }
    if (commandLine.hasOption("jt")) {
        conf.set("mapred.job.tracker", commandLine.getOptionValue("jt"));
    }
    if (commandLine.hasOption("conf")) {
        String[] values = commandLine.getOptionValues("conf");
        for (String value : values) {
            conf.addResource(new Path(value));
        }
    }
}
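For comparison, Hadoop's own GenericOptionsParser implements the same -conf semantics, applying each -conf argument to the configuration via addResource(new Path(value)). A minimal sketch; the sample arguments are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class ConfFlagDemo {
    public static void main(String[] args) throws Exception {
        // e.g. args = {"-conf", "my-site.xml", "input", "output"}
        Configuration conf = new Configuration();
        GenericOptionsParser parser = new GenericOptionsParser(conf, args);
        String[] remaining = parser.getRemainingArgs(); // non-option arguments

        System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
        System.out.println("remaining args: " + remaining.length);
    }
}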
From source file:org.apache.apex.benchmark.CouchBaseBenchmarkTest.java
License:Apache License
@Test
public void testCouchBaseAppOutput() throws FileNotFoundException, IOException {
    Configuration conf = new Configuration();
    InputStream is = new FileInputStream("src/site/conf/dt-site-couchbase.xml");
    conf.addResource(is);

    // read back the CouchBase settings (return values unused; the first
    // get() forces the resource to be parsed)
    conf.get("dt.application.CouchBaseAppOutput.operator.couchbaseOutput.store.uriString");
    conf.get("dt.application.CouchBaseAppOutput.operator.couchbaseOutput.store.password");
    conf.get("dt.application.CouchBaseAppOutput.operator.couchbaseOutput.store.bucket");
    conf.get("dt.application.couchbaseAppOutput.operator.couchbaseOutput.store.max_tuples");
    conf.get("dt.application.couchbaseAppOutput.operator.couchbaseOutput.store.queueSize");
    conf.get("dt.application.couchbaseAppOutput.operator.couchbaseOutput.store.blocktime");
    conf.get("dt.application.couchbaseAppOutput.operator.couchbaseOutput.store.timeout");

    LocalMode lm = LocalMode.newInstance();
    try {
        lm.prepareDAG(new CouchBaseAppOutput(), conf);
        LocalMode.Controller lc = lm.getController();
        //lc.setHeartbeatMonitoringEnabled(false);
        lc.run(20000);
    } catch (Exception ex) {
        logger.info(ex.getCause());
    }
    is.close();
}
From source file:org.apache.apex.benchmark.CouchBaseBenchmarkTest.java
License:Apache License
@Test
public void testCouchBaseAppInput() throws FileNotFoundException, IOException {
    Configuration conf = new Configuration();
    InputStream is = new FileInputStream("src/site/conf/dt-site-couchbase.xml");
    conf.addResource(is);

    // read back the CouchBase settings (return values unused; the first
    // get() forces the resource to be parsed)
    conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.uriString");
    conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.blocktime");
    conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.timeout");
    conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.bucket");
    conf.get("dt.application.CouchBaseAppInput.operator.couchbaseInput.store.password");

    LocalMode lm = LocalMode.newInstance();
    try {
        lm.prepareDAG(new CouchBaseAppInput(), conf);
        LocalMode.Controller lc = lm.getController();
        lc.run(20000);
    } catch (Exception ex) {
        logger.info(ex.getCause());
    }
    is.close();
}
From source file:org.apache.apex.benchmark.kafka.KafkaInputBenchmarkTest.java
License:Apache License
@Test
public void testBenchmark() throws FileNotFoundException {
    Configuration conf = new Configuration();
    InputStream is = new FileInputStream("src/site/conf/dt-site-kafka.xml");
    conf.addResource(is);

    LocalMode lma = LocalMode.newInstance();
    try {
        lma.prepareDAG(new KafkaInputBenchmark(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(30000);
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:org.apache.apex.benchmark.memsql.MemsqlInputBenchmarkTest.java
License:Apache License
@Test
public void testMethod() throws SQLException, IOException {
    Configuration conf = new Configuration();
    InputStream inputStream = new FileInputStream("src/site/conf/dt-site-memsql.xml");
    conf.addResource(inputStream);

    MemsqlStore memsqlStore = new MemsqlStore();
    memsqlStore.setDatabaseUrl(conf.get("dt.rootDbUrl"));
    memsqlStore.setConnectionProperties(conf.get(
        "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));

    AbstractMemsqlOutputOperatorTest.memsqlInitializeDatabase(memsqlStore);

    MemsqlPOJOOutputOperator outputOperator = new MemsqlPOJOOutputOperator();
    outputOperator.getStore().setDatabaseUrl(
        conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.dbUrl"));
    outputOperator.getStore().setConnectionProperties(conf.get(
        "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));
    outputOperator.setBatchSize(BATCH_SIZE);

    Random random = new Random();
    com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap =
        new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
    attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
    attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
    attributeMap.put(DAG.APPLICATION_ID, APP_ID);

    OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

    long seedSize = conf.getLong("dt.seedSize", SEED_SIZE);

    // seed the database through the output operator
    outputOperator.setup(context);
    outputOperator.beginWindow(0);
    for (long valueCounter = 0; valueCounter < seedSize; valueCounter++) {
        outputOperator.input.put(random.nextInt());
    }
    outputOperator.endWindow();
    outputOperator.teardown();

    // run the input benchmark application against the seeded database
    MemsqlInputBenchmark app = new MemsqlInputBenchmark();
    LocalMode lm = LocalMode.newInstance();
    try {
        lm.prepareDAG(app, conf);
        LocalMode.Controller lc = lm.getController();
        lc.run(20000);
    } catch (Exception ex) {
        DTThrowable.rethrow(ex);
    }
    IOUtils.closeQuietly(inputStream);
}
From source file:org.apache.apex.benchmark.memsql.MemsqlOutputBenchmarkTest.java
License:Apache License
@Test
public void testMethod() throws SQLException, FileNotFoundException {
    Configuration conf = new Configuration();
    InputStream inputStream = new FileInputStream("src/site/conf/dt-site-memsql.xml");
    conf.addResource(inputStream);

    MemsqlStore memsqlStore = new MemsqlStore();
    memsqlStore.setDatabaseUrl(conf.get("dt.rootDbUrl"));
    memsqlStore.setConnectionProperties(conf.get(
        "dt.application.MemsqlOutputBenchmark.operator.memsqlOutputOperator.store.connectionProperties"));

    AbstractMemsqlOutputOperatorTest.memsqlInitializeDatabase(memsqlStore);

    MemsqlOutputBenchmark app = new MemsqlOutputBenchmark();
    LocalMode lm = LocalMode.newInstance();
    try {
        lm.prepareDAG(app, conf);
        LocalMode.Controller lc = lm.getController();
        lc.run(20000);
    } catch (Exception ex) {
        DTThrowable.rethrow(ex);
    }
    IOUtils.closeQuietly(inputStream);
}