List of usage examples for org.apache.hadoop.conf.Configuration.addResource
public void addResource(String name)    // name is resolved as a classpath resource
public void addResource(URL url)
public void addResource(org.apache.hadoop.fs.Path file)
public void addResource(InputStream in)
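A minimal sketch of the pattern shared by the examples below (the file name my-config.xml and the key some.property.key are hypothetical placeholders, not taken from any example): construct a Configuration that skips the Hadoop default resources, add a single XML resource, and read a value back.

import java.io.File;
import org.apache.hadoop.conf.Configuration;

public class AddResourceSketch {
    public static void main(String[] args) throws Exception {
        // 'false' skips the default resources (core-default.xml, core-site.xml),
        // so the Configuration holds only what addResource supplies
        Configuration conf = new Configuration(false);
        // URL overload; hypothetical local file used for illustration
        conf.addResource(new File("my-config.xml").toURI().toURL());
        // get(...) returns null when the key is absent from the loaded resource
        System.out.println(conf.get("some.property.key"));
    }
}

The new Configuration(false) idiom recurs in every example below; it keeps cluster-wide defaults from leaking into the parsed file.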
From source file:org.apache.accumulo.core.file.rfile.RFileTest.java
License:Apache License
private AccumuloConfiguration setAndGetAccumuloConfig(String cryptoConfSetting) {
    ConfigurationCopy result = new ConfigurationCopy(AccumuloConfiguration.getDefaultConfiguration());
    Configuration conf = new Configuration(false);
    // String overload: the name is looked up as a resource on the classpath
    conf.addResource(cryptoConfSetting);
    for (Entry<String, String> e : conf) {
        result.set(e.getKey(), e.getValue());
    }
    return result;
}
From source file:org.apache.accumulo.minicluster.MiniAccumuloClusterTest.java
License:Apache License
@Test
public void testRandomPorts() throws Exception {
    File confDir = new File(testDir, "conf");
    File accumuloSite = new File(confDir, "accumulo-site.xml");
    Configuration conf = new Configuration(false);
    conf.addResource(accumuloSite.toURI().toURL());
    for (Property randomPortProp : new Property[] { Property.TSERV_CLIENTPORT, Property.MONITOR_PORT,
            Property.MONITOR_LOG4J_PORT, Property.MASTER_CLIENTPORT, Property.TRACE_PORT, Property.GC_PORT }) {
        String value = conf.get(randomPortProp.getKey());
        Assert.assertNotNull("Found no value for " + randomPortProp, value);
        Assert.assertEquals("0", value);
    }
}
From source file:org.apache.accumulo.minicluster.MiniAccumuloInstance.java
License:Apache License
private static String getZooKeepersFromDir(File directory) throws FileNotFoundException {
    if (!directory.isDirectory())
        throw new IllegalArgumentException("Not a directory " + directory.getPath());
    File configFile = new File(new File(directory, "conf"), "accumulo-site.xml");
    Configuration conf = new Configuration(false);
    try {
        conf.addResource(configFile.toURI().toURL());
    } catch (MalformedURLException e) {
        throw new FileNotFoundException("Missing file: " + configFile.getPath());
    }
    return conf.get(Property.INSTANCE_ZK_HOST.getKey());
}
From source file:org.apache.accumulo.server.master.CoordinateRecoveryTask.java
License:Apache License
void cleanupOldJobs() {
    try {
        Configuration conf = CachedConfiguration.getInstance();
        @SuppressWarnings("deprecation")
        JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));
        for (JobStatus status : jc.getAllJobs()) {
            if (!status.isJobComplete()) {
                RunningJob job = jc.getJob(status.getJobID());
                if (job.getJobName().equals(LogSort.getJobName())) {
                    log.info("found a running " + job.getJobName());
                    Configuration jobConfig = new Configuration(false);
                    log.info("fetching configuration from " + job.getJobFile());
                    // InputStream overload: read the job's config file straight out of the filesystem
                    jobConfig.addResource(TraceFileSystem
                            .wrap(FileUtil.getFileSystem(conf, ServerConfiguration.getSiteConfiguration()))
                            .open(new Path(job.getJobFile())));
                    if (HdfsZooInstance.getInstance().getInstanceID()
                            .equals(jobConfig.get(LogSort.INSTANCE_ID_PROPERTY))) {
                        log.info("Killing job " + job.getID().toString());
                    }
                }
            }
        }
        FileStatus[] children = fs.listStatus(new Path(ServerConstants.getRecoveryDir()));
        if (children != null) {
            for (FileStatus child : children) {
                log.info("Deleting recovery directory " + child);
                fs.delete(child.getPath(), true);
            }
        }
    } catch (IOException e) {
        log.error("Error cleaning up old Log Sort jobs", e);
    } catch (Exception e) {
        log.error("Unknown error cleaning up old jobs", e);
    }
}
From source file:org.apache.accumulo.server.util.ConvertConfig.java
License:Apache License
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
        justification = "code runs in same security context as user who provided input")
@Override
public void execute(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs("accumulo convert-config", args);
    File xmlFile = new File(opts.xmlPath);
    if (!xmlFile.exists()) {
        throw new IllegalArgumentException("xml config file does not exist at " + opts.xmlPath);
    }
    Path propsPath = Paths.get(opts.propsPath);
    if (propsPath.toFile().exists()) {
        throw new IllegalArgumentException("properties file already exists at " + opts.propsPath);
    }
    Configuration xmlConfig = new Configuration(false);
    xmlConfig.addResource(xmlFile.toURI().toURL());
    try (BufferedWriter w = Files.newBufferedWriter(propsPath, Charset.forName("UTF-8"))) {
        StreamSupport.stream(xmlConfig.spliterator(), false).sorted(Map.Entry.comparingByKey())
                .forEach(e -> writeLine(w, e.toString()));
    }
}
From source file:org.apache.accumulo.test.functional.ConfigurableMacBase.java
License:Apache License
private void createMiniAccumulo() throws Exception {
    // createTestDir will give us an empty directory, we don't need to clean it up ourselves
    File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
    MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
    String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
    String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
    cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
    cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
    Configuration coreSite = new Configuration(false);
    configure(cfg, coreSite);
    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
    configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
    cluster = new MiniAccumuloClusterImpl(cfg);
    if (coreSite.size() > 0) {
        File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
        if (csFile.exists()) {
            coreSite.addResource(new Path(csFile.getAbsolutePath()));
        }
        File tmp = new File(csFile.getAbsolutePath() + ".tmp");
        OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp));
        coreSite.writeXml(out);
        out.close();
        assertTrue(tmp.renameTo(csFile));
    }
    beforeClusterStart(cfg);
}
From source file:org.apache.accumulo.test.functional.ReadWriteIT.java
License:Apache License
@Test
public void sunnyDay() throws Exception {
    // Start accumulo, create a table, insert some data, verify we can read it out.
    // Shut down cleanly.
    log.debug("Starting Monitor");
    cluster.getClusterControl().startAllServers(ServerType.MONITOR);
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    String monitorLocation = null;
    while (null == monitorLocation) {
        monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
        if (null == monitorLocation) {
            log.debug("Could not fetch monitor HTTP address from zookeeper");
            Thread.sleep(2000);
        }
    }
    String scheme = "http://";
    if (getCluster() instanceof StandaloneAccumuloCluster) {
        StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
        File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
        if (accumuloSite.isFile()) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path(accumuloSite.toURI()));
            String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
            if (null != monitorSslKeystore) {
                log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}",
                        accumuloSite);
                scheme = "https://";
                SSLContext ctx = SSLContext.getInstance("SSL");
                TrustManager[] tm = new TrustManager[] { new TestTrustManager() };
                ctx.init(new KeyManager[0], tm, new SecureRandom());
                SSLContext.setDefault(ctx);
                HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
                HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
            }
        } else {
            log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
        }
    }
    URL url = new URL(scheme + monitorLocation);
    log.debug("Fetching web page " + url);
    String result = FunctionalTestUtils.readAll(url.openStream());
    assertTrue(result.length() > 100);
    log.debug("Stopping accumulo cluster");
    ClusterControl control = cluster.getClusterControl();
    control.adminStopAll();
    ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(),
            connector.getInstance().getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache,
                ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
    control.stopAllServers(ServerType.MONITOR);
    control.stopAllServers(ServerType.TRACER);
    log.debug("success!");
    // Restarting everything
    cluster.start();
}
From source file:org.apache.accumulo.test.RewriteTabletDirectoriesIT.java
License:Apache License
@Test
public void test() throws Exception {
    Connector c = getConnector();
    c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    // Write some data to a table and add some splits
    BatchWriter bw = c.createBatchWriter(tableName, null);
    final SortedSet<Text> splits = new TreeSet<>();
    for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
        splits.add(new Text(split));
        Mutation m = new Mutation(new Text(split));
        m.put(new byte[] {}, new byte[] {}, new byte[] {});
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().addSplits(tableName, splits);

    BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
    DIRECTORY_COLUMN.fetch(scanner);
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    assertNotNull("TableID for " + tableName + " was null", tableId);
    scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));

    // verify the directory entries are all on v1, make a few entries relative
    bw = c.createBatchWriter(MetadataTable.NAME, null);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        assertTrue("Expected " + entry.getValue() + " to contain " + v1,
                entry.getValue().toString().contains(v1.toString()));
        count++;
        if (count % 2 == 0) {
            String[] parts = entry.getValue().toString().split("/");
            Key key = entry.getKey();
            Mutation m = new Mutation(key.getRow());
            m.put(key.getColumnFamily(), key.getColumnQualifier(),
                    new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
            bw.addMutation(m);
        }
    }
    bw.close();
    assertEquals(splits.size() + 1, count);

    // This should fail: only one volume
    assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i",
            c.getInstance().getInstanceName(), "-t", tableName).waitFor());

    cluster.stop();

    // add the 2nd volume
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
    cluster.start();
    c = getConnector();

    // change the directory entries
    assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());

    // verify a more equal sharing
    int v1Count = 0, v2Count = 0;
    for (Entry<Key, Value> entry : scanner) {
        if (entry.getValue().toString().contains(v1.toString())) {
            v1Count++;
        }
        if (entry.getValue().toString().contains(v2.toString())) {
            v2Count++;
        }
    }
    log.info("Count for volume1: " + v1Count);
    log.info("Count for volume2: " + v2Count);
    assertEquals(splits.size() + 1, v1Count + v2Count);
    // a fair chooser will differ by less than count(volumes)
    assertTrue("Expected the number of files to differ between volumes by less than 2. " + v1Count + " " + v2Count,
            Math.abs(v1Count - v2Count) < 2);

    // verify we can read the old data
    count = 0;
    for (Entry<Key, Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
        assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
        count++;
    }
    assertEquals(splits.size(), count);
}
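The example above and the two VolumeIT tests that follow share a round-trip pattern worth isolating: load an existing accumulo-site.xml into an offline Configuration, overwrite one property, and serialize the result back with writeXml. A minimal sketch of that round trip, assuming a writable site file; the path and the volume values are placeholders, not taken from the examples:

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class RewriteSiteFileSketch {
    public static void main(String[] args) throws Exception {
        String siteXml = "/tmp/conf/accumulo-site.xml"; // placeholder path
        Configuration conf = new Configuration(false);
        conf.addResource(new Path(siteXml)); // Path overload reads through the Hadoop filesystem layer
        conf.set("instance.volumes", "file:///vol1,file:///vol2"); // placeholder volume list
        try (OutputStream out = new BufferedOutputStream(new FileOutputStream(siteXml))) {
            conf.writeXml(out); // writes every property currently set on the Configuration
        }
    }
}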
From source file:org.apache.accumulo.test.VolumeIT.java
License:Apache License
@Test
public void testAddVolumes() throws Exception {
    String[] tableNames = getUniqueNames(2);
    // grab this before shutting down cluster
    String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
    verifyVolumesUsed(tableNames[0], false, v1, v2);
    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    File v3f = new File(volDirBase, "v3");
    assertTrue(v3f.mkdir() || v3f.isDirectory());
    Path v3 = new Path("file://" + v3f.getAbsolutePath());
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString() + "," + v3.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());

    // check that all volumes are initialized
    for (Path volumePath : Arrays.asList(v1, v2, v3)) {
        FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
        Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
        FileStatus[] iids = fs.listStatus(vp);
        Assert.assertEquals(1, iids.length);
        Assert.assertEquals(uuid, iids[0].getPath().getName());
    }

    // start cluster and verify that new volume is used
    cluster.start();
    verifyVolumesUsed(tableNames[1], false, v1, v2, v3);
}
From source file:org.apache.accumulo.test.VolumeIT.java
License:Apache License
@Test
public void testNonConfiguredVolumes() throws Exception {
    String[] tableNames = getUniqueNames(2);
    // grab this before shutting down cluster
    String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
    verifyVolumesUsed(tableNames[0], false, v1, v2);
    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    File v3f = new File(volDirBase, "v3");
    assertTrue(v3f.mkdir() || v3f.isDirectory());
    Path v3 = new Path("file://" + v3f.getAbsolutePath());
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString() + "," + v3.toString());
    BufferedOutputStream fos = new BufferedOutputStream(
            new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
    conf.writeXml(fos);
    fos.close();

    // initialize volume
    Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());

    // check that all volumes are initialized
    for (Path volumePath : Arrays.asList(v1, v2, v3)) {
        FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
        Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
        FileStatus[] iids = fs.listStatus(vp);
        Assert.assertEquals(1, iids.length);
        Assert.assertEquals(uuid, iids[0].getPath().getName());
    }

    // start cluster and verify that new volume is used
    cluster.start();

    // Make sure we can still read the tables (tableNames[0] is very likely to have a file still on v1)
    List<String> expected = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        String row = String.format("%06d", i * 100 + 3);
        expected.add(row + ":cf1:cq1:1");
    }
    verifyData(expected, getConnector().createScanner(tableNames[0], Authorizations.EMPTY));

    // v1 should not have any data for tableNames[1]
    verifyVolumesUsed(tableNames[1], false, v2, v3);
}