List of usage examples for org.apache.hadoop.conf.Configuration.get(String)
public String get(String name)

Get the value of the name property, or null if no such property exists.
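A minimal sketch of the basic call pattern before the full examples from real projects below; the property names here are hypothetical and only illustrate the null-versus-default behavior:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical property name, used only for illustration.
        conf.set("example.service.endpoint", "hdfs://namenode:8020");

        // get(name) returns the stored value, or null when the property is absent.
        String endpoint = conf.get("example.service.endpoint"); // "hdfs://namenode:8020"
        String missing = conf.get("example.missing.property");  // null

        // The two-argument overload substitutes a default instead of returning null,
        // as several of the examples below do (e.g. conf.get(name, null)).
        String withDefault = conf.get("example.missing.property", "fallback"); // "fallback"

        System.out.println(endpoint + " / " + missing + " / " + withDefault);
    }
}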
From source file: com.cloudera.crunch.io.avro.AvroFileTarget.java
License: Open Source License

@Override
public void configureForMapReduce(Job job, PType<?> ptype, Path outputPath, String name) {
    AvroType<?> atype = (AvroType<?>) ptype;
    Configuration conf = job.getConfiguration();
    String outputSchema = conf.get("avro.output.schema");
    if (outputSchema == null) {
        conf.set("avro.output.schema", atype.getSchema().toString());
    } else if (!outputSchema.equals(atype.getSchema().toString())) {
        throw new IllegalStateException("Avro targets must use the same output schema");
    }
    SourceTargetHelper.configureTarget(job, AvroOutputFormat.class, ptype.getDataBridge(), outputPath, name);
}
From source file: com.cloudera.hadoop.hdfs.nfs.NetUtils.java

License: Apache License

public static String getDomain(Configuration conf, InetAddress address) {
    String override = conf.get(NFS_OWNER_DOMAIN);
    if (override != null) {
        return override;
    }
    String host = address.getCanonicalHostName();
    if (address.isLoopbackAddress() && address.getHostAddress().equals(address.getHostName())) {
        // loopback does not resolve
        return "localdomain";
    }
    int pos;
    if (((pos = host.indexOf('.')) > 0) && (pos < host.length())) {
        return host.substring(pos + 1);
    }
    LOGGER.error(Joiner.on("\n").join("Unable to find the domain the server is running on. Please report.",
            "canonicalHostName = " + host, "hostname = " + address.getHostName(),
            "isLoopback = " + address.isLoopbackAddress(), "hostAdddress = " + address.getHostAddress()));
    return "unknown";
}
From source file: com.cloudera.hoop.AuthFilter.java

License: Open Source License

/**
 * Returns the Alfredo configuration from Hoop's configuration.
 * <p/>
 * It returns all Hoop's configuration properties prefixed with
 * <code>hoop.authentication</code>. The <code>hoop.authentication</code>
 * prefix is removed from the returned property names.
 *
 * @param configPrefix parameter not used.
 * @param filterConfig parameter not used.
 * @return Alfredo configuration read from Hoop's configuration.
 */
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
    Properties props = new Properties();
    Configuration conf = HoopServer.get().getConfig();
    props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
    for (Map.Entry<String, String> entry : conf) {
        String name = entry.getKey();
        if (name.startsWith(CONF_PREFIX)) {
            String value = conf.get(name);
            name = name.substring(CONF_PREFIX.length());
            props.setProperty(name, value);
        }
    }
    return props;
}
From source file: com.cloudera.impala.service.JniFrontend.java

License: Apache License

/**
 * Return an empty string if the FileSystem configured in CONF refers to a
 * DistributedFileSystem (the only one supported by Impala) and Impala can list the root
 * directory "/". Otherwise, return an error string describing the issues.
 */
private String checkFileSystem(Configuration conf) {
    try {
        FileSystem fs = FileSystem.get(CONF);
        if (!(fs instanceof DistributedFileSystem)) {
            return "Unsupported file system. Impala only supports DistributedFileSystem "
                    + "but the configured filesystem is: " + fs.getClass().getSimpleName() + "."
                    + CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY + "("
                    + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ")"
                    + " might be set incorrectly";
        }
    } catch (IOException e) {
        return "couldn't retrieve FileSystem:\n" + e.getMessage();
    }
    try {
        FileSystemUtil.getTotalNumVisibleFiles(new Path("/"));
    } catch (IOException e) {
        return "Could not read the HDFS root directory at "
                + CONF.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) + ". Error was: \n"
                + e.getMessage();
    }
    return "";
}
From source file: com.cloudera.impala.service.ZooKeeperSession.java

License: Apache License

/**
 * Connects to zookeeper and handles maintaining membership.
 * Note: this can only be called when either planner or worker is running.
 * The client of this class is responsible for checking that.
 * @param conf
 * @param id - The ID for this server. This should be unique among
 *   all instances of the service.
 * @param principal - The Kerberos principal to use. If not null and not empty,
 *   ZooKeeper nodes will be secured with Kerberos.
 * @param keytabPath - The path to the keytab file. Only used when a valid
 *   principal is provided.
 * @param plannerPort - If greater than 0, running the planner service.
 * @param workerPort - If greater than 0, running the worker service.
 */
public ZooKeeperSession(Configuration conf, String id, String principal, String keytabPath,
        int plannerPort, int workerPort) throws IOException {
    id_ = id;
    plannerPort_ = plannerPort;
    workerPort_ = workerPort;
    zkConnectString_ = conf.get(ZOOKEEPER_CONNECTION_STRING_CONF);
    if (zkConnectString_ == null || zkConnectString_.trim().isEmpty()) {
        throw new IllegalArgumentException(
                "Zookeeper connect string has to be specified through " + ZOOKEEPER_CONNECTION_STRING_CONF);
    }
    LOGGER.info("Connecting to zookeeper at: " + zkConnectString_ + " with id: " + id_);
    connectTimeoutMillis_ = conf.getInt(ZOOKEEPER_CONNECT_TIMEOUTMILLIS_CONF,
            CuratorFrameworkFactory.builder().getConnectionTimeoutMs());
    if (principal != null && !principal.isEmpty()) {
        newNodeAcl_ = Ids.CREATOR_ALL_ACL;
    } else {
        newNodeAcl_ = Ids.OPEN_ACL_UNSAFE;
    }
    String aclStr = conf.get(ZOOKEEPER_STORE_ACL_CONF, null);
    LOGGER.info("Zookeeper acl: " + aclStr);
    if (StringUtils.isNotBlank(aclStr)) newNodeAcl_ = parseACLs(aclStr);
    plannersAcl_ = Ids.READ_ACL_UNSAFE;
    String plannersAclStr = conf.get(ZOOKEEPER_STORE_PLANNERS_ACL_CONF, null);
    LOGGER.info("Zookeeper planners acl: " + plannersAclStr);
    if (plannersAclStr != null) plannersAcl_ = parseACLs(plannersAclStr);
    rootNode_ = conf.get(ZOOKEEPER_ZNODE_CONF, ZOOKEEPER_ZNODE_DEFAULT);
    LOGGER.info("Zookeeper root: " + rootNode_);
    // Install the JAAS Configuration for the runtime, if Kerberos is enabled.
    if (principal != null && !principal.isEmpty()) {
        setupJAASConfig(principal, keytabPath);
    }
    initMembershipPaths();
}
From source file: com.cloudera.lib.service.hadoop.HadoopService.java

License: Open Source License

@Override
public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
        throws HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    Check.notNull(executor, "executor");
    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, NAME_NODE_PROPERTY);
    }
    try {
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<T>() {
            public T run() throws Exception {
                Configuration namenodeConf = createNameNodeConf(conf);
                FileSystem fs = createFileSystem(namenodeConf);
                Instrumentation instrumentation = getServer().get(Instrumentation.class);
                Instrumentation.Cron cron = instrumentation.createCron();
                try {
                    checkNameNodeHealth(fs);
                    cron.start();
                    return executor.execute(fs);
                } finally {
                    cron.stop();
                    instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
                    closeFileSystem(fs);
                }
            }
        });
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H03, ex);
    }
}
From source file: com.cloudera.lib.service.hadoop.HadoopService.java

License: Open Source License

@Override
public <T> T execute(String user, final Configuration conf, final JobClientExecutor<T> executor)
        throws HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    Check.notNull(executor, "executor");
    if (conf.get(JOB_TRACKER_PROPERTY) == null || conf.getTrimmed(JOB_TRACKER_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, JOB_TRACKER_PROPERTY);
    }
    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
        throw new HadoopException(HadoopException.ERROR.H06, NAME_NODE_PROPERTY);
    }
    try {
        validateJobtracker(new URI(conf.get(JOB_TRACKER_PROPERTY)).getAuthority());
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<T>() {
            public T run() throws Exception {
                JobConf jobtrackerConf = createJobTrackerConf(conf);
                Configuration namenodeConf = createNameNodeConf(conf);
                JobClient jobClient = createJobClient(jobtrackerConf);
                try {
                    checkJobTrackerHealth(jobClient);
                    FileSystem fs = createFileSystem(namenodeConf);
                    Instrumentation instrumentation = getServer().get(Instrumentation.class);
                    Instrumentation.Cron cron = instrumentation.createCron();
                    try {
                        checkNameNodeHealth(fs);
                        cron.start();
                        return executor.execute(jobClient, fs);
                    } finally {
                        cron.stop();
                        instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
                        closeFileSystem(fs);
                    }
                } finally {
                    closeJobClient(jobClient);
                }
            }
        });
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H04, ex);
    }
}
From source file: com.cloudera.lib.service.hadoop.HadoopService.java

License: Open Source License

public FileSystem createFileSystemInternal(String user, final Configuration conf)
        throws IOException, HadoopException {
    Check.notEmpty(user, "user");
    Check.notNull(conf, "conf");
    try {
        validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
        UserGroupInformation ugi = getUGI(user);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                Configuration namenodeConf = createNameNodeConf(conf);
                return createFileSystem(namenodeConf);
            }
        });
    } catch (IOException ex) {
        throw ex;
    } catch (HadoopException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new HadoopException(HadoopException.ERROR.H08, ex.getMessage(), ex);
    }
}
From source file: com.cloudera.lib.util.TestXConfiguration.java

License: Open Source License

@Test
public void copy() throws Exception {
    Configuration srcConf = new Configuration(false);
    Configuration targetConf = new Configuration(false);
    srcConf.set("testParameter1", "valueFromSource");
    srcConf.set("testParameter2", "valueFromSource");
    targetConf.set("testParameter2", "valueFromTarget");
    targetConf.set("testParameter3", "valueFromTarget");
    XConfiguration.copy(srcConf, targetConf);
    Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
    Assert.assertEquals("valueFromSource", targetConf.get("testParameter2"));
    Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3"));
}
From source file: com.cloudera.lib.util.TestXConfiguration.java

License: Open Source License

@Test
public void injectDefaults() throws Exception {
    Configuration srcConf = new Configuration(false);
    Configuration targetConf = new Configuration(false);
    srcConf.set("testParameter1", "valueFromSource");
    srcConf.set("testParameter2", "valueFromSource");
    targetConf.set("testParameter2", "originalValueFromTarget");
    targetConf.set("testParameter3", "originalValueFromTarget");
    XConfiguration.injectDefaults(srcConf, targetConf);
    Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
    Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
    Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
    Assert.assertEquals("valueFromSource", srcConf.get("testParameter1"));
    Assert.assertEquals("valueFromSource", srcConf.get("testParameter2"));
    Assert.assertNull(srcConf.get("testParameter3"));
}