List of usage examples for org.apache.hadoop.conf.Configuration.get
public String get(String name, String defaultValue)
Returns the value of the name property; if no such property exists, defaultValue is returned.
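Before the project examples below, a minimal sketch of the call itself; the property key and default value here are made-up placeholders for illustration, not keys from any of the projects listed.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "my.app.buffer.dir" is a hypothetical property name used only for this sketch.
        // get(name, defaultValue) returns the resolved value of the property,
        // or the supplied default when the property is not set anywhere.
        String bufferDir = conf.get("my.app.buffer.dir", "/tmp/my-app");
        System.out.println("buffer dir = " + bufferDir);
    }
}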
From source file:com.alibaba.wasp.zookeeper.ZKUtil.java
License:Apache License
/**
 * Waits for HBase installation's base (parent) znode to become available.
 *
 * @throws java.io.IOException on ZK errors
 */
public static void waitForBaseZNode(Configuration conf) throws IOException {
    LOG.info("Waiting until the base znode is available");
    String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
            HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf),
            conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT),
            EmptyWatcher.instance);

    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;

    KeeperException keeperEx = null;
    try {
        try {
            for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
                try {
                    if (zk.exists(parentZNode, false) != null) {
                        LOG.info("Parent znode exists: " + parentZNode);
                        keeperEx = null;
                        break;
                    }
                } catch (KeeperException e) {
                    keeperEx = e;
                }
                Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
            }
        } finally {
            zk.close();
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    }
    if (keeperEx != null) {
        throw new IOException(keeperEx);
    }
}
From source file:com.alibaba.wasp.zookeeper.ZooKeeperWatcher.java
License:Apache License
/**
 * Set the local variable node names using the specified configuration.
 */
private void setNodeNames(Configuration conf) {
    baseZNode = conf.get(FConstants.ZOOKEEPER_ZNODE_PARENT, FConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    fsZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.fs", "fs"));
    drainingZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.draining.fs", "draining"));
    masterAddressZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master"));
    backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode,
            conf.get("zookeeper.znode.backup.masters", "backup-masters"));
    clusterStateZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.state", "shutdown"));
    assignmentZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.unassigned", "unassigned"));
    tableZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table"));
    clusterIdZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "waspid"));
    balancerZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer"));
}
From source file:com.aliyun.odps.volume.VolumeFSInputStream.java
License:Apache License
public VolumeFSInputStream(String path, VolumeFSClient volumeClient, Long fileLength, Configuration conf)
        throws IOException {
    this.path = path;
    this.volumeFSClient = volumeClient;
    this.seekOptimization = conf.getBoolean(VolumeFileSystemConfigKeys.ODPS_VOLUME_SEEK_OPTIMIZATION_ENABLED,
            false);
    if (this.seekOptimization) {
        this.blockSize = conf.getLong(VolumeFileSystemConfigKeys.ODPS_VOLUME_BLOCK_SIZE,
                VolumeFSConstants.DEFAULT_VOLUME_BLOCK_SIZE);
    }
    this.fileLength = fileLength;
    this.closed = false;
    this.uuid = UUID.randomUUID().toString();
    buffer_block_dir = new File(conf.get(VolumeFileSystemConfigKeys.ODPS_VOLUME_BLOCK_BUFFER_DIR,
            VolumeFSConstants.DEFAULT_VOLUME_BLOCK_BUFFER_DIR));
    if (!buffer_block_dir.exists() && !buffer_block_dir.mkdirs()) {
        throw new IOException("Cannot create Volume block buffer directory: " + buffer_block_dir);
    }
    if (seekOptimization) {
        executorService = Executors.newFixedThreadPool(1);
    }
}
From source file:com.architecting.ch07.MapReduceIndexerTool.java
License:Apache License
private void addDistributedCacheFile(File file, Configuration conf) throws IOException {
    String HADOOP_TMP_FILES = "tmpfiles"; // see Hadoop's GenericOptionsParser
    String tmpFiles = conf.get(HADOOP_TMP_FILES, "");
    if (tmpFiles.length() > 0) { // already present?
        tmpFiles = tmpFiles + ",";
    }
    GenericOptionsParser parser = new GenericOptionsParser(new Configuration(conf),
            new String[] { "--files", file.getCanonicalPath() });
    String additionalTmpFiles = parser.getConfiguration().get(HADOOP_TMP_FILES);
    assert additionalTmpFiles != null;
    assert additionalTmpFiles.length() > 0;
    tmpFiles += additionalTmpFiles;
    conf.set(HADOOP_TMP_FILES, tmpFiles);
}
From source file:com.armon.test.quartz.QuartzConfiguration.java
License:Apache License
/**
 * Get the password from the Configuration instance using the
 * getPassword method if it exists. If not, then fall back to the
 * general get method for configuration elements.
 *
 * @param conf configuration instance for accessing the passwords
 * @param alias the name of the password element
 * @param defPass the default password
 * @return String password or default password
 * @throws IOException
 */
public static String getPassword(Configuration conf, String alias, String defPass) throws IOException {
    String passwd = null;
    try {
        Method m = Configuration.class.getMethod("getPassword", String.class);
        char[] p = (char[]) m.invoke(conf, alias);
        if (p != null) {
            LOG.debug(String.format(
                    "Config option \"%s\" was found through the Configuration getPassword method.", alias));
            passwd = new String(p);
        } else {
            LOG.debug(String.format("Config option \"%s\" was not found. Using provided default value", alias));
            passwd = defPass;
        }
    } catch (NoSuchMethodException e) {
        // this is a version of Hadoop where the credential provider API doesn't exist yet
        LOG.debug(String.format(
                "Credential.getPassword method is not available. Falling back to configuration."));
        passwd = conf.get(alias, defPass);
    } catch (SecurityException e) {
        throw new IOException(e.getMessage(), e);
    } catch (IllegalAccessException e) {
        throw new IOException(e.getMessage(), e);
    } catch (IllegalArgumentException e) {
        throw new IOException(e.getMessage(), e);
    } catch (InvocationTargetException e) {
        throw new IOException(e.getMessage(), e);
    }
    return passwd;
}
From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java
License:Apache License
private static Path getTransactionInfoDir(Configuration conf) throws IOException {
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    String working = conf.get(KEY_SYSTEM_DIR, DEFAULT_SYSTEM_DIR);
    Path path = new Path(working, TRANSACTION_INFO_DIR);
    return path.getFileSystem(conf).makeQualified(path);
}
From source file:com.asakusafw.runtime.stage.configurator.AutoLocalStageConfigurator.java
License:Apache License
private boolean isLocal(Job job) {
    Configuration conf = job.getConfiguration();
    if (conf.get(KEY_JOBTRACKER, DEFAULT_JOBTRACKER).equals(DEFAULT_JOBTRACKER)) {
        return true;
    }
    return false;
}
From source file:com.asakusafw.runtime.stage.configurator.AutoLocalStageConfigurator.java
License:Apache License
private void localize(Job job) {
    Configuration conf = job.getConfiguration();

    // reset job-tracker
    conf.set(KEY_JOBTRACKER, DEFAULT_JOBTRACKER);

    // replace local directories
    String tmpDir = conf.get(KEY_TEMPORARY_DIRECTORY, "");
    if (tmpDir.isEmpty()) {
        String name = System.getProperty("user.name", "asakusa");
        tmpDir = String.format("/tmp/hadoop-%s/autolocal", name);
    } else if (tmpDir.length() > 1 && tmpDir.endsWith("/")) {
        tmpDir = tmpDir.substring(0, tmpDir.length() - 1);
    }
    if (conf.getBoolean(KEY_DIRECTORY_QUALIFIER, true)) {
        String qualifier = UUID.randomUUID().toString();
        tmpDir = String.format("%s/%s", tmpDir, qualifier);
    }
    LOG.info(MessageFormat.format("Substituting temporary dir: job={0}, target={1}",
            job.getJobName(), tmpDir));
    conf.set(KEY_LOCAL_DIR, tmpDir + "/mapred/local");
    conf.set(KEY_STAGING_DIR, tmpDir + "/mapred/staging");
}
From source file:com.asakusafw.runtime.stage.inprocess.InProcessStageConfigurator.java
License:Apache License
private void install(Job job) {
    Configuration conf = job.getConfiguration();
    int prefixLength = KEY_PREFIX_REPLACE.length();
    for (Map.Entry<String, String> entry : conf.getValByRegex(PATTERN_KEY_REPLACE.pattern()).entrySet()) {
        assert entry.getKey().length() >= prefixLength;
        String key = entry.getKey().substring(prefixLength);
        if (key.isEmpty()) {
            continue;
        }
        String value = entry.getValue();
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("activate in-process configuration: {0}=\"{1}\"->\"{2}\"", //$NON-NLS-1$
                    key, conf.get(key, ""), //$NON-NLS-1$
                    value));
        }
        conf.set(key, value);
    }
    conf.set(StageConstants.PROP_JOB_RUNNER, SimpleJobRunner.class.getName());
    StageResourceDriver.setAccessMode(job, StageResourceDriver.AccessMode.DIRECT);
    StageInputFormat.setSplitCombinerClass(job, ExtremeSplitCombiner.class);
}
From source file:com.asakusafw.runtime.stage.input.BridgeInputFormat.java
License:Apache License
private VariableTable createBatchArgumentsTable(Configuration configuration) {
    String arguments = configuration.get(StageConstants.PROP_ASAKUSA_BATCH_ARGS, ""); //$NON-NLS-1$
    VariableTable variables = new VariableTable(VariableTable.RedefineStrategy.IGNORE);
    variables.defineVariables(arguments);
    return variables;
}