List of usage examples for org.apache.hadoop.conf Configuration getBoolean
public boolean getBoolean(String name, boolean defaultValue)
Gets the value of the name property as a boolean. If no such property is set, or if the stored value is not a valid boolean, the defaultValue is returned instead.
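Before the project-specific examples below, a minimal, self-contained sketch of the call (the property name "my.feature.enabled" and the class name are made up for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("my.feature.enabled", "true");

        // Property present and parseable as a boolean: returns true.
        boolean enabled = conf.getBoolean("my.feature.enabled", false);

        // Property absent: the default value (false) is returned.
        boolean missing = conf.getBoolean("some.unset.flag", false);

        System.out.println(enabled + " " + missing); // prints "true false"
    }
}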
From source file:org.apache.hama.ipc.AsyncServer.java
License:Apache License
protected AsyncServer(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount,
        Configuration conf, String serverName, SecretManager<? extends TokenIdentifier> secretManager)
        throws IOException {
    this.conf = conf;
    this.port = port;
    this.address = new InetSocketAddress(bindAddress, port);
    this.paramClass = paramClass;
    this.maxRespSize = conf.getInt(IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY, IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT);
    this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", true);
    this.backlogLength = conf.getInt("ipc.server.listen.queue.size", 100);
}
From source file:org.apache.hama.ipc.Server.java
License:Apache License
/**
 * Constructs a server listening on the named port and address. Parameters
 * passed must be of the named class. The <code>handlerCount</code> determines
 * the number of handler threads that will be used to process calls.
 */
protected Server(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount,
        Configuration conf, String serverName, SecretManager<? extends TokenIdentifier> secretManager)
        throws IOException {
    this.bindAddress = bindAddress;
    this.conf = conf;
    this.port = port;
    this.paramClass = paramClass;
    this.handlerCount = handlerCount;
    this.socketSendBufferSize = 0;
    this.maxQueueSize = handlerCount
            * conf.getInt(IPC_SERVER_HANDLER_QUEUE_SIZE_KEY, IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
    this.maxRespSize = conf.getInt(IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY, IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT);
    this.readThreads = conf.getInt(IPC_SERVER_RPC_READ_THREADS_KEY, IPC_SERVER_RPC_READ_THREADS_DEFAULT);
    this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
    this.maxIdleTime = 2 * conf.getInt("ipc.client.connection.maxidletime", 1000);
    this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
    this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);
    this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
    this.isSecurityEnabled = UserGroupInformation.isSecurityEnabled();

    // Start the listener here and let it bind to the port
    listener = new Listener();
    this.port = listener.getAddress().getPort();
    this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);

    // Create the responder here
    responder = new Responder();
}
From source file:org.apache.hama.pipes.PipesApplication.java
License:Apache License
public Map<String, String> setupEnvironment(Configuration conf) throws IOException {
    Map<String, String> env = new HashMap<String, String>();

    this.streamingEnabled = conf.getBoolean("hama.streaming.enabled", false);
    if (!this.streamingEnabled) {
        serverSocket = new ServerSocket(0);
        env.put("hama.pipes.command.port", Integer.toString(serverSocket.getLocalPort()));
    }

    // add TMPDIR environment variable with the value of java.io.tmpdir
    env.put("TMPDIR", System.getProperty("java.io.tmpdir"));

    // Set Logging Environment from Configuration
    env.put("hama.pipes.logging", conf.getBoolean("hama.pipes.logging", false) ? "1" : "0");

    return env;
}
From source file:org.apache.hama.pipes.PipesApplication.java
License:Apache License
private List<String> setupCommand(Configuration conf) throws IOException, InterruptedException {
    List<String> cmd = new ArrayList<String>();
    String interpretor = conf.get("hama.pipes.executable.interpretor");
    if (interpretor != null) {
        cmd.add(interpretor);
    }

    String executable = null;
    try {
        if (DistributedCache.getLocalCacheFiles(conf) != null) {
            LOG.debug("DEBUG LocalCacheFilesCount: " + DistributedCache.getLocalCacheFiles(conf).length);
            for (Path u : DistributedCache.getLocalCacheFiles(conf))
                LOG.debug("DEBUG LocalCacheFiles: " + u);

            executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();

            LOG.debug("DEBUG: executable: " + executable);
        } else {
            LOG.debug("DEBUG: DistributedCache.getLocalCacheFiles(conf) returns null.");
            throw new IOException("Executable is missing!");
        }
    } catch (Exception e) {
        LOG.error("Executable: " + executable + " fs.default.name: " + conf.get("fs.default.name"));
        throw new IOException("Executable is missing!");
    }

    if (!new File(executable).canExecute()) {
        // LinuxTaskController sets +x permissions on all distcache files already.
        // In case of DefaultTaskController, set permissions here.
        FileUtil.chmod(executable, "u+x");
    }

    cmd.add(executable);

    String additionalArgs = conf.get("hama.pipes.executable.args");
    // if true, we are resolving filenames with the linked paths in DistributedCache
    boolean resolveArguments = conf.getBoolean("hama.pipes.resolve.executable.args", false);
    if (additionalArgs != null && !additionalArgs.isEmpty()) {
        String[] split = additionalArgs.split(" ");
        for (String s : split) {
            if (resolveArguments) {
                for (Path u : DistributedCache.getLocalCacheFiles(conf)) {
                    if (u.getName().equals(s)) {
                        LOG.info("Resolved argument \"" + s + "\" with fully qualified path \"" + u.toString() + "\"!");
                        cmd.add(u.toString());
                        break;
                    }
                }
            } else {
                cmd.add(s);
            }
        }
    }

    return cmd;
}
From source file:org.apache.hama.pipes.Submitter.java
License:Apache License
/**
 * Does the user want to keep the command file for debugging? If this is true,
 * pipes will write a copy of the command data to a file in the task directory
 * named "downlink.data", which may be used to run the C++ program under the
 * debugger. You probably also want to set
 * JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
 * being deleted. To run using the data file, set the environment variable
 * "hadoop.pipes.command.file" to point to the file.
 *
 * @param conf the configuration to check
 * @return will the framework save the command file?
 */
public static boolean getKeepCommandFile(Configuration conf) {
    return conf.getBoolean("hama.pipes.command-file.keep", false);
}
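A small usage sketch (not taken from the Hama source; the surrounding job setup is assumed) of enabling the option that the method above reads:

Configuration conf = new Configuration();
// Keep the "downlink.data" command file in the task directory for debugging.
conf.setBoolean("hama.pipes.command-file.keep", true);
boolean keep = Submitter.getKeepCommandFile(conf); // now returns true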
From source file:org.apache.hcatalog.data.HCatRecordSerDe.java
License:Apache License
private static Object serializePrimitiveField(Object field, ObjectInspector fieldObjectInspector) {
    Object f = ((PrimitiveObjectInspector) fieldObjectInspector).getPrimitiveJavaObject(field);
    if (f != null && HCatContext.INSTANCE.getConf().isPresent()) {
        Configuration conf = HCatContext.INSTANCE.getConf().get();

        if (f instanceof Boolean && conf.getBoolean(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER,
                HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT)) {
            return ((Boolean) f) ? 1 : 0;
        } else if (f instanceof Short && conf.getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
                HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
            return new Integer((Short) f);
        } else if (f instanceof Byte && conf.getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
                HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
            return new Integer((Byte) f);
        }
    }
    return f;
}
From source file:org.apache.hcatalog.hbase.ImportSequenceFile.java
License:Apache License
/**
 * Method to run the Importer MapReduce Job. Normally will be called by another MR job
 * during OutputCommitter.commitJob().
 * @param parentContext JobContext of the parent job
 * @param tableName name of table to bulk load data into
 * @param InputDir path of SequenceFile formatted data to read
 * @param scratchDir temporary path for the Importer MR job to build the HFiles which will be imported
 * @return true if the Importer job completed successfully
 */
static boolean runJob(JobContext parentContext, String tableName, Path InputDir, Path scratchDir) {
    Configuration parentConf = parentContext.getConfiguration();
    Configuration conf = new Configuration();
    for (Map.Entry<String, String> el : parentConf) {
        if (el.getKey().startsWith("hbase."))
            conf.set(el.getKey(), el.getValue());
        if (el.getKey().startsWith("mapred.cache.archives"))
            conf.set(el.getKey(), el.getValue());
    }

    // Inherit jar dependencies added to distributed cache loaded by parent job
    conf.set("mapred.job.classpath.archives", parentConf.get("mapred.job.classpath.archives", ""));
    conf.set("mapreduce.job.cache.archives.visibilities",
            parentConf.get("mapreduce.job.cache.archives.visibilities", ""));

    // Temporary fix until hbase security is ready
    // We need the written HFile to be world readable so
    // hbase regionserver user has the privileges to perform a hdfs move
    if (parentConf.getBoolean("hadoop.security.authorization", false)) {
        FsPermission.setUMask(conf, FsPermission.valueOf("----------"));
    }

    conf.set(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY, tableName);
    conf.setBoolean(JobContext.JOB_CANCEL_DELEGATION_TOKEN, false);

    boolean localMode = "local".equals(conf.get("mapred.job.tracker"));

    boolean success = false;
    try {
        FileSystem fs = FileSystem.get(parentConf);
        Path workDir = new Path(new Job(parentConf).getWorkingDirectory(), IMPORTER_WORK_DIR);
        if (!fs.mkdirs(workDir))
            throw new IOException("Importer work directory already exists: " + workDir);
        Job job = createSubmittableJob(conf, tableName, InputDir, scratchDir, localMode);
        job.setWorkingDirectory(workDir);
        job.getCredentials().addAll(parentContext.getCredentials());
        success = job.waitForCompletion(true);
        fs.delete(workDir, true);
        // We only cleanup on success because failure might've been caused by existence of target directory
        if (localMode && success) {
            new ImporterOutputFormat().getOutputCommitter(
                    org.apache.hadoop.mapred.HCatMapRedUtil.createTaskAttemptContext(conf, new TaskAttemptID()))
                    .commitJob(job);
        }
    } catch (InterruptedException e) {
        LOG.error("ImportSequenceFile Failed", e);
    } catch (ClassNotFoundException e) {
        LOG.error("ImportSequenceFile Failed", e);
    } catch (IOException e) {
        LOG.error("ImportSequenceFile Failed", e);
    }
    return success;
}
From source file:org.apache.hcatalog.mapreduce.FileOutputCommitterContainer.java
License:Apache License
private static boolean getOutputDirMarking(Configuration conf) {
    return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, false);
}
From source file:org.apache.hive.hcatalog.data.HCatRecordSerDe.java
License:Apache License
private static Object serializePrimitiveField(Object field, ObjectInspector fieldObjectInspector) {
    if (field == null) {
        return null;
    }

    Object f = ((PrimitiveObjectInspector) fieldObjectInspector).getPrimitiveJavaObject(field);

    if (f != null && HCatContext.INSTANCE.getConf().isPresent()) {
        Configuration conf = HCatContext.INSTANCE.getConf().get();

        if (f instanceof Boolean && conf.getBoolean(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER,
                HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT)) {
            return ((Boolean) f) ? 1 : 0;
        } else if (f instanceof Short && conf.getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
                HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
            return new Integer((Short) f);
        } else if (f instanceof Byte && conf.getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
                HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT)) {
            return new Integer((Byte) f);
        }
    }
    return f;
}
From source file:org.apache.hive.http.HttpServer.java
License:Apache License
/**
 * Checks whether the user has privileges to access the instrumentation servlets.
 * <p/>
 * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
 * (default value) it always returns TRUE.
 * <p/>
 * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
 * it will check if the current user is in the admin ACLs. If the user is
 * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
 *
 * @param servletContext the servlet context.
 * @param request the servlet request.
 * @param response the servlet response.
 * @return TRUE/FALSE based on the logic described above.
 */
@InterfaceAudience.LimitedPrivate("hive")
public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, HttpServletRequest request,
        HttpServletResponse response) throws IOException {
    Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);

    boolean access = true;
    boolean adminAccess = conf
            .getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false);
    if (adminAccess) {
        access = hasAdministratorAccess(servletContext, request, response);
    }
    return access;
}
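For context, a short sketch (assumed standalone usage, not part of HttpServer) of the flag that drives the branch above:

Configuration conf = new Configuration();
// Default: instrumentation servlets (e.g. /jmx) are accessible to any user.
conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false); // false

// Require membership in the admin ACLs before instrumentation access is granted.
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);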