List of usage examples for org.apache.hadoop.conf Configuration getInt
public int getInt(String name, int defaultValue)
Gets the value of the name property as an int. If no such property exists, the provided defaultValue is returned; if the stored value is not a valid int, a NumberFormatException is thrown.
Parameters: name - the property name; defaultValue - the value to return when the property is not set. Returns: the property value as an int, or defaultValue.
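Before the real-world snippets below, here is a minimal sketch of the call itself; the property name my.app.retry.count and the default of 3 are invented for illustration and do not come from any of the listed projects.

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The property is not set anywhere, so the default (3) is returned.
        int retries = conf.getInt("my.app.retry.count", 3);
        System.out.println("retries = " + retries); // prints "retries = 3"

        // Once the property has a value, getInt parses and returns it.
        conf.setInt("my.app.retry.count", 7);
        System.out.println(conf.getInt("my.app.retry.count", 3)); // prints 7
    }
}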
From source file:com.aliyun.fs.oss.common.OssRecordReader.java
License:Apache License
public OssRecordReader(Configuration job, FileSplit split, FileSystem fs, byte[] recordDelimiter)
        throws IOException {
    this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,
            Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    compressionCodecs = new CompressionCodecFactory(job);
    codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    fileIn = fs.open(file);
    if (isCompressedInput()) {
        decompressor = CodecPool.getDecompressor(codec);
        if (codec instanceof SplittableCompressionCodec) {
            final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(
                    fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
            in = new LineReader(cIn, job, recordDelimiter);
            start = cIn.getAdjustedStart();
            end = cIn.getAdjustedEnd();
            filePosition = cIn; // take pos from compressed stream
        } else {
            in = new LineReader(codec.createInputStream(fileIn, decompressor), job, recordDelimiter);
            filePosition = fileIn;
        }
    } else {
        fileIn.seek(start);
        in = new LineReader(fileIn, job, recordDelimiter);
        filePosition = fileIn;
    }
    // If this is not the first split, we always throw away first record
    // because we always (except the last split) read one extra line in
    // next() method.
    if (start != 0) {
        start += in.readLine(new Text(), 0, maxBytesToConsume(start));
    }
    this.pos = start;
}
From source file:com.aliyun.fs.oss.nat.JetOssNativeFileSystemStore.java
License:Apache License
public void initialize(URI uri, Configuration conf) throws Exception {
    if (uri.getHost() == null) {
        throw new IllegalArgumentException("Invalid hostname in URI " + uri);
    }
    String userInfo = uri.getUserInfo();
    if (userInfo != null) {
        throw new IllegalArgumentException("Disallow set ak information in OSS URI.");
    }
    this.conf = conf;
    String host = uri.getHost();
    if (!StringUtils.isEmpty(host) && !host.contains(".")) {
        bucket = host;
    } else if (!StringUtils.isEmpty(host)) {
        bucket = host.substring(0, host.indexOf("."));
        endpoint = host.substring(host.indexOf(".") + 1);
    }

    // try to get accessKeyId, accessKeySecret, securityToken, endpoint from configuration.
    if (accessKeyId == null) {
        accessKeyId = conf.getTrimmed("fs.oss.accessKeyId");
    }
    if (accessKeySecret == null) {
        accessKeySecret = conf.getTrimmed("fs.oss.accessKeySecret");
    }
    if (securityToken == null) {
        securityToken = conf.getTrimmed("fs.oss.securityToken");
    }
    if (endpoint == null) {
        endpoint = conf.getTrimmed("fs.oss.endpoint");
    }

    // try to get accessKeyId, accessKeySecret, securityToken, endpoint from MetaService.
    LOG.debug("Try to get accessKeyId, accessKeySecret, securityToken endpoint from MetaService.");
    if (accessKeyId == null || accessKeySecret == null) {
        accessKeyId = MetaClient.getRoleAccessKeyId();
        accessKeySecret = MetaClient.getRoleAccessKeySecret();
        securityToken = MetaClient.getRoleSecurityToken();
        if (StringUtils.isEmpty(accessKeyId) || StringUtils.isEmpty(accessKeySecret)
                || StringUtils.isEmpty(securityToken)) {
            throw new IllegalArgumentException("AccessKeyId/AccessKeySecret/SecurityToken is not available, you "
                    + "can set them in configuration.");
        }
    }
    if (endpoint == null) {
        endpoint = EndpointEnum.getEndpoint("oss", MetaClient.getClusterRegionName(),
                MetaClient.getClusterNetworkType());
        if (endpoint == null) {
            throw new IllegalArgumentException(
                    "Can not find any suitable " + "endpoint, you can set it in OSS URI");
        }
    }
    if (securityToken == null) {
        this.ossClient = new OSSClientAgent(endpoint, accessKeyId, accessKeySecret, conf);
    } else {
        this.ossClient = new OSSClientAgent(endpoint, accessKeyId, accessKeySecret, securityToken, conf);
    }
    this.numCopyThreads = conf.getInt("fs.oss.uploadPartCopy.thread.number", 10);
    this.numPutThreads = conf.getInt("fs.oss.uploadPart.thread.number", 5);
    this.maxSplitSize = conf.getInt("fs.oss.multipart.split.max.byte", 5 * 1024 * 1024);
    this.numSplits = conf.getInt("fs.oss.multipart.split.number", 10);
    this.maxSimpleCopySize = conf.getLong("fs.oss.copy.simple.max.byte", 64 * 1024 * 1024L);
    this.maxSimplePutSize = conf.getLong("fs.oss.put.simple.max.byte", 5 * 1024 * 1024);
}
From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java
License:Apache License
private static NativeFileSystemStore createDefaultStore(Configuration conf) {
    NativeFileSystemStore store = new JetOssNativeFileSystemStore();
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt("fs.oss.maxRetries", 4), conf.getLong("fs.oss.sleepTimeSeconds", 10), TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
    // for reflection invoke.
    exceptionToPolicyMap.put(InvocationTargetException.class, basePolicy);
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(OssException.class, basePolicy);
    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("storeFile", methodPolicy);
    methodNameToPolicyMap.put("storeFiles", methodPolicy);
    methodNameToPolicyMap.put("storeEmptyFile", methodPolicy);
    methodNameToPolicyMap.put("retrieveMetadata", methodPolicy);
    methodNameToPolicyMap.put("retrieve", methodPolicy);
    methodNameToPolicyMap.put("purge", methodPolicy);
    methodNameToPolicyMap.put("dump", methodPolicy);
    methodNameToPolicyMap.put("doesObjectExist", methodPolicy);
    methodNameToPolicyMap.put("copy", methodPolicy);
    methodNameToPolicyMap.put("list", methodPolicy);
    methodNameToPolicyMap.put("delete", methodPolicy);
    return (NativeFileSystemStore) RetryProxy.create(NativeFileSystemStore.class, store, methodNameToPolicyMap);
}
From source file:com.aliyun.fs.oss.nat.NativeOssFileSystem.java
License:Apache License
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    if (store == null) {
        store = createDefaultStore(conf);
    }
    try {
        store.initialize(uri, conf);
    } catch (Exception e) {
        LOG.warn(e.getMessage());
        throw new IOException(e);
    }
    setConf(conf);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(uri, null);
    this.bufferSize = conf.getInt("fs.oss.readBuffer.size", 64 * 1024 * 1024);
    // do not suggest to use too large buffer in case of GC issue or OOM.
    if (this.bufferSize >= 256 * 1024 * 1024) {
        LOG.warn("'fs.oss.readBuffer.size' is " + bufferSize + ", it's to large and system will suppress it down "
                + "to '268435456' automatically.");
        this.bufferSize = 256 * 1024 * 1024;
    }
    this.conf = conf;
    this.algorithmVersion = conf.getInt(OSSREADER_ALGORITHM_VERSION, OSSREADER_ALGORITHM_VERSION_DEFAULT);
    if (algorithmVersion != 1 && algorithmVersion != 2) {
        throw new IOException("Only 1 or 2 algorithm version is supported");
    }
}
From source file:com.aliyun.fs.oss.utils.OSSClientAgent.java
License:Apache License
@SuppressWarnings("unchecked") private Object initializeOSSClientConfig(Configuration conf, Class ClientConfigurationClz) throws IOException, ServiceException, ClientException { try {//from w w w .j a va2 s . c o m Constructor cons = ClientConfigurationClz.getConstructor(); Object clientConfiguration = cons.newInstance(); Method method0 = ClientConfigurationClz.getMethod("setConnectionTimeout", Integer.TYPE); method0.invoke(clientConfiguration, conf.getInt("fs.oss.client.connection.timeout", ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT)); Method method1 = ClientConfigurationClz.getMethod("setSocketTimeout", Integer.TYPE); method1.invoke(clientConfiguration, conf.getInt("fs.oss.client.socket.timeout", ClientConfiguration.DEFAULT_SOCKET_TIMEOUT)); Method method2 = ClientConfigurationClz.getMethod("setConnectionTTL", Long.TYPE); method2.invoke(clientConfiguration, conf.getLong("fs.oss.client.connection.ttl", ClientConfiguration.DEFAULT_CONNECTION_TTL)); Method method3 = ClientConfigurationClz.getMethod("setMaxConnections", Integer.TYPE); method3.invoke(clientConfiguration, conf.getInt("fs.oss.connection.max", ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); return clientConfiguration; } catch (Exception e) { handleException(e); return null; } }
From source file:com.aliyun.odps.fs.VolumeFileSystem.java
License:Apache License
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    conf.addResource(VolumeFSConstants.VOLUME_FS_CONFIG_FILE);
    super.initialize(uri, conf);
    setConf(conf);
    checkURI(uri);
    this.project = resolveProject(uri);
    this.volumeClient = createVolumeClient(conf);
    this.uri = URI.create(uri.getScheme() + VolumeFSConstants.SCHEME_SEPARATOR + uri.getAuthority());
    this.homeVolume = getHomeVolume(conf);
    this.workingDir = getHomeDirectory();
    this.defaultReplication = (short) conf.getInt(VolumeFileSystemConfigKeys.DFS_REPLICATION_KEY,
            VolumeFSConstants.DFS_REPLICATION_DEFAULT);
}
From source file:com.aliyun.openservices.tablestore.hadoop.TableStoreOutputFormat.java
License:Apache License
@Override
public RecordWriter<Writable, BatchWriteWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    String outputTable = conf.get(OUTPUT_TABLE);
    Preconditions.checkNotNull(outputTable, "Output table must be set.");
    SyncClientInterface ots = TableStore.newOtsClient(conf);
    int maxBatchSize = conf.getInt(MAX_UPDATE_BATCH_SIZE, 0);
    if (maxBatchSize == 0) {
        return new TableStoreRecordWriter(ots, outputTable);
    } else {
        return new TableStoreRecordWriter(ots, outputTable, maxBatchSize);
    }
}
From source file:com.armon.test.quartz.QuartzConfiguration.java
License:Apache License
/**
 * Get the value of the <code>name</code> property as an <code>int</code>, possibly
 * referring to the deprecated name of the configuration property.
 * If no such property exists, the provided default value is returned,
 * or if the specified value is not a valid <code>int</code>,
 * then an error is thrown.
 *
 * @param name property name.
 * @param deprecatedName a deprecatedName for the property to use
 *                       if non-deprecated name is not used
 * @param defaultValue default value.
 * @throws NumberFormatException when the value is invalid
 * @return property value as an <code>int</code>,
 *         or <code>defaultValue</code>.
 */
// TODO: developer note: This duplicates the functionality of deprecated
// property support in Configuration in Hadoop 2. But since Hadoop-1 does not
// contain these changes, we will do our own as usual. Replace these when H2 is default.
public static int getInt(Configuration conf, String name, String deprecatedName, int defaultValue) {
    if (conf.get(deprecatedName) != null) {
        LOG.warn(String.format("Config option \"%s\" is deprecated. Instead, use \"%s\"", deprecatedName, name));
        return conf.getInt(deprecatedName, defaultValue);
    } else {
        return conf.getInt(name, defaultValue);
    }
}
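A short, hypothetical usage sketch of the helper above; the property name quartz.thread.pool.size and its deprecated alias quartz.threadCount are invented here purely to illustrate the deprecated-name fallback.

import org.apache.hadoop.conf.Configuration;
import com.armon.test.quartz.QuartzConfiguration;

public class DeprecatedKeyExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("quartz.threadCount", 8); // only the deprecated name is configured

        // Returns 8 and logs a deprecation warning: the deprecated name takes
        // precedence when present; otherwise the new name (or the default 4) is used.
        int threads = QuartzConfiguration.getInt(conf, "quartz.thread.pool.size", "quartz.threadCount", 4);
        System.out.println("threads = " + threads);
    }
}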
From source file:com.ashishpaliwal.hadoop.utils.inputformat.CsvLineReader.java
License:Apache License
/**
 * Create a line reader that reads from the given stream using the
 * <code>io.file.buffer.size</code> specified in the given
 * <code>Configuration</code>.
 *
 * @param in   input stream
 * @param conf configuration
 * @throws IOException
 */
public CsvLineReader(InputStream in, Configuration conf) throws IOException {
    this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
}
From source file:com.ashishpaliwal.hadoop.utils.inputformat.CsvRecordReader.java
License:Apache License
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.maxLineLength = job.getInt(MAX_LINE_LENGTH, 2147483647);
    this.start = split.getStart();
    this.end = (this.start + split.getLength());
    Path file = split.getPath();
    this.compressionCodecs = new CompressionCodecFactory(job);
    this.codec = this.compressionCodecs.getCodec(file);
    FileSystem fs = file.getFileSystem(job);
    this.fileIn = fs.open(file);
    if (isCompressedInput()) {
        this.decompressor = CodecPool.getDecompressor(this.codec);
        if ((this.codec instanceof SplittableCompressionCodec)) {
            SplitCompressionInputStream cIn = ((SplittableCompressionCodec) this.codec).createInputStream(
                    this.fileIn, this.decompressor, this.start, this.end,
                    SplittableCompressionCodec.READ_MODE.BYBLOCK);
            this.in = new CsvLineReader(cIn, job);
            this.start = cIn.getAdjustedStart();
            this.end = cIn.getAdjustedEnd();
            this.filePosition = cIn;
        } else {
            this.in = new CsvLineReader(this.codec.createInputStream(this.fileIn, this.decompressor), job);
            this.filePosition = this.fileIn;
        }
    } else {
        this.fileIn.seek(this.start);
        this.in = new CsvLineReader(this.fileIn, job);
        this.filePosition = this.fileIn;
    }
    if (this.start != 0L) {
        this.start += this.in.readLine(new Text(), 0, maxBytesToConsume(this.start));
    }
    this.pos = this.start;
}