List of usage examples for org.apache.hadoop.conf.Configuration.getLong()
public long getLong(String name, long defaultValue)
Returns the value of the name property as a long. If no such property exists, the provided defaultValue is returned; if a value exists but is not a valid long, an error is thrown.

Parameters:
  name - the property name
  defaultValue - the default value to return if the property is not set
Returns:
  the property value as a long, or defaultValue
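For orientation before the examples, here is a minimal, self-contained sketch of the fallback behavior. The property names (example.block.size, example.max.retries) are hypothetical, chosen only for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {
    public static void main(String[] args) {
        // Load no default resources, so only the keys set below are present.
        Configuration conf = new Configuration(false);
        conf.setLong("example.block.size", 134217728L);

        // Key is set: the configured value wins.
        long blockSize = conf.getLong("example.block.size", 67108864L); // 134217728

        // Key is absent: the supplied default is returned.
        long retries = conf.getLong("example.max.retries", 4L); // 4

        // A value that does not parse as a long is an error, not a fallback:
        // conf.set("example.bad", "not-a-number");
        // conf.getLong("example.bad", 0L); // throws NumberFormatException

        System.out.println(blockSize + " " + retries);
    }
}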
From source file: com.qubole.rubix.spi.ClusterManager.java
License: Apache License
public void initialize(Configuration conf) {
    // The current field values serve as the defaults, so unset keys leave the state unchanged.
    splitSize = conf.getLong(splitSizeConf, splitSize);
    nodeRefreshTime = conf.getInt(nodeRefreshTimeConf, nodeRefreshTime);
}
From source file: com.quixey.hadoop.fs.oss.MultiPartUploader.java
License: Apache License
MultiPartUploader(OSSClient client, String bucket, Configuration conf) {
    this.client = checkNotNull(client);
    this.bucket = checkNotNull(bucket);
    checkNotNull(conf);
    multipartEnabled = conf.getBoolean(OSSFileSystemConfigKeys.OSS_MULTIPART_UPLOADS_ENABLED, true);
    partSize = conf.getLong(OSS_MULTIPART_UPLOADS_BLOCK_SIZE_PROPERTY, DEFAULT_MULTIPART_BLOCK_SIZE);
    maxThreads = conf.getInt(OSS_MULTIPART_UPLOADS_MAX_THREADS_PROPERTY, Integer.MAX_VALUE);
    checkArgument(partSize <= MAX_PART_SIZE, "%s must be at most %s",
            OSS_MULTIPART_UPLOADS_BLOCK_SIZE_PROPERTY, MAX_PART_SIZE);
}
From source file: com.quixey.hadoop.fs.oss.OSSFileSystem.java
License: Apache License
private static FileSystemStore createDefaultStore(Configuration conf) {
    FileSystemStore store = new CloudOSSFileSystemStore();
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt(OSS_MAX_RETRIES_PROPERTY, 4),
            conf.getLong(OSS_SLEEP_TIME_SECONDS_PROPERTY, 10), TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    methodNameToPolicyMap.put("storeFile", methodPolicy);
    methodNameToPolicyMap.put("rename", methodPolicy);
    return (FileSystemStore) RetryProxy.create(FileSystemStore.class, store, methodNameToPolicyMap);
}
From source file: com.ricemap.spateDB.mapred.RandomShapeGenerator.java
License: Apache License
/**
 * Initialize from a FileSplit
 *
 * @param job
 * @param split
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public RandomShapeGenerator(Configuration job, RandomInputFormat.GeneratedSplit split) throws IOException {
    this(split.length, SpatialSite.getPrism(job, GenerationMBR),
            DistributionType.valueOf(job.get(GenerationType)), job.getInt(GenerationRectSize, 100),
            split.index + job.getLong(GenerationSeed, System.currentTimeMillis()));
    setShape((S) SpatialSite.createStockShape(job));
}
From source file: com.rockstor.memory.MemAllocatorFactory.java
License: Apache License
/**
 * rockstor.memory.allocator.class  com.rockstor.memory.DefaultAllocator
 * rockstor.memory.minBufferSize    16384 (16K)
 * rockstor.memory.poolSize         1073741824 (1G)
 */
private MemAllocatorFactory() {
    Configuration conf = RockConfiguration.getDefault();
    long poolSize = conf.getLong("rockstor.memory.poolSize", defaultPoolSize);
    if (poolSize < (1L << 20)) {
        poolSize = (1L << 20);
    }
    long reservedSize = conf.getLong("rockstor.memory.reservedSize", defaultReservedSize);
    if (reservedSize < defaultReservedSize) {
        reservedSize = defaultReservedSize;
    }
    long leftSize = Runtime.getRuntime().maxMemory() - reservedSize;
    if (leftSize < 0) {
        LOG.fatal("configuration error, rockstor.memory.reservedSize="
                + DefaultAllocator.readableSpace(reservedSize) + ", left memory("
                + DefaultAllocator.readableSpace(leftSize) + ") is less than ZERO");
        System.exit(-1);
    }
    if (leftSize < poolSize) {
        LOG.warn("configuration warning, reduce rockstor.memory.poolSize "
                + DefaultAllocator.readableSpace(poolSize) + ", to left memory size "
                + DefaultAllocator.readableSpace(leftSize));
        poolSize = leftSize;
    }
    int minBufSize = conf.getInt("rockstor.memory.minBufferSize", defaultMinBufSize);
    if (minBufSize < 1024 || minBufSize >= poolSize) {
        minBufSize = 1024;
    }
    String className = conf.get("rockstor.memory.allocator.class", defaultAllocatorClassName);
    try {
        allocator = (MemAllocatorInterface) Class.forName(className).newInstance();
    } catch (Exception e) {
        ExceptionLogger.log(LOG, "new memory allocator (" + className + ") instance failed", e);
        System.exit(-1);
    }
    LOG.info("New Memory Allocator Instance (" + className + ") OK!");
    allocator.init(poolSize, minBufSize);
}
From source file: com.sa.npopa.samples.hbase.rest.client.RemoteAdmin.java
License: Apache License
/**
 * Constructor
 * @param client
 * @param conf
 * @param accessToken
 */
public RemoteAdmin(Client client, Configuration conf, String accessToken) {
    this.client = client;
    this.conf = conf;
    this.accessToken = accessToken;
    this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
    this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
}
From source file: com.sa.npopa.samples.hbase.rest.client.RemoteHTable.java
License: Apache License
/**
 * Constructor
 * @param client
 * @param conf
 * @param name
 */
public RemoteHTable(Client client, Configuration conf, byte[] name) {
    this.client = client;
    this.conf = conf;
    this.name = name;
    this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
    this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
}
From source file: com.splicemachine.fs.s3.PrestoS3FileSystem.java
License: Apache License
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));
    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());
    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);
    this.s3 = createAmazonS3Client(uri, conf, configuration);
    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}
From source file: com.splicemachine.orc.OrcConf.java
License: Open Source License
public static long getLongVar(Configuration conf, OrcConf.ConfVars var) {
    return conf.getLong(var.varname, var.defaultLongVal);
}
From source file: com.splout.db.hadoop.TablespaceGenerator.java
License: Apache License
/**
 * Samples the input, if needed.
 */
protected PartitionMap sample(int nPartitions, Configuration conf, TupleSampler.SamplingType samplingType,
        TupleSampler.SamplingOptions samplingOptions) throws TupleSamplerException, IOException {
    FileSystem fileSystem = outputPath.getFileSystem(conf);
    // Number of records to sample
    long recordsToSample = conf.getLong("splout.sampling.records.to.sample", 100000);
    // The sampler will generate a file with samples to use to create the
    // partition map
    Path sampledInput = new Path(outputPath, OUT_SAMPLED_INPUT);
    Path sampledInputSorted = new Path(outputPath, OUT_SAMPLED_INPUT_SORTED);
    TupleSampler sampler = new TupleSampler(samplingType, samplingOptions, callingClass);
    long retrivedSamples = sampler.sample(tablespace, conf, recordsToSample, sampledInput);
    // 1.1 Sorting sampled keys on disk
    fileSystem.delete(sampledInputSorted, true);
    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fileSystem, Text.class, NullWritable.class, conf);
    sorter.sort(sampledInput, sampledInputSorted);
    // Start the reader
    @SuppressWarnings("deprecation")
    final SequenceFile.Reader reader = new SequenceFile.Reader(fileSystem, sampledInputSorted, conf);
    Log.info(retrivedSamples + " total keys sampled.");
    /*
     * 2: Calculate partition map
     */
    Nextable nextable = new Nextable() {
        @Override
        public boolean next(Writable writable) throws IOException {
            return reader.next(writable);
        }
    };
    List<PartitionEntry> partitionEntries = calculatePartitions(nPartitions, retrivedSamples, nextable);
    reader.close();
    fileSystem.delete(sampledInput, true);
    fileSystem.delete(sampledInputSorted, true);
    // 2.2 Create the partition map
    return new PartitionMap(partitionEntries);
}