List of usage examples for org.apache.hadoop.conf.Configuration.getLong
public long getLong(String name, long defaultValue)

Get the value of the name property as a long. If no such property exists, the supplied defaultValue is returned; if a value is present but cannot be parsed as a long, a NumberFormatException is thrown.
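A minimal, self-contained sketch of the call; the property names and values below are illustrative and are not taken from any of the projects listed:

import org.apache.hadoop.conf.Configuration;

public class GetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("example.timeout.ms", 30000L); // illustrative property name

        // Property is present: getLong parses the stored value and returns 30000.
        long timeout = conf.getLong("example.timeout.ms", 60000L);

        // Property is absent: getLong falls back to the supplied default, 60000.
        long missing = conf.getLong("example.not.set", 60000L);

        System.out.println(timeout + " " + missing);
    }
}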
From source file:org.apache.accumulo.server.fs.VolumeManagerImpl.java
License:Apache License
private static long correctBlockSize(Configuration conf, long blockSize) {
    if (blockSize <= 0)
        blockSize = conf.getLong("dfs.block.size", 67108864);

    int checkSum = conf.getInt("io.bytes.per.checksum", 512);
    blockSize -= blockSize % checkSum;
    blockSize = Math.max(blockSize, checkSum);
    return blockSize;
}
From source file:org.apache.apex.benchmark.memsql.MemsqlInputBenchmarkTest.java
License:Apache License
@Test
public void testMethod() throws SQLException, IOException {
    Configuration conf = new Configuration();
    InputStream inputStream = new FileInputStream("src/site/conf/dt-site-memsql.xml");
    conf.addResource(inputStream);

    MemsqlStore memsqlStore = new MemsqlStore();
    memsqlStore.setDatabaseUrl(conf.get("dt.rootDbUrl"));
    memsqlStore.setConnectionProperties(conf.get(
            "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));

    AbstractMemsqlOutputOperatorTest.memsqlInitializeDatabase(memsqlStore);

    MemsqlPOJOOutputOperator outputOperator = new MemsqlPOJOOutputOperator();
    outputOperator.getStore().setDatabaseUrl(
            conf.get("dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.dbUrl"));
    outputOperator.getStore().setConnectionProperties(conf.get(
            "dt.application.MemsqlInputBenchmark.operator.memsqlInputOperator.store.connectionProperties"));
    outputOperator.setBatchSize(BATCH_SIZE);

    Random random = new Random();

    com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
    attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
    attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
    attributeMap.put(DAG.APPLICATION_ID, APP_ID);

    OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

    long seedSize = conf.getLong("dt.seedSize", SEED_SIZE);

    outputOperator.setup(context);
    outputOperator.beginWindow(0);

    for (long valueCounter = 0; valueCounter < seedSize; valueCounter++) {
        outputOperator.input.put(random.nextInt());
    }

    outputOperator.endWindow();
    outputOperator.teardown();

    MemsqlInputBenchmark app = new MemsqlInputBenchmark();
    LocalMode lm = LocalMode.newInstance();

    try {
        lm.prepareDAG(app, conf);
        LocalMode.Controller lc = lm.getController();
        lc.run(20000);
    } catch (Exception ex) {
        DTThrowable.rethrow(ex);
    }

    IOUtils.closeQuietly(inputStream);
}
From source file:org.apache.apex.examples.mobile.Application.java
License:Apache License
@Override
public void populateDAG(DAG dag, Configuration conf) {
    String lPhoneRange = conf.get(PHONE_RANGE_PROP, null);
    if (lPhoneRange != null) {
        String[] tokens = lPhoneRange.split("-");
        if (tokens.length != 2) {
            throw new IllegalArgumentException("Invalid range: " + lPhoneRange);
        }
        this.phoneRange = Range.between(Integer.parseInt(tokens[0]), Integer.parseInt(tokens[1]));
    }
    LOG.debug("Phone range {}", this.phoneRange);

    RandomEventGenerator phones = dag.addOperator("Receiver", RandomEventGenerator.class);
    phones.setMinvalue(this.phoneRange.getMinimum());
    phones.setMaxvalue(this.phoneRange.getMaximum());

    PhoneMovementGenerator movementGen = dag.addOperator("LocationFinder", PhoneMovementGenerator.class);
    dag.setAttribute(movementGen, OperatorContext.COUNTERS_AGGREGATOR,
            new BasicCounters.LongAggregator<MutableLong>());

    StatelessThroughputBasedPartitioner<PhoneMovementGenerator> partitioner = new StatelessThroughputBasedPartitioner<PhoneMovementGenerator>();
    partitioner.setCooldownMillis(conf.getLong(COOL_DOWN_MILLIS, 45000));
    partitioner.setMaximumEvents(conf.getLong(MAX_THROUGHPUT, 30000));
    partitioner.setMinimumEvents(conf.getLong(MIN_THROUGHPUT, 10000));
    dag.setAttribute(movementGen, OperatorContext.STATS_LISTENERS,
            Arrays.asList(new StatsListener[] { partitioner }));
    dag.setAttribute(movementGen, OperatorContext.PARTITIONER, partitioner);

    // generate seed numbers
    Random random = new Random();
    int maxPhone = phoneRange.getMaximum() - phoneRange.getMinimum();
    int phonesToDisplay = conf.getInt(TOTAL_SEED_NOS, 10);
    for (int i = phonesToDisplay; i-- > 0;) {
        int phoneNo = phoneRange.getMinimum() + random.nextInt(maxPhone + 1);
        LOG.info("seed no: " + phoneNo);
        movementGen.phoneRegister.add(phoneNo);
    }
    // done generating data
    LOG.info("Finished generating seed data.");

    URI uri = PubSubHelper.getURI(dag);
    PubSubWebSocketOutputOperator<Object> wsOut = dag.addOperator("LocationResults",
            new PubSubWebSocketOutputOperator<Object>());
    wsOut.setUri(uri);
    PubSubWebSocketInputOperator<Map<String, String>> wsIn = dag.addOperator("QueryLocation",
            new PubSubWebSocketInputOperator<Map<String, String>>());
    wsIn.setUri(uri);

    // default partitioning: first connected stream to movementGen will be partitioned
    dag.addStream("Phone-Data", phones.integer_data, movementGen.data);
    dag.addStream("Results", movementGen.locationQueryResult, wsOut.input);
    dag.addStream("Query", wsIn.outputPort, movementGen.phoneQuery);
}
From source file:org.apache.apex.examples.transform.DynamicTransformApplication.java
License:Apache License
@Override
void setPartitioner(DAG dag, Configuration conf, TransformOperator transform) {
    StatelessThroughputBasedPartitioner<TransformOperator> partitioner = new StatelessThroughputBasedPartitioner<>();
    partitioner.setCooldownMillis(conf.getLong(COOL_DOWN_MILLIS, 10000));
    partitioner.setMaximumEvents(conf.getLong(MAX_THROUGHPUT, 30000));
    partitioner.setMinimumEvents(conf.getLong(MIN_THROUGHPUT, 10000));
    dag.setAttribute(transform, Context.OperatorContext.STATS_LISTENERS,
            Arrays.asList(new StatsListener[] { partitioner }));
    dag.setAttribute(transform, Context.OperatorContext.PARTITIONER, partitioner);
}
From source file:org.apache.blur.server.TableContext.java
License:Apache License
private static TableContext createInternal(TableDescriptor tableDescriptor, boolean remote, Iface client,
        String name, String tableUri) {
    TableContext tableContext;
    LOG.info("Creating table context for table [{0}]", name);
    Configuration configuration = getSystemConfiguration();
    BlurConfiguration blurConfiguration = getSystemBlurConfiguration();
    Map<String, String> tableProperties = tableDescriptor.getTableProperties();
    if (tableProperties != null) {
        for (Entry<String, String> prop : tableProperties.entrySet()) {
            configuration.set(prop.getKey(), prop.getValue());
            blurConfiguration.set(prop.getKey(), prop.getValue());
        }
    }
    tableContext = new TableContext();
    tableContext._configuration = configuration;
    tableContext._blurConfiguration = blurConfiguration;
    tableContext._tablePath = new Path(tableUri);
    tableContext._defaultFieldName = SUPER;
    tableContext._table = name;
    tableContext._descriptor = tableDescriptor;
    tableContext._timeBetweenCommits = configuration.getLong(BLUR_SHARD_TIME_BETWEEN_COMMITS, 60000);
    tableContext._timeBetweenRefreshs = configuration.getLong(BLUR_SHARD_TIME_BETWEEN_REFRESHS, 5000);
    tableContext._defaultPrimeDocTerm = new Term(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE);
    tableContext._defaultScoreType = ScoreType.SUPER;
    // TODO make configurable
    tableContext._discoverableFields = new HashSet<String>(
            Arrays.asList(BlurConstants.ROW_ID, BlurConstants.RECORD_ID, BlurConstants.FAMILY));
    // TODO make configurable
    tableContext._accessControlFactory = new FilterAccessControlFactory();

    boolean strict = tableDescriptor.isStrictTypes();
    String defaultMissingFieldType = tableDescriptor.getDefaultMissingFieldType();
    boolean defaultMissingFieldLessIndexing = tableDescriptor.isDefaultMissingFieldLessIndexing();
    Map<String, String> defaultMissingFieldProps = emptyIfNull(tableDescriptor.getDefaultMissingFieldProps());

    Path storagePath = new Path(tableContext._tablePath, TYPES);
    try {
        FieldManager fieldManager;
        if (remote) {
            fieldManager = new ThriftFieldManager(SUPER, new NoStopWordStandardAnalyzer(), strict,
                    defaultMissingFieldType, defaultMissingFieldLessIndexing, defaultMissingFieldProps,
                    configuration, client, name);
        } else {
            fieldManager = new HdfsFieldManager(SUPER, new NoStopWordStandardAnalyzer(), storagePath,
                    configuration, strict, defaultMissingFieldType, defaultMissingFieldLessIndexing,
                    defaultMissingFieldProps);
        }
        loadCustomTypes(tableContext, blurConfiguration, fieldManager);
        fieldManager.loadFromStorage();
        tableContext._fieldManager = fieldManager;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    Class<?> c1 = configuration.getClass(BLUR_SHARD_INDEX_DELETION_POLICY_MAXAGE,
            KeepOnlyLastCommitDeletionPolicy.class);
    tableContext._indexDeletionPolicy = (IndexDeletionPolicy) configure(
            ReflectionUtils.newInstance(c1, configuration), tableContext);
    Class<?> c2 = configuration.getClass(BLUR_SHARD_INDEX_SIMILARITY, FairSimilarity.class);
    tableContext._similarity = (Similarity) configure(ReflectionUtils.newInstance(c2, configuration),
            tableContext);

    String readInterceptorClass = blurConfiguration.get(BLUR_SHARD_READ_INTERCEPTOR);
    if (readInterceptorClass == null || readInterceptorClass.trim().isEmpty()) {
        tableContext._readInterceptor = DEFAULT_INTERCEPTOR;
    } else {
        try {
            @SuppressWarnings("unchecked")
            Class<? extends ReadInterceptor> clazz = (Class<? extends ReadInterceptor>) Class
                    .forName(readInterceptorClass);
            Constructor<? extends ReadInterceptor> constructor = clazz
                    .getConstructor(new Class[] { BlurConfiguration.class });
            tableContext._readInterceptor = constructor.newInstance(blurConfiguration);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    _cache.put(name, tableContext);
    return tableContext.clone();
}
From source file:org.apache.cassandra.hadoop.ColumnFamilyRecordWriter.java
License:Apache License
ColumnFamilyRecordWriter(Configuration conf) {
    this.conf = conf;
    this.queueSize = conf.getInt(ColumnFamilyOutputFormat.QUEUE_SIZE,
            32 * FBUtilities.getAvailableProcessors());
    batchThreshold = conf.getLong(ColumnFamilyOutputFormat.BATCH_THRESHOLD, 32);
    consistencyLevel = ConsistencyLevel.valueOf(ConfigHelper.getWriteConsistencyLevel(conf));
    this.ringCache = new RingCache(conf);
    this.clients = new HashMap<Range, RangeClient>();
}
From source file:org.apache.cassandra.hadoop.cql3.CqlRecordWriter.java
License:Apache License
CqlRecordWriter(Configuration conf) {
    this.conf = conf;
    this.queueSize = conf.getInt(ColumnFamilyOutputFormat.QUEUE_SIZE,
            32 * FBUtilities.getAvailableProcessors());
    batchThreshold = conf.getLong(ColumnFamilyOutputFormat.BATCH_THRESHOLD, 32);
    this.clients = new HashMap<>();

    try {
        String keyspace = ConfigHelper.getOutputKeyspace(conf);
        try (Session client = CqlConfigHelper.getOutputCluster(ConfigHelper.getOutputInitialAddress(conf), conf)
                .connect(keyspace)) {
            ringCache = new NativeRingCache(conf);
            if (client != null) {
                TableMetadata tableMetadata = client.getCluster().getMetadata()
                        .getKeyspace(client.getLoggedKeyspace())
                        .getTable(ConfigHelper.getOutputColumnFamily(conf));
                clusterColumns = tableMetadata.getClusteringColumns();
                partitionKeyColumns = tableMetadata.getPartitionKey();

                String cqlQuery = CqlConfigHelper.getOutputCql(conf).trim();
                if (cqlQuery.toLowerCase().startsWith("insert"))
                    throw new UnsupportedOperationException(
                            "INSERT with CqlRecordWriter is not supported, please use UPDATE/DELETE statement");
                cql = appendKeyWhereClauses(cqlQuery);
            } else {
                throw new IllegalArgumentException("Invalid configuration specified " + conf);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file:org.apache.cassandra.hadoop.fs.CassandraFileSystem.java
License:Apache License
public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this);

    store.initialize(this.uri, conf);

    subBlockSize = conf.getLong("fs.local.subblock.size", 256L * 1024L);
}
From source file:org.apache.crunch.impl.mr.run.CrunchCombineFileInputFormat.java
License:Apache License
public CrunchCombineFileInputFormat(JobContext jobContext) {
    if (getMaxSplitSize(jobContext) == Long.MAX_VALUE) {
        Configuration conf = jobContext.getConfiguration();
        if (conf.get(RuntimeParameters.COMBINE_FILE_BLOCK_SIZE) != null) {
            setMaxSplitSize(conf.getLong(RuntimeParameters.COMBINE_FILE_BLOCK_SIZE, 0));
        } else {
            setMaxSplitSize(jobContext.getConfiguration().getLong("dfs.block.size", 134217728L));
        }
    }
}
From source file:org.apache.crunch.kafka.inputformat.KafkaInputFormat.java
License:Apache License
/**
 * Reads the {@code configuration} to determine which topics, partitions, and offsets should be used for
 * reading data.
 *
 * @param configuration the configuration to derive the data to read.
 * @return a map of {@link TopicPartition} to a pair of start and end offsets.
 * @throws IllegalStateException if the {@code configuration} does not have the start and end offsets set
 *                               properly for a partition.
 */
public static Map<TopicPartition, Pair<Long, Long>> getOffsets(Configuration configuration) {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();
    // find configuration for all of the topics with defined partitions
    Map<String, String> topicPartitionKeys = configuration.getValByRegex(TOPIC_KEY_REGEX);

    // for each topic start to process its partitions
    for (String key : topicPartitionKeys.keySet()) {
        String topic = getTopicFromKey(key);
        int[] partitions = configuration.getInts(key);
        // for each partition find and add the start/end offset
        for (int partitionId : partitions) {
            TopicPartition topicPartition = new TopicPartition(topic, partitionId);
            long start = configuration.getLong(generatePartitionStartKey(topic, partitionId), Long.MIN_VALUE);
            long end = configuration.getLong(generatePartitionEndKey(topic, partitionId), Long.MIN_VALUE);

            if (start == Long.MIN_VALUE || end == Long.MIN_VALUE) {
                throw new IllegalStateException("The " + topicPartition + "has an invalid start:" + start
                        + " or end:" + end + " offset configured.");
            }

            offsets.put(topicPartition, Pair.of(start, end));
        }
    }

    return offsets;
}
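The Long.MIN_VALUE defaults above act as "not configured" sentinels: getLong cannot otherwise distinguish a missing property from one explicitly set to the fallback value. A minimal sketch of that pattern with a plain Configuration, using an illustrative property name rather than the real Kafka offset keys:

import org.apache.hadoop.conf.Configuration;

public class SentinelDefaultExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // conf.setLong("example.partition.0.start.offset", 100L); // intentionally left unset

        long start = conf.getLong("example.partition.0.start.offset", Long.MIN_VALUE);
        if (start == Long.MIN_VALUE) {
            // The sentinel default signals "missing", so the caller can fail fast.
            throw new IllegalStateException("start offset is not configured");
        }
    }
}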