List of usage examples for org.apache.hadoop.conf Configuration getClass
public Class<?> getClass(String name, Class<?> defaultValue)

Gets the value of the name property as a Class; if no such property is specified, defaultValue is returned.

Parameter: name - the class name.
Parameter: defaultValue - the default value to return if the property is not set.
Return: the property value as a Class, or defaultValue.
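Before the examples, a quick orientation: getClass is the standard hook for wiring pluggable implementations through configuration. The sketch below is illustrative only — the property key example.widget.class and the Widget/DefaultWidget types are hypothetical, not part of Hadoop — but the calls themselves (setClass, both getClass overloads, ReflectionUtils.newInstance) are the real Configuration API, and the three-argument overload shown here appears in several of the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class GetClassSketch {
    /** Hypothetical extension point, for illustration only. */
    public interface Widget { }

    public static class DefaultWidget implements Widget { }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Register an implementation under a (hypothetical) property key.
        conf.setClass("example.widget.class", DefaultWidget.class, Widget.class);

        // Two-argument form: falls back to the default when the property is unset.
        Class<?> raw = conf.getClass("example.widget.class", DefaultWidget.class);

        // Three-argument form (used in several examples below): the returned
        // class is checked against, and typed to, the Widget interface.
        Class<? extends Widget> typed = conf.getClass("example.widget.class",
                DefaultWidget.class, Widget.class);

        // ReflectionUtils instantiates the class and injects conf into Configurable types.
        Widget widget = ReflectionUtils.newInstance(typed, conf);
        System.out.println(raw.getName() + " -> " + widget.getClass().getSimpleName());
    }
}

Most of the examples below follow exactly this shape: read a Class from configuration with a sensible default, then instantiate it reflectively.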
. From source file:ai.grakn.kb.internal.computer.GraknSparkComputer.java
License:Open Source License
@SuppressWarnings("PMD.UnusedFormalParameter") private Future<ComputerResult> submitWithExecutor() { jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE)); String jobDescription = this.vertexProgram == null ? this.mapReducers.toString() : this.vertexProgram + "+" + this.mapReducers; // Use different output locations this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId); updateConfigKeys(sparkConfiguration); final Future<ComputerResult> result = computerService.submit(() -> { final long startTime = System.currentTimeMillis(); // apache and hadoop configurations that are used throughout the graph computer computation final org.apache.commons.configuration.Configuration graphComputerConfiguration = new HadoopConfiguration( this.sparkConfiguration); if (!graphComputerConfiguration.containsKey(Constants.SPARK_SERIALIZER)) { graphComputerConfiguration.setProperty(Constants.SPARK_SERIALIZER, GryoSerializer.class.getCanonicalName()); }// w ww . j a v a2s . c o m graphComputerConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES, this.persist.equals(GraphComputer.Persist.EDGES)); final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(graphComputerConfiguration); final Storage fileSystemStorage = FileSystemStorage.open(hadoopConfiguration); final boolean inputFromHDFS = FileInputFormat.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class)); final boolean inputFromSpark = PersistedInputRDD.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class)); final boolean outputToHDFS = FileOutputFormat.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class)); final boolean outputToSpark = PersistedOutputRDD.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class)); final boolean skipPartitioner = graphComputerConfiguration .getBoolean(Constants.GREMLIN_SPARK_SKIP_PARTITIONER, false); final boolean skipPersist = graphComputerConfiguration .getBoolean(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE, false); if (inputFromHDFS) { String inputLocation = Constants .getSearchGraphLocation(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION), fileSystemStorage) .orElse(null); if (null != inputLocation) { try { graphComputerConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath() .toString()); hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath() .toString()); } catch (final IOException e) { throw new IllegalStateException(e.getMessage(), e); } } } final InputRDD inputRDD; final OutputRDD outputRDD; final boolean filtered; try { inputRDD = InputRDD.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class)) ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, InputRDD.class, InputRDD.class).newInstance() : InputFormatRDD.class.newInstance(); outputRDD = OutputRDD.class.isAssignableFrom( hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class)) ? 
hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, OutputRDD.class, OutputRDD.class).newInstance() : OutputFormatRDD.class.newInstance(); // if the input class can filter on load, then set the filters if (inputRDD instanceof InputFormatRDD && GraphFilterAware.class.isAssignableFrom(hadoopConfiguration.getClass( Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class))) { GraphFilterAware.storeGraphFilter(graphComputerConfiguration, hadoopConfiguration, this.graphFilter); filtered = false; } else if (inputRDD instanceof GraphFilterAware) { ((GraphFilterAware) inputRDD).setGraphFilter(this.graphFilter); filtered = false; } else filtered = this.graphFilter.hasFilter(); } catch (final InstantiationException | IllegalAccessException e) { throw new IllegalStateException(e.getMessage(), e); } // create the spark context from the graph computer configuration final JavaSparkContext sparkContext = new JavaSparkContext(Spark.create(hadoopConfiguration)); final Storage sparkContextStorage = SparkContextStorage.open(); sparkContext.setJobGroup(jobGroupId, jobDescription); GraknSparkMemory memory = null; // delete output location final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null); if (null != outputLocation) { if (outputToHDFS && fileSystemStorage.exists(outputLocation)) { fileSystemStorage.rm(outputLocation); } if (outputToSpark && sparkContextStorage.exists(outputLocation)) { sparkContextStorage.rm(outputLocation); } } // the Spark application name will always be set by SparkContextStorage, // thus, INFO the name to make it easier to debug logger.debug(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "[" + this.mapReducers + "]"); // add the project jars to the cluster this.loadJars(hadoopConfiguration, sparkContext); updateLocalConfiguration(sparkContext, hadoopConfiguration); // create a message-passing friendly rdd from the input rdd boolean partitioned = false; JavaPairRDD<Object, VertexWritable> loadedGraphRDD = inputRDD.readGraphRDD(graphComputerConfiguration, sparkContext); // if there are vertex or edge filters, filter the loaded graph rdd prior to partitioning and persisting if (filtered) { this.logger.debug("Filtering the loaded graphRDD: " + this.graphFilter); loadedGraphRDD = GraknSparkExecutor.applyGraphFilter(loadedGraphRDD, this.graphFilter); } // if the loaded graph RDD is already partitioned use that partitioner, // else partition it with HashPartitioner if (loadedGraphRDD.partitioner().isPresent()) { this.logger.debug("Using the existing partitioner associated with the loaded graphRDD: " + loadedGraphRDD.partitioner().get()); } else { if (!skipPartitioner) { final Partitioner partitioner = new HashPartitioner( this.workersSet ? 
this.workers : loadedGraphRDD.partitions().size()); this.logger.debug("Partitioning the loaded graphRDD: " + partitioner); loadedGraphRDD = loadedGraphRDD.partitionBy(partitioner); partitioned = true; assert loadedGraphRDD.partitioner().isPresent(); } else { // no easy way to test this with a test case assert skipPartitioner == !loadedGraphRDD.partitioner().isPresent(); this.logger.debug("Partitioning has been skipped for the loaded graphRDD via " + Constants.GREMLIN_SPARK_SKIP_PARTITIONER); } } // if the loaded graphRDD was already partitioned previous, // then this coalesce/repartition will not take place if (this.workersSet) { // ensures that the loaded graphRDD does not have more partitions than workers if (loadedGraphRDD.partitions().size() > this.workers) { loadedGraphRDD = loadedGraphRDD.coalesce(this.workers); } else { // ensures that the loaded graphRDD does not have less partitions than workers if (loadedGraphRDD.partitions().size() < this.workers) { loadedGraphRDD = loadedGraphRDD.repartition(this.workers); } } } // persist the vertex program loaded graph as specified by configuration // or else use default cache() which is MEMORY_ONLY if (!skipPersist && (!inputFromSpark || partitioned || filtered)) { loadedGraphRDD = loadedGraphRDD.persist(StorageLevel.fromString( hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY"))); } // final graph with view // (for persisting and/or mapReducing -- may be null and thus, possible to save space/time) JavaPairRDD<Object, VertexWritable> computedGraphRDD = null; try { //////////////////////////////// // process the vertex program // //////////////////////////////// if (null != this.vertexProgram) { memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, sparkContext); ///////////////// // if there is a registered VertexProgramInterceptor, use it to bypass the GraphComputer semantics if (graphComputerConfiguration .containsKey(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) { try { final GraknSparkVertexProgramInterceptor<VertexProgram> interceptor = (GraknSparkVertexProgramInterceptor) Class .forName(graphComputerConfiguration .getString(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) .newInstance(); computedGraphRDD = interceptor.apply(this.vertexProgram, loadedGraphRDD, memory); } catch (final ClassNotFoundException | IllegalAccessException | InstantiationException e) { throw new IllegalStateException(e.getMessage()); } } else { // standard GraphComputer semantics // get a configuration that will be propagated to all workers final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration(); this.vertexProgram.storeState(vertexProgramConfiguration); // set up the vertex program and wire up configurations this.vertexProgram.setup(memory); JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null; memory.broadcastMemory(sparkContext); // execute the vertex program while (true) { if (Thread.interrupted()) { sparkContext.cancelAllJobs(); throw new TraversalInterruptedException(); } memory.setInExecute(true); viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(loadedGraphRDD, viewIncomingRDD, memory, graphComputerConfiguration, vertexProgramConfiguration); memory.setInExecute(false); if (this.vertexProgram.terminate(memory)) { break; } else { memory.incrIteration(); memory.broadcastMemory(sparkContext); } } // if the graph will be continued to be used (persisted or mapreduced), // then generate a view+graph if ((null != outputRDD && 
!this.persist.equals(Persist.NOTHING)) || !this.mapReducers.isEmpty()) { computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(loadedGraphRDD, viewIncomingRDD, this.vertexProgram.getVertexComputeKeys()); assert null != computedGraphRDD && computedGraphRDD != loadedGraphRDD; } else { // ensure that the computedGraphRDD was not created assert null == computedGraphRDD; } } ///////////////// memory.complete(); // drop all transient memory keys // write the computed graph to the respective output (rdd or output format) if (null != outputRDD && !this.persist.equals(Persist.NOTHING)) { // the logic holds that a computeGraphRDD must be created at this point assert null != computedGraphRDD; outputRDD.writeGraphRDD(graphComputerConfiguration, computedGraphRDD); } } final boolean computedGraphCreated = computedGraphRDD != null && computedGraphRDD != loadedGraphRDD; if (!computedGraphCreated) { computedGraphRDD = loadedGraphRDD; } final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory); ////////////////////////////// // process the map reducers // ////////////////////////////// if (!this.mapReducers.isEmpty()) { // create a mapReduceRDD for executing the map reduce jobs on JavaPairRDD<Object, VertexWritable> mapReduceRDD = computedGraphRDD; if (computedGraphCreated && !outputToSpark) { // drop all the edges of the graph as they are not used in mapReduce processing mapReduceRDD = computedGraphRDD.mapValues(vertexWritable -> { vertexWritable.get().dropEdges(Direction.BOTH); return vertexWritable; }); // if there is only one MapReduce to execute, don't bother wasting the clock cycles. if (this.mapReducers.size() > 1) { mapReduceRDD = mapReduceRDD.persist(StorageLevel.fromString(hadoopConfiguration .get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY"))); } } for (final MapReduce mapReduce : this.mapReducers) { // execute the map reduce job final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration( graphComputerConfiguration); mapReduce.storeState(newApacheConfiguration); // map final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(mapReduceRDD, mapReduce, newApacheConfiguration); // combine final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE) ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration) : mapRDD; // reduce final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE) ? 
GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration) : combineRDD; // write the map reduce output back to disk and computer result memory if (null != outputRDD) { mapReduce.addResultToMemory(finalMemory, outputRDD.writeMemoryRDD( graphComputerConfiguration, mapReduce.getMemoryKey(), reduceRDD)); } } // if the mapReduceRDD is not simply the computed graph, unpersist the mapReduceRDD if (computedGraphCreated && !outputToSpark) { assert loadedGraphRDD != computedGraphRDD; assert mapReduceRDD != computedGraphRDD; mapReduceRDD.unpersist(); } else { assert mapReduceRDD == computedGraphRDD; } } // unpersist the loaded graph if it will not be used again (no PersistedInputRDD) // if the graphRDD was loaded from Spark, but then partitioned or filtered, its a different RDD if (!inputFromSpark || partitioned || filtered) { loadedGraphRDD.unpersist(); } // unpersist the computed graph if it will not be used again (no PersistedOutputRDD) // if the computed graph is the loadedGraphRDD because it was not mutated and not-unpersisted, // then don't unpersist the computedGraphRDD/loadedGraphRDD if ((!outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING)) && computedGraphCreated) { computedGraphRDD.unpersist(); } // delete any file system or rdd data if persist nothing if (null != outputLocation && this.persist.equals(GraphComputer.Persist.NOTHING)) { if (outputToHDFS) { fileSystemStorage.rm(outputLocation); } if (outputToSpark) { sparkContextStorage.rm(outputLocation); } } // update runtime and return the newly computed graph finalMemory.setRuntime(System.currentTimeMillis() - startTime); // clear properties that should not be propagated in an OLAP chain graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_GRAPH_FILTER); graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR); graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE); graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_PARTITIONER); return new DefaultComputerResult(InputOutputHelper.getOutputGraph(graphComputerConfiguration, this.resultGraph, this.persist), finalMemory.asImmutable()); } catch (Exception e) { // So it throws the same exception as tinker does throw new RuntimeException(e); } }); computerService.shutdown(); return result; }
From source file:cascading.tap.hadoop.Hfs.java
License:Open Source License
/**
 * Based on the configuration, handles and sets {@link CombineFileInputFormat} as the input
 * format.
 */
private void handleCombineFileInputFormat(Configuration conf) {
    // if combining files, override the configuration to use CombineFileInputFormat
    if (!getUseCombinedInput(conf))
        return;

    // get the prescribed individual input format from the underlying scheme so it can be used by CombinedInputFormat
    String individualInputFormat = conf.get("mapred.input.format.class");

    if (individualInputFormat == null)
        throw new TapException("input format is missing from the underlying scheme");

    if (individualInputFormat.equals(CombinedInputFormat.class.getName())
            && conf.get(CombineFileRecordReaderWrapper.INDIVIDUAL_INPUT_FORMAT) == null)
        throw new TapException(
                "the input format class is already the combined input format but the underlying input format is missing");

    // if safe mode is on (default) throw an exception if the InputFormat is not a FileInputFormat, otherwise log a
    // warning and don't use the CombineFileInputFormat
    boolean safeMode = getCombinedInputSafeMode(conf);

    if (!FileInputFormat.class.isAssignableFrom(conf.getClass("mapred.input.format.class", null))) {
        if (safeMode)
            throw new TapException(
                    "input format must be of type org.apache.hadoop.mapred.FileInputFormat, got: "
                            + individualInputFormat);
        else
            LOG.warn(
                    "not combining input splits with CombineFileInputFormat, {} is not of type org.apache.hadoop.mapred.FileInputFormat.",
                    individualInputFormat);
    } else {
        // set the underlying individual input format
        conf.set(CombineFileRecordReaderWrapper.INDIVIDUAL_INPUT_FORMAT, individualInputFormat);

        // override the input format class
        conf.setClass("mapred.input.format.class", CombinedInputFormat.class, InputFormat.class);
    }
}
From source file:cascading.tap.hadoop.io.CombineFileRecordReaderWrapper.java
License:Open Source License
public CombineFileRecordReaderWrapper(CombineFileSplit split, Configuration conf, Reporter reporter,
        Integer idx) throws Exception {
    FileSplit fileSplit = new FileSplit(split.getPath(idx), split.getOffset(idx), split.getLength(idx),
            split.getLocations());

    Class<?> clz = conf.getClass(INDIVIDUAL_INPUT_FORMAT, null);
    FileInputFormat<K, V> inputFormat = (FileInputFormat<K, V>) clz.newInstance();

    if (inputFormat instanceof Configurable)
        ((Configurable) inputFormat).setConf(conf);

    delegate = inputFormat.getRecordReader(fileSplit, (JobConf) conf, reporter);
}
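One behavior the constructor above depends on: when the default passed to getClass is null and the property is unset, getClass returns null, so clz.newInstance() would fail with a NullPointerException rather than a descriptive error. A small guard — a sketch only, not part of the Cascading source — makes that failure mode explicit:

import org.apache.hadoop.conf.Configuration;

// Sketch: guarding a getClass lookup whose default is null.
public class NullDefaultGuard {
    public static Class<?> requireClass(Configuration conf, String key) {
        Class<?> clz = conf.getClass(key, null);
        if (clz == null)
            throw new IllegalStateException("no class configured under " + key);
        return clz;
    }
}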
From source file:co.cask.cdap.internal.app.runtime.batch.dataset.output.MultipleOutputsMainOutputWrapper.java
License:Apache License
private OutputFormat<K, V> getRootOutputFormat(JobContext context) {
    if (innerFormat == null) {
        Configuration conf = context.getConfiguration();
        @SuppressWarnings("unchecked")
        Class<OutputFormat<K, V>> c = (Class<OutputFormat<K, V>>) conf.getClass(ROOT_OUTPUT_FORMAT,
                FileOutputFormat.class);
        try {
            innerFormat = c.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }
    return innerFormat;
}
From source file:com.alibaba.wasp.fserver.EntityGroup.java
License:Apache License
/**
 * default
 *
 * @throws java.io.IOException
 **/
@SuppressWarnings("unchecked")
public EntityGroup(Configuration conf, EntityGroupInfo egi, FTable table, FServerServices service)
        throws IOException {
    this.conf = conf;
    this.ftableDescriptor = table;
    this.entityGroupInfo = egi;
    try {
        Class<? extends Redo> redoClass = (Class<? extends Redo>) conf.getClass(FConstants.REDO_IMPL,
                RedoLog.class);
        Constructor<? extends Redo> c = redoClass.getConstructor(EntityGroupInfo.class, Configuration.class);
        this.redo = c.newInstance(entityGroupInfo, conf);
    } catch (Throwable e) {
        throw new IllegalStateException("Could not instantiate a entityGroup instance.", e);
    }
    this.transactionRetryPause = conf.getInt(FConstants.WASP_TRANSACTION_RETRY_PAUSE,
            FConstants.DEFAULT_WASP_TRANSACTION_RETRY_PAUSE);
    this.rowLockWaitDuration = conf.getInt("wasp.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION);
    this.services = service;
    this.leases = new Leases(
            conf.getInt(FConstants.THREAD_WAKE_FREQUENCY, FConstants.DEFAULT_THREAD_WAKE_FREQUENCY));
    this.lockTimeoutPeriod = conf.getInt(FConstants.SELECT_FOR_UPDATE_LOCK_TIMEOUT,
            FConstants.DEFAULT_SELECT_FOR_UPDATE_LOCK_TIMEOUT);
    if (this.services != null) {
        this.metricsEntityGroup = new MetricsEntityGroup(new MetricsEntityGroupWrapperImpl(this));
    } else {
        this.metricsEntityGroup = null;
    }
    if (LOG.isDebugEnabled()) {
        // Write out EntityGroup name as string and its encoded name.
        LOG.debug("Instantiated " + this);
    }
}
From source file:com.alibaba.wasp.fserver.EntityGroup.java
License:Apache License
/**
 * A utility method to create new instances of EntityGroup based on the
 * configuration property.
 *
 * @param conf
 *          is global configuration settings.
 * @param entityGroupInfo
 *          - EntityGroupInfo that describes the entityGroup (if new, then
 *          read them from the supplied path).
 * @param ftd
 * @param fsServices
 * @return the new instance
 */
public static EntityGroup newEntityGroup(Configuration conf, EntityGroupInfo entityGroupInfo,
        final FTable ftd, FServerServices fsServices) {
    try {
        @SuppressWarnings("unchecked")
        Class<? extends EntityGroup> entityGroupClass = (Class<? extends EntityGroup>) conf
                .getClass(FConstants.ENTITYGROUP_IMPL, EntityGroup.class);
        Constructor<? extends EntityGroup> c = entityGroupClass.getConstructor(Configuration.class,
                EntityGroupInfo.class, FTable.class, FServerServices.class);
        return c.newInstance(conf, entityGroupInfo, ftd, fsServices);
    } catch (Throwable e) {
        throw new IllegalStateException("Could not instantiate a entityGroup instance.", e);
    }
}
From source file:com.alibaba.wasp.fserver.FServer.java
License:Apache License
/**
 * @param args
 */
public static void main(String[] args) {
    Configuration conf = WaspConfiguration.create();
    @SuppressWarnings("unchecked")
    Class<? extends FServer> fserverClass = (Class<? extends FServer>) conf.getClass(FConstants.FSERVER_IMPL,
            FServer.class);
    new FServerCommandLine(fserverClass).doMain(args);
}
From source file:com.alibaba.wasp.ipc.WaspRPC.java
License:Apache License
private static synchronized RpcEngine getProtocolEngine(Class protocol, Configuration conf) {
    RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
    if (engine == null) {
        // check for a configured default engine
        Class<?> defaultEngine = conf.getClass(RPC_ENGINE_PROP, ProtobufRpcEngine.class);

        // check for a per interface override
        Class<?> impl = conf.getClass(RPC_ENGINE_PROP + "." + protocol.getName(), defaultEngine);

        LOG.debug("Using " + impl.getName() + " for " + protocol.getName());
        engine = (RpcEngine) ReflectionUtils.newInstance(impl, conf);
        if (protocol.isInterface())
            PROXY_ENGINES.put(Proxy.getProxyClass(protocol.getClassLoader(), protocol), engine);
        PROTOCOL_ENGINES.put(protocol, engine);
    }
    return engine;
}
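The two getClass calls above implement a default-plus-override lookup: a global engine under RPC_ENGINE_PROP, then a per-protocol key formed by appending the protocol's class name, with the result of the first lookup serving as the fallback for the second. A minimal sketch of the same pattern, using hypothetical keys in place of the Wasp constants:

import org.apache.hadoop.conf.Configuration;

// Sketch of the default-plus-override lookup pattern (hypothetical keys).
public class LayeredLookup {
    public static Class<?> engineFor(Configuration conf, Class<?> protocol) {
        // Global default, itself falling back to a known implementation.
        Class<?> defaultEngine = conf.getClass("example.rpc.engine", Object.class);
        // Per-interface override falls back to the global default.
        return conf.getClass("example.rpc.engine." + protocol.getName(), defaultEngine);
    }
}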
From source file:com.alibaba.wasp.LocalWaspCluster.java
License:Apache License
@SuppressWarnings("unchecked") private static Class<? extends FServer> getFServerImplementation(final Configuration conf) { return (Class<? extends FServer>) conf.getClass(FConstants.FSERVER_IMPL, FServer.class); }
From source file:com.alibaba.wasp.LocalWaspCluster.java
License:Apache License
@SuppressWarnings("unchecked") private static Class<? extends FMaster> getMasterImplementation(final Configuration conf) { return (Class<? extends FMaster>) conf.getClass(FConstants.MASTER_IMPL, FMaster.class); }