Example usage for org.apache.hadoop.conf Configuration getClass

List of usage examples for org.apache.hadoop.conf Configuration getClass

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration getClass.

Prototype

public <U> Class<? extends U> getClass(String name, Class<? extends U> defaultValue, Class<U> xface) 

Document

Get the value of the name property as a Class implementing the interface specified by xface.
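
A minimal sketch of the typical pattern, for orientation: read a Class value from the configuration, fall back to a default that implements the required interface, and instantiate it with ReflectionUtils. The key my.handler.class and the Handler/DefaultHandler types are hypothetical, purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class GetClassSketch {
    // Hypothetical plug-in interface and default implementation.
    public interface Handler {
        void handle();
    }

    public static class DefaultHandler implements Handler {
        @Override
        public void handle() {
            System.out.println("default handler");
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // An unset key falls back to the supplied default; the third argument
        // makes getClass verify that a class resolved from the configuration
        // actually implements Handler.
        Class<? extends Handler> clazz = conf.getClass("my.handler.class", DefaultHandler.class, Handler.class);
        Handler handler = ReflectionUtils.newInstance(clazz, conf);
        handler.handle();
    }
}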

Usage

From source file:org.apache.giraph.graph.BspUtils.java

License:Apache License

/**
 * Get the user's subclassed VertexResolver.
 *
 * @param <I> Vertex id
 * @param <V> Vertex data
 * @param <E> Edge data
 * @param <M> Message data
 * @param conf Configuration to check
 * @return User's vertex resolver class
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <I extends WritableComparable, V extends Writable, E extends Writable, M extends Writable> Class<? extends VertexResolver<I, V, E, M>> getVertexResolverClass(
        Configuration conf) {
    return (Class<? extends VertexResolver<I, V, E, M>>) conf.getClass(GiraphJob.VERTEX_RESOLVER_CLASS,
            VertexResolver.class, VertexResolver.class);
}

From source file:org.apache.giraph.graph.BspUtils.java

License:Apache License

/**
 * Get the user's subclassed WorkerContext.
 *
 * @param conf Configuration to check
 * @return User's worker context class
 */
public static Class<? extends WorkerContext> getWorkerContextClass(Configuration conf) {
    return (Class<? extends WorkerContext>) conf.getClass(GiraphJob.WORKER_CONTEXT_CLASS,
            DefaultWorkerContext.class, WorkerContext.class);
}

From source file:org.apache.giraph.graph.BspUtils.java

License:Apache License

/**
 * Get the user's subclassed {@link MasterCompute}.
 *
 * @param conf Configuration to check
 * @return User's master class
 */
public static Class<? extends MasterCompute> getMasterComputeClass(Configuration conf) {
    return (Class<? extends MasterCompute>) conf.getClass(GiraphJob.MASTER_COMPUTE_CLASS,
            DefaultMasterCompute.class, MasterCompute.class);
}

From source file:org.apache.giraph.graph.BspUtils.java

License:Apache License

/**
 * Get the user's subclassed {@link BasicVertex}.
 *
 * @param <I> Vertex id
 * @param <V> Vertex data
 * @param <E> Edge data
 * @param <M> Message data
 * @param conf Configuration to check
 * @return User's vertex class
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static <I extends WritableComparable, V extends Writable, E extends Writable, M extends Writable> Class<? extends BasicVertex<I, V, E, M>> getVertexClass(
        Configuration conf) {
    return (Class<? extends BasicVertex<I, V, E, M>>) conf.getClass(GiraphJob.VERTEX_CLASS, null,
            BasicVertex.class);
}
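
Note that this example passes null as the default, so the lookup returns null when GiraphJob.VERTEX_CLASS has not been set, and the caller must handle that case itself. A minimal caller-side sketch (the concrete type parameters are illustrative):

Class<? extends BasicVertex<LongWritable, DoubleWritable, FloatWritable, DoubleWritable>> vertexClass = BspUtils.getVertexClass(conf);
if (vertexClass == null) {
    throw new IllegalStateException(GiraphJob.VERTEX_CLASS + " must be set in the configuration");
}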

From source file:org.apache.hadoop.examples.dancing.DistributedPentomino.java

License:Apache License

public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    if (args.length == 0) {
        System.out.println("Usage: pentomino <output> [-depth #] [-height #] [-width #]");
        ToolRunner.printGenericCommandUsage(System.out);
        return 2;
    }
    // check for passed parameters, otherwise use defaults
    int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
    int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
    int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
    for (int i = 0; i < args.length; i++) {
        if (args[i].equalsIgnoreCase("-depth")) {
            depth = Integer.parseInt(args[++i].trim());
        } else if (args[i].equalsIgnoreCase("-height")) {
            height = Integer.parseInt(args[++i].trim());
        } else if (args[i].equalsIgnoreCase("-width")) {
            width = Integer.parseInt(args[++i].trim());
        }
    }
    // now set the values within conf for M/R tasks to read; this ensures the
    // values are set, preventing MAPREDUCE-4678
    conf.setInt(Pentomino.WIDTH, width);
    conf.setInt(Pentomino.HEIGHT, height);
    conf.setInt(Pentomino.DEPTH, depth);
    Class<? extends Pentomino> pentClass = conf.getClass(Pentomino.CLASS, OneSidedPentomino.class,
            Pentomino.class);
    int numMaps = conf.getInt(MRJobConfig.NUM_MAPS, DEFAULT_MAPS);
    Path output = new Path(args[0]);
    Path input = new Path(output + "_input");
    FileSystem fileSys = FileSystem.get(conf);
    try {
        Job job = Job.getInstance(conf);
        FileInputFormat.setInputPaths(job, input);
        FileOutputFormat.setOutputPath(job, output);
        job.setJarByClass(PentMap.class);

        job.setJobName("dancingElephant");
        Pentomino pent = ReflectionUtils.newInstance(pentClass, conf);
        pent.initialize(width, height);
        long inputSize = createInputDirectory(fileSys, input, pent, depth);
        // for forcing the number of maps
        FileInputFormat.setMaxInputSplitSize(job, (inputSize / numMaps));

        // the keys are the prefix strings
        job.setOutputKeyClass(Text.class);
        // the values are puzzle solutions
        job.setOutputValueClass(Text.class);

        job.setMapperClass(PentMap.class);
        job.setReducerClass(Reducer.class);

        job.setNumReduceTasks(1);

        return (job.waitForCompletion(true) ? 0 : 1);
    } finally {
        fileSys.delete(input, true);
    }
}
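
Because the puzzle implementation is resolved through getClass, a caller can swap it by setting Pentomino.CLASS before the tool runs. A minimal sketch (the output path is illustrative; OneSidedPentomino is also the built-in default):

Configuration conf = new Configuration();
// Register the puzzle implementation under the key the tool reads.
conf.setClass(Pentomino.CLASS, OneSidedPentomino.class, Pentomino.class);
int exitCode = ToolRunner.run(conf, new DistributedPentomino(), new String[] { "/tmp/pentomino-out" });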

From source file:org.apache.hama.bsp.TestTaskAllocation.java

License:Apache License

public void testBestEffortDataLocality() throws Exception {

    Configuration conf = new Configuration();

    String[] locations = new String[] { "host6", "host4", "host3" };
    String value = "data";
    RawSplit split = new RawSplit();
    split.setLocations(locations);
    split.setBytes(value.getBytes(), 0, value.getBytes().length);
    split.setDataLength(value.getBytes().length);

    assertEquals(value.getBytes().length, (int) split.getDataLength());

    Map<GroomServerStatus, Integer> taskCountInGroomMap = new HashMap<GroomServerStatus, Integer>(20);
    BSPResource[] resources = new BSPResource[0];
    BSPJob job = new BSPJob(new BSPJobID("checkpttest", 1), "/tmp");
    JobInProgress jobProgress = new JobInProgress(job.getJobID(), conf);
    TaskInProgress taskInProgress = new TaskInProgress(job.getJobID(), "job.xml", split, conf, jobProgress, 1);

    Map<String, GroomServerStatus> groomStatuses = new HashMap<String, GroomServerStatus>(20);

    for (int i = 0; i < 10; ++i) {

        String name = "host" + i;
        GroomServerStatus status = new GroomServerStatus(name, new ArrayList<TaskStatus>(), 0, 3);
        groomStatuses.put(name, status);
        taskCountInGroomMap.put(status, 0);

    }

    // The empty-string key is never set, so getClass always falls back to the
    // BestEffortDataLocalTaskAllocator default supplied here.
    TaskAllocationStrategy strategy = ReflectionUtils.newInstance(
            conf.getClass("", BestEffortDataLocalTaskAllocator.class, TaskAllocationStrategy.class), conf);

    String[] hosts = strategy.selectGrooms(groomStatuses, taskCountInGroomMap, resources, taskInProgress);

    List<String> list = new ArrayList<String>();

    for (int i = 0; i < hosts.length; ++i) {
        list.add(hosts[i]);
    }

    assertTrue(list.contains("host6"));
    assertTrue(list.contains("host3"));
    assertTrue(list.contains("host4"));

}

From source file:org.apache.hama.graph.GraphJobRunner.java

License:Apache License

@SuppressWarnings("unchecked")
public static <V extends WritableComparable<? super V>, E extends Writable, M extends Writable> void initClasses(
        Configuration conf) {
    Class<V> vertexIdClass = (Class<V>) conf.getClass(GraphJob.VERTEX_ID_CLASS_ATTR, Text.class,
            Writable.class);
    Class<M> vertexValueClass = (Class<M>) conf.getClass(GraphJob.VERTEX_VALUE_CLASS_ATTR, IntWritable.class,
            Writable.class);
    Class<E> edgeValueClass = (Class<E>) conf.getClass(GraphJob.VERTEX_EDGE_VALUE_CLASS_ATTR, IntWritable.class,
            Writable.class);
    vertexClass = (Class<Vertex<?, ?, ?>>) conf.getClass("hama.graph.vertex.class", Vertex.class);

    // set the classes statically, so we can save memory per message
    VERTEX_ID_CLASS = vertexIdClass;
    VERTEX_VALUE_CLASS = vertexValueClass;
    VERTEX_CLASS = vertexClass;
    EDGE_VALUE_CLASS = edgeValueClass;
}
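
Note that the vertex class lookup above uses the two-argument getClass(String, Class<?>) overload, which returns whatever class the key names without any interface check, while the three-argument form used for the other lookups throws a RuntimeException when the named class does not implement the xface argument. A self-contained sketch of the difference, with a hypothetical key:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class GetClassOverloads {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical key, for illustration only.
        conf.setClass("example.writable.class", Text.class, Writable.class);

        // Two-argument overload: no conformance check against any interface.
        Class<?> unchecked = conf.getClass("example.writable.class", Object.class);

        // Three-argument overload: fails at lookup time if the named class
        // does not implement Writable (Text does, so this succeeds).
        Class<? extends Writable> checked = conf.getClass("example.writable.class", Text.class, Writable.class);

        System.out.println(unchecked + " / " + checked);
    }
}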

From source file:org.apache.hama.pipes.Submitter.java

License:Apache License

/**
 * Get the user's original partitioner.
 * 
 * @param conf the configuration to look in
 * @return the class that the user submitted
 */
@SuppressWarnings("rawtypes")
static Class<? extends Partitioner> getJavaPartitioner(Configuration conf) {
    return conf.getClass("hama.pipes.partitioner", HashPartitioner.class, Partitioner.class);
}
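
The lookup above pairs with Configuration.setClass on the submitting side. A minimal sketch, assuming a hypothetical MyPartitioner that implements the same Partitioner interface:

// Register a (hypothetical) custom partitioner under the key the lookup reads;
// setClass verifies that MyPartitioner is assignable to Partitioner.
conf.setClass("hama.pipes.partitioner", MyPartitioner.class, Partitioner.class);

// getJavaPartitioner(conf) now resolves MyPartitioner instead of HashPartitioner.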

From source file:org.apache.hoya.providers.HoyaProviderFactory.java

License:Apache License

/**
 * Create a provider for a specific application
 * @param application app
 * @return app instance
 * @throws HoyaException on any instantiation problem
 */
public static HoyaProviderFactory createHoyaProviderFactory(String application) throws HoyaException {
    Configuration conf = loadHoyaConfiguration();
    if (application == null) {
        application = DEFAULT_CLUSTER_TYPE;
    }
    String providerKey = String.format(HoyaXmlConfKeys.KEY_PROVIDER, application);
    if (application.contains(".")) {
        log.debug("Treating {} as a classname", application);
        String name = "classname.key";
        conf.set(name, application);
        providerKey = name;
    }

    Class<? extends HoyaProviderFactory> providerClass;
    try {
        providerClass = conf.getClass(providerKey, null, HoyaProviderFactory.class);
    } catch (RuntimeException e) {
        throw new BadClusterStateException(e, "Failed to load provider %s: %s", application, e);
    }
    if (providerClass == null) {
        throw new BadClusterStateException(PROVIDER_NOT_FOUND, application);
    }

    Exception ex;
    try {
        HoyaProviderFactory providerFactory = providerClass.newInstance();
        providerFactory.setConf(conf);
        return providerFactory;
    } catch (InstantiationException e) {
        ex = e;
    } catch (IllegalAccessException e) {
        ex = e;
    } catch (Exception e) {
        ex = e;
    }
    // by this point the operation has failed and ex holds the cause
    throw new BadClusterStateException(ex, "Failed to create an instance of %s : %s", providerClass, ex);
}

From source file:org.apache.lens.cube.parse.StorageTableResolver.java

License:Apache License

StorageTableResolver(Configuration conf) {
    this.conf = conf;
    this.supportedStorages = getSupportedStorages(conf);
    this.allStoragesSupported = (supportedStorages == null);
    this.failOnPartialData = conf.getBoolean(CubeQueryConfUtil.FAIL_QUERY_ON_PARTIAL_DATA, false);
    String str = conf.get(CubeQueryConfUtil.VALID_STORAGE_DIM_TABLES);
    validDimTables = StringUtils.isBlank(str) ? null : Arrays.asList(StringUtils.split(str.toLowerCase(), ","));
    this.processTimePartCol = conf.get(CubeQueryConfUtil.PROCESS_TIME_PART_COL);
    String maxIntervalStr = conf.get(CubeQueryConfUtil.QUERY_MAX_INTERVAL);
    if (maxIntervalStr != null) {
        this.maxInterval = UpdatePeriod.valueOf(maxIntervalStr);
    } else {
        this.maxInterval = null;
    }
    rangeWriter = ReflectionUtils.newInstance(conf.getClass(CubeQueryConfUtil.TIME_RANGE_WRITER_CLASS,
            CubeQueryConfUtil.DEFAULT_TIME_RANGE_WRITER, TimeRangeWriter.class), this.conf);
    String formatStr = conf.get(CubeQueryConfUtil.PART_WHERE_CLAUSE_DATE_FORMAT);
    if (formatStr != null) {
        partWhereClauseFormat = new SimpleDateFormat(formatStr);
    }
    this.phase = PHASE.first();
    completenessThreshold = conf.getFloat(CubeQueryConfUtil.COMPLETENESS_THRESHOLD,
            CubeQueryConfUtil.DEFAULT_COMPLETENESS_THRESHOLD);
    completenessPartCol = conf.get(CubeQueryConfUtil.COMPLETENESS_CHECK_PART_COL);
}