Example usage for org.apache.hadoop.conf Configuration getClass

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getClass, drawn from open-source projects.

Prototype

public Class<?> getClass(String name, Class<?> defaultValue) 

Document

Get the value of the name property as a Class; if no such property is set, the given defaultValue is returned.
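
A minimal, self-contained sketch of the get/set round trip before the project examples below; the property keys (my.codec.class, my.unset.key) are made up for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetClassExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // nothing set yet: getClass returns the supplied default
        Class<?> unset = conf.getClass("my.codec.class", String.class);
        System.out.println(unset); // class java.lang.String

        // store a class name, then resolve it back to a Class object
        conf.set("my.codec.class", "java.util.ArrayList");
        Class<?> resolved = conf.getClass("my.codec.class", String.class);
        System.out.println(resolved); // class java.util.ArrayList

        // a null default is the common "optional feature" idiom in the examples below
        Class<?> optional = conf.getClass("my.unset.key", null);
        System.out.println(optional); // null
    }
}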

Usage

From source file:org.apache.gora.mapreduce.GoraOutputFormat.java

License:Apache License

@Override
@SuppressWarnings("unchecked")
public RecordWriter<K, T> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    Class<? extends DataStore<K, T>> dataStoreClass = (Class<? extends DataStore<K, T>>) conf
            .getClass(DATA_STORE_CLASS, null);
    Class<K> keyClass = (Class<K>) conf.getClass(OUTPUT_KEY_CLASS, null);
    Class<T> rowClass = (Class<T>) conf.getClass(OUTPUT_VALUE_CLASS, null);
    final DataStore<K, T> store = DataStoreFactory.createDataStore(dataStoreClass, keyClass, rowClass,
            context.getConfiguration());

    setOutputPath(store, context);

    return new GoraRecordWriter(store, context);
}
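
The class-valued properties read here are written at job-setup time; Configuration.setClass is the usual writing side, and it verifies an interface bound before storing the class name. A sketch of that pairing with a stand-in key and types (not Gora's actual constants):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;

public class SetClassExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setClass checks that the class implements the interface, then stores its name
        conf.setClass("example.datastore.class", ArrayList.class, List.class);
        // the reading side, as in getRecordWriter above
        Class<?> storeClass = conf.getClass("example.datastore.class", null);
        System.out.println(storeClass); // class java.util.ArrayList
    }
}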

From source file:org.apache.hama.bsp.message.queue.SpillingQueue.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public void init(Configuration conf, TaskAttemptID arg1) {

    bufferCount = conf.getInt(SPILLBUFFER_COUNT, 3);
    bufferSize = conf.getInt(SPILLBUFFER_SIZE, Constants.BUFFER_DEFAULT_SIZE);
    direct = conf.getBoolean(SPILLBUFFER_DIRECT, true);
    threshold = conf.getInt(SPILLBUFFER_THRESHOLD, Constants.BUFFER_DEFAULT_SIZE);
    fileName = conf.get(SPILLBUFFER_FILENAME, System.getProperty("java.io.tmpdir") + File.separatorChar
            + new BigInteger(128, new SecureRandom()).toString(32));

    messageClass = (Class<M>) conf.getClass(Constants.MESSAGE_CLASS, null);
    objectWritableMode = messageClass == null;

    SpilledDataProcessor processor;
    try {
        processor = new CombineSpilledDataProcessor<M>(fileName);
        processor.init(conf);
    } catch (FileNotFoundException e) {
        LOG.error("Error initializing spilled data stream.", e);
        throw new RuntimeException(e);
    }
    spillOutputBuffer = new SpillingDataOutputBuffer(bufferCount, bufferSize, threshold, direct, processor);
    objectWritable = new ObjectWritable();
    objectWritable.setConf(conf);
    this.conf = conf;
}
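
Note the null default above: when Constants.MESSAGE_CLASS is unset, messageClass comes back null and the queue switches to generic ObjectWritable serialization. The same idiom in isolation (key name made up):

Class<?> msgClass = conf.getClass("queue.message.class", null);
boolean genericMode = (msgClass == null); // an unset property selects the generic path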

From source file:org.apache.hama.computemodel.mapreduce.Mapper.java

License:Apache License

@Override
protected void compute(
        BSPPeer<K1, V1, K2, V2, WritableKeyValues<? extends WritableComparable<?>, ? extends Writable>> peer)
        throws IOException {

    this.memoryQueue = new PriorityQueue<WritableKeyValues<K2, V2>>();
    this.globalKeyDistribution = new long[peer.getNumPeers()][peer.getNumPeers()];

    int myId = peer.getPeerId();
    OutputCollector<K2, V2> collector = new BSPMapperOutputCollector<K1, V1, K2, V2>(peer, memoryQueue,
            globalKeyDistribution[myId]);

    KeyValuePair<K1, V1> record = null;
    while ((record = peer.readNext()) != null) {
        map(record.getKey(), record.getValue(), collector);
    }

    Comparator<V2> valComparator = null;
    Configuration conf = peer.getConfiguration();

    Class<?> comparatorClass = conf.getClass(VALUE_COMPARATOR_CLASS, null);

    if (comparatorClass != null) {
        valComparator = (Comparator<V2>) ReflectionUtils.newInstance(comparatorClass, conf);
    }

    Reducer<K2, V2, K2, V2> combiner = null;
    Class<?> combinerClass = conf.getClass(COMBINER_CLASS, null);

    if (combinerClass != null) {
        combiner = (Reducer<K2, V2, K2, V2>) ReflectionUtils.newInstance(combinerClass, conf);
    }

    ExecutorService service = Executors.newFixedThreadPool(1);
    Future<Integer> future = service.submit(new CombineAndSortThread<K2, V2>(peer.getConfiguration(),
            this.memoryQueue, valComparator, combiner));

    String[] peers = peer.getAllPeerNames();

    IntWritable keyPartition = new IntWritable();
    LongWritable value = new LongWritable();

    WritableKeyValues<IntWritable, IntWritable> myIdTuple = new WritableKeyValues<IntWritable, IntWritable>(
            new IntWritable(peer.getPeerId()), new IntWritable(-1));

    int peerId = peer.getPeerId();
    for (int keyNumber = 0; keyNumber < globalKeyDistribution[0].length; ++keyNumber) {
        keyPartition.set(keyNumber);
        value.set(globalKeyDistribution[peerId][keyNumber]);
        myIdTuple.setValue(keyPartition);
        for (String peerName : peers) {
            peer.send(peerName,
                    new WritableKeyValues<WritableKeyValues<IntWritable, IntWritable>, LongWritable>(myIdTuple,
                            value));
        }
    }
    peer.save(KEY_DIST, this.globalKeyDistribution);
    peer.save(COMBINER_FUTURE, future);
    peer.save(MESSAGE_QUEUE, this.memoryQueue);
}

From source file:org.apache.hama.computemodel.mapreduce.Reducer.java

License:Apache License

@Override
protected void compute(BSPPeer<K2, V2, K3, V3, WritableKeyValues<K2, V2>> peer) throws IOException {

    this.memoryQueue = (PriorityQueue<WritableKeyValues<K2, V2>>) peer.getSavedObject(Mapper.MESSAGE_QUEUE);

    Configuration conf = peer.getConfiguration();

    WritableKeyValues<K2, V2> message;
    while ((message = peer.getCurrentMessage()) != null) {
        this.memoryQueue.add(message);
    }

    CombinerOutputCollector<K3, V3> outputCollector = new CombinerOutputCollector<K3, V3>();
    Comparator<V2> valComparator = null;
    Class<?> comparatorClass = conf.getClass(Mapper.VALUE_COMPARATOR_CLASS, null);
    if (comparatorClass != null) {
        valComparator = (Comparator<V2>) ReflectionUtils.newInstance(comparatorClass, conf);
    }

    WritableKeyValues<K2, V2> previousRecord = null;

    List<WritableKeyValues<K2, V2>> list = new ArrayList<WritableKeyValues<K2, V2>>(memoryQueue.size());

    while (!memoryQueue.isEmpty()) {

        WritableKeyValues<K2, V2> record = memoryQueue.poll();
        K2 key = record.getKey();
        if (previousRecord != null && key.equals(previousRecord.getKey())) {
            previousRecord.addValues(record.getValues());
        } else {
            if (previousRecord != null) {
                previousRecord.sortValues(valComparator);
                list.add(previousRecord);
            }
            previousRecord = record;
        }
    }

    // flush the final group: the loop above only emits a record's predecessor,
    // so the last previousRecord would otherwise be dropped
    if (previousRecord != null) {
        previousRecord.sortValues(valComparator);
        list.add(previousRecord);
    }

    Iterator<WritableKeyValues<K2, V2>> recordIter = list.iterator();
    while (recordIter.hasNext()) {
        WritableKeyValues<K2, V2> record = recordIter.next();
        Iterator<V2> valIterator = record.getValues().iterator();
        reduce(record.getKey(), valIterator, outputCollector);
    }

    LOG.debug("In reduder " + outputCollector.getCollectedRecords());

    Iterator<WritableKeyValues<K3, V3>> outputIter = outputCollector.getCollectedRecords().iterator();
    while (outputIter.hasNext()) {
        WritableKeyValues<K3, V3> output = outputIter.next();
        peer.write(output.getKey(), output.getValue());
    }

}

From source file:org.apache.hama.computemodel.mapreduce.ShuffleAndDistribute.java

License:Apache License

protected void designateKeysToReducers(int[] keyDistribution, final long[][] globalKeyDistribution,
        Configuration conf) {
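    // note: the lookup key here is an empty string in the original source, so the
    // lookup always returns null and the default designator below is chosen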
    Class<?> designatorClass = conf.getClass("", null);
    ReducerKeyDesignator designator = null;
    if (designatorClass == null) {
        designator = ReducerKeyDesignator.getReduceDesignator(DesignateStrategy.MINIMIZE_COMMUNICATION,
                KeyDistribution.CONTIGUOUS);
    } else {
        designator = (ReducerKeyDesignator) (ReflectionUtils.newInstance(designatorClass, conf));
    }

    designator.designateKeysToReducers(keyDistribution, globalKeyDistribution, conf);

}

From source file:org.apache.hama.computemodel.mapreduce.ShuffleAndDistribute.java

License:Apache License

@Override
protected void compute(
        BSPPeer<NullWritable, NullWritable, K2, V2, WritableKeyValues<? extends WritableComparable<?>, ? extends Writable>> peer)
        throws IOException {
    int peerId = peer.getPeerId();
    Configuration conf = peer.getConfiguration();

    this.memoryQueue = (PriorityQueue<WritableKeyValues<K2, V2>>) peer.getSavedObject(Mapper.MESSAGE_QUEUE);

    this.globalKeyDistribution = (long[][]) peer.getSavedObject(Mapper.KEY_DIST);

    WritableKeyValues<WritableKeyValues<IntWritable, IntWritable>, LongWritable> message;
    while ((message = (WritableKeyValues<WritableKeyValues<IntWritable, IntWritable>, LongWritable>) peer
            .getCurrentMessage()) != null) {
        int peerNo = message.getKey().getKey().get();
        int partition = message.getKey().getValue().get();
        globalKeyDistribution[peerNo][partition] += message.getValue().get();
    }

    int[] keyDistribution = new int[globalKeyDistribution[0].length];

    designateKeysToReducers(keyDistribution, globalKeyDistribution, conf);

    int myKeyCount = 0;
    for (int i = 0; i < globalKeyDistribution[0].length; ++i) {
        myKeyCount += globalKeyDistribution[peerId][i];
    }

    PriorityQueue<WritableKeyValues<K2, V2>> mergeQueue = new PriorityQueue<WritableKeyValues<K2, V2>>(
            myKeyCount);
    Partitioner<K2, V2> partitioner = (Partitioner<K2, V2>) ReflectionUtils
            .newInstance(conf.getClass(Mapper.PARTITIONER_CLASS, HashPartitioner.class), conf);

    Iterator<WritableKeyValues<K2, V2>> keyValIter = this.memoryQueue.iterator();
    String[] peerNames = peer.getAllPeerNames();
    while (keyValIter.hasNext()) {
        WritableKeyValues<K2, V2> record = keyValIter.next();
        // should eventually be the number of reducers rather than peers
        int partition = partitioner.getPartition(record.getKey(), record.getValue(), peer.getNumPeers());
        int destPeerId = keyDistribution[partition];
        if (peerId != destPeerId) {
            peer.send(peerNames[destPeerId], record);
            keyValIter.remove();
        }
    }

}

From source file:org.apache.hama.graph.VertexInputReader.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public KeyValuePair<Writable, Writable> convertRecord(KeyValuePair<Writable, Writable> inputRecord,
        Configuration conf) throws IOException {
    Class<Vertex<V, E, M>> vertexClass = (Class<Vertex<V, E, M>>) conf.getClass(GraphJob.VERTEX_CLASS_ATTR,
            Vertex.class);
    boolean vertexCreation;
    Vertex<V, E, M> vertex = GraphJobRunner.<V, E, M>newVertexInstance(vertexClass);
    try {
        vertexCreation = parseVertex((KEYIN) inputRecord.getKey(), (VALUEIN) inputRecord.getValue(), vertex);
    } catch (Exception e) {
        vertexCreation = false;
    }
    if (!vertexCreation) {
        throw new IOException("Error parsing vertex. Please check your vertex input reader.");
    }

    outputRecord.setKey(vertex.getVertexID());
    outputRecord.setValue(vertex);
    return outputRecord;
}

From source file:org.apache.hama.ml.recommendation.cf.OnlineTrainBSP.java

License:Apache License

@Override
public void setup(BSPPeer<Text, VectorWritable, Text, VectorWritable, MapWritable> peer)
        throws IOException, SyncException, InterruptedException {

    Configuration conf = peer.getConfiguration();

    ITERATION = conf.getInt(OnlineCF.Settings.CONF_ITERATION_COUNT, OnlineCF.Settings.DFLT_ITERATION_COUNT);
    MATRIX_RANK = conf.getInt(OnlineCF.Settings.CONF_MATRIX_RANK, OnlineCF.Settings.DFLT_MATRIX_RANK);
    SKIP_COUNT = conf.getInt(OnlineCF.Settings.CONF_SKIP_COUNT, OnlineCF.Settings.DFLT_SKIP_COUNT);

    inputItemDelim = conf.get(OnlineCF.Settings.CONF_INPUT_ITEM_DELIM, OnlineCF.Settings.DFLT_ITEM_DELIM);
    inputUserDelim = conf.get(OnlineCF.Settings.CONF_INPUT_USER_DELIM, OnlineCF.Settings.DFLT_USER_DELIM);
    inputPreferenceDelim = conf.get(OnlineCF.Settings.CONF_INPUT_PREFERENCES_DELIM,
            OnlineCF.Settings.DFLT_PREFERENCE_DELIM);

    Class<?> cls = conf.getClass(OnlineCF.Settings.CONF_ONLINE_UPDATE_FUNCTION, null);
    try {
        function = (OnlineUpdate.Function) (cls.newInstance());
    } catch (Exception e) {
        // property unset (cls == null) or class not instantiable:
        // keep the default update function
    }
}

From source file:org.apache.hoya.yarn.appmaster.rpc.RpcBinder.java

License:Apache License

/**
 * Verify that the conf is set up for protobuf transport of Hoya RPC
 * @param conf configuration
 * @param hoyaClusterAPIClass class for the API
 * @return true if the configured RPC engine for the API class is ProtobufRpcEngine
 */
public static boolean verifyBondedToProtobuf(Configuration conf,
        Class<HoyaClusterProtocolPB> hoyaClusterAPIClass) {
    return conf.getClass("rpc.engine." + hoyaClusterAPIClass.getName(), RpcEngine.class)
            .equals(ProtobufRpcEngine.class);
}
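
The property checked here is how Hadoop's RPC layer selects an engine per protocol, and it is normally written with Configuration.setClass (RPC.setProtocolEngine does this internally). A sketch of the setter side under that assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RpcEngine;

public static void bindToProtobuf(Configuration conf) {
    // mirrors what RPC.setProtocolEngine does for a protocol class
    conf.setClass("rpc.engine." + HoyaClusterProtocolPB.class.getName(),
            ProtobufRpcEngine.class, RpcEngine.class);
    // after this, verifyBondedToProtobuf(conf, HoyaClusterProtocolPB.class) returns true
}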

From source file:org.apache.oozie.action.hadoop.DistcpMain.java

License:Apache License

@Override
protected void run(String[] args) throws Exception {

    Configuration actionConf = loadActionConf();
    LauncherMainHadoopUtils.killChildYarnJobs(actionConf);
    Class<?> klass = actionConf.getClass(LauncherMapper.CONF_OOZIE_ACTION_MAIN_CLASS,
            org.apache.hadoop.tools.DistCp.class);
    System.out.println("Main class        : " + klass.getName());
    System.out.println("Arguments         :");
    for (String arg : args) {
        System.out.println("                    " + arg);
    }

    // propagate delegation related props from launcher job to MR job
    if (getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION") != null) {
        actionConf.set("mapreduce.job.credentials.binary", getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION"));
    }

    getConstructorAndArgs(klass, actionConf);
    if (construct == null) {
        throw new RuntimeException("Distcp constructor was not found, unable to instantiate");
    }
    if (constArgs == null) {
        throw new RuntimeException("Arguments for distcp constructor is null, unable to instantiate");
    }
    try {
        Tool distcp = (Tool) construct.newInstance(constArgs);
        int i = ToolRunner.run(distcp, args);
        if (i != 0) {
            throw new RuntimeException("Returned value from distcp is non-zero (" + i + ")");
        }
    } catch (InvocationTargetException ex) {
        throw new JavaMainException(ex.getCause());
    }
}
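
Because getClass resolves the stored class name only at read time, overriding the main class is a plain property write; the compile-time default org.apache.hadoop.tools.DistCp is used only when the property is absent. A sketch, assuming LauncherMapper.CONF_OOZIE_ACTION_MAIN_CLASS is the key read above and com.example.MyDistCp is a hypothetical replacement:

Configuration actionConf = new Configuration();
// unset: getClass falls back to the compile-time default
Class<?> klass = actionConf.getClass(LauncherMapper.CONF_OOZIE_ACTION_MAIN_CLASS,
        org.apache.hadoop.tools.DistCp.class);
// override by storing a different class name; it is resolved on the next read
actionConf.set(LauncherMapper.CONF_OOZIE_ACTION_MAIN_CLASS, "com.example.MyDistCp");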