com.hn.cluster.hadoop.mrs.SecondarySort.java Source code


Introduction

Here is the source code for com.hn.cluster.hadoop.mrs.SecondarySort.java.

Source

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hn.cluster.hadoop.mrs;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * This is an example Hadoop Map/Reduce application. It reads text input
 * files that must contain two integers per line. The output is sorted by
 * the first and second number and grouped on the first number.
 * 
 * In MapReduce the framework sorts records by key during the map-side
 * spill/merge and again during the reduce-side merge.
 * 
 * MR offers two ways to compare keys: WritableComparable and
 * RawComparator. A WritableComparable key is deserialized and compared
 * with its compareTo() method, while a RawComparator compares the
 * serialized bytes directly and so avoids deserialization. When the key
 * implements compareTo(), Hadoop uses it to order keys in both the
 * map-side and reduce-side sort/merge phases.
 * 
 * To run: bin/hadoop jar build/hadoop-examples.jar secondarysort <i>in-dir</i>
 * <i>out-dir</i>
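 * 
 * For example (with illustrative data), an input file containing
 * 
 *     3 17
 *     3 5
 *     1 9
 * 
 * yields, with a single reducer, one group per distinct first number with
 * the second numbers sorted ascending inside each group (key and value
 * are tab-separated by TextOutputFormat):
 * 
 *     ------------------------------------------------
 *     1    9
 *     ------------------------------------------------
 *     3    5
 *     3    17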
 */
public class SecondarySort {

    /**
     * Define a pair of integers that are writable. They are serialized in a
     * byte comparable format.
     */
    public static class IntPair implements WritableComparable<IntPair> {

        private int first = 0;
        private int second = 0;

        /**
         * Set the left and right values.
         */
        public void set(int left, int right) {
            first = left;
            second = right;
        }

        public int getFirst() {
            return first;
        }

        public int getSecond() {
            return second;
        }

        /**
         * Read the two integers, decoding the byte-comparable format.
         * Encoded as: MIN_VALUE -> 0, 0 -> -MIN_VALUE, MAX_VALUE -> -1.
         */
        @Override
        public void readFields(DataInput in) throws IOException {
            first = in.readInt() + Integer.MIN_VALUE;
            second = in.readInt() + Integer.MIN_VALUE;
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeInt(first - Integer.MIN_VALUE);
            out.writeInt(second - Integer.MIN_VALUE);
        }
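
        /*
         * Subtracting Integer.MIN_VALUE flips the sign bit, so the encoded
         * ints sort correctly under an unsigned byte-by-byte comparison:
         * for example, -1 is written as 0x7FFFFFFF and 0 as 0x80000000,
         * which preserves -1 < 0 at the byte level and lets the raw
         * Comparator below order pairs without deserializing them.
         */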

        @Override
        public int hashCode() {
            return first * 157 + second;
        }

        @Override
        public boolean equals(Object right) {
            if (right instanceof IntPair) {
                IntPair r = (IntPair) right;
                return r.first == first && r.second == second;
            } else {
                return false;
            }
        }

        /** A Comparator that compares serialized IntPair. */
        public static class Comparator extends WritableComparator {
            public Comparator() {
                super(IntPair.class);
            }

            @Override
            public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
                // Compare the full serialized pair, byte by byte.
                return compareBytes(b1, s1, l1, b2, s2, l2);
            }
        }

        static { // register this comparator
            WritableComparator.define(IntPair.class, new Comparator());
        }
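
        // With the comparator registered above, the framework sorts
        // serialized IntPair keys directly on their bytes instead of
        // deserializing each key and calling compareTo().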

        @Override
        public int compareTo(IntPair o) {
            if (first != o.first) {
                return first < o.first ? -1 : 1;
            } else if (second != o.second) {
                return second < o.second ? -1 : 1;
            } else {
                return 0;
            }
        }
    }

    /**
     * Partition based on the first part of the pair, so that every record
     * with the same first number reaches the same reducer.
     * The partitioner decides which reducer receives each map output key.
     * Hadoop's default is the hash-based HashPartitioner, which hashes the
     * whole key; this custom Partitioner hashes only the first number, so
     * with, say, 3 reducers all keys whose first number is 1 land in the
     * same partition regardless of their second numbers.
     */
    public static class FirstPartitioner extends Partitioner<IntPair, IntWritable> {
        /**
         * getPartition() returns an int in the range [0, numPartitions)
         * naming the Reducer that will receive this <key, value> pair.
         */
        @Override
        public int getPartition(IntPair key, IntWritable value, int numPartitions) {
            return Math.abs(key.getFirst() * 127) % numPartitions;
        }
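
        /*
         * For illustration: with numPartitions = 3, a key whose first
         * number is 5 is routed to partition Math.abs(5 * 127) % 3 = 2,
         * as is every other key with first == 5. Note that first * 127
         * can overflow; if the product is exactly Integer.MIN_VALUE,
         * Math.abs returns a negative value and the result is negative,
         * which ((first * 127) & Integer.MAX_VALUE) % numPartitions
         * would avoid.
         */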
    }

    /**
     * Compare only the first part of the pair, so that reduce is called once
     * for each value of the first part.
     */
    public static class FirstGroupingComparator implements RawComparator<IntPair> {
        @Override
        public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
            // Compare only the first 4 bytes, i.e. the serialized first int.
            return WritableComparator.compareBytes(b1, s1, Integer.SIZE / 8, b2, s2, Integer.SIZE / 8);
        }

        @Override
        public int compare(IntPair o1, IntPair o2) {
            int l = o1.getFirst();
            int r = o2.getFirst();
            return l == r ? 0 : (l < r ? -1 : 1);
        }
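
        /*
         * Under this comparator the pairs (1, 5) and (1, 9) compare as
         * equal, so their values are grouped into a single reduce() call
         * for first == 1, even though the sort comparator ordered them
         * fully by (first, second).
         */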
    }

    /**
     * Read two integers from each line and generate a key, value pair as
     * ((left, right), right).
     */
    public static class MapClass extends Mapper<LongWritable, Text, IntPair, IntWritable> {

        private final IntPair key = new IntPair();
        private final IntWritable value = new IntWritable();

        /**
         * Parse each input line as two integers "left right" and emit the
         * pair ((left, right), right).
         */
        @Override
        public void map(LongWritable inKey, Text inValue, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(inValue.toString());
            int left = 0;
            int right = 0;
            if (itr.hasMoreTokens()) {
                left = Integer.parseInt(itr.nextToken());
                if (itr.hasMoreTokens()) {
                    right = Integer.parseInt(itr.nextToken());
                }
                key.set(left, right);
                value.set(right);
                context.write(key, value);
            }
        }
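
        /*
         * For example, the line "3 17" is emitted as ((3, 17), 17): the
         * composite key carries both numbers so the shuffle can sort on
         * them, and the value repeats the second number for the reducer.
         */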
    }

    public static class Reduce extends Reducer<IntPair, IntWritable, Text, IntWritable> {
        private static final Text SEPARATOR = new Text("------------------------------------------------");
        private final Text first = new Text();

        @Override
        public void reduce(IntPair key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(SEPARATOR, null);
            first.set(Integer.toString(key.getFirst()));
            for (IntWritable value : values) {
                context.write(first, value);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // Load the Hadoop configuration
        Configuration conf = new Configuration();
        // Create the job
        Job job = new Job(conf, "secondary sort");
        job.setJarByClass(SecondarySort.class);
        // Mapper
        job.setMapperClass(MapClass.class);
        // Reducer
        job.setReducerClass(Reduce.class);

        // Partition map output by the first number of the pair
        job.setPartitionerClass(FirstPartitioner.class);
        // Group reduce input by the first number only
        job.setGroupingComparatorClass(FirstGroupingComparator.class);

        // The map output key class
        job.setMapOutputKeyClass(IntPair.class);
        // The map output value class
        job.setMapOutputValueClass(IntWritable.class);

        // The reduce output key class, written by the TextOutputFormat set below
        job.setOutputKeyClass(Text.class);
        // The reduce output value class
        job.setOutputValueClass(IntWritable.class);

        /**
         * The input is divided into splits, and each split is read by a
         * RecordReader that turns it into <key, value> pairs. With
         * TextInputFormat those pairs are <LongWritable, Text>, i.e.
         * (byte offset, line of text). map() is called once per pair and
         * emits <IntPair, IntWritable> records, which the Partitioner
         * configured above routes to the reducers.
         */
        job.setInputFormatClass(TextInputFormat.class);
        // TextOutputFormat writes the reduce output through a RecordWriter
        job.setOutputFormatClass(TextOutputFormat.class);

        // Input path on HDFS
        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.1.12:9000/input/input/soso.txt"));
        // Output path on HDFS (must not exist yet)
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.1.12:9000/output/sort/"));
        // Submit the job and wait for it to finish
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
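
Note that while the class comment shows the generic in-dir/out-dir invocation, this version hardcodes its HDFS input and output paths in main(). Point them at your own NameNode and data before running, and make sure the output directory does not already exist, since FileOutputFormat fails the job if it does.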