edu.umd.shrawanraina.BuildInvertedIndexHBaseCompressed.java Source code

Introduction

Here is the source code for edu.umd.shrawanraina.BuildInvertedIndexHBaseCompressed.java, a Hadoop MapReduce tool that builds a compressed inverted index (d-gap and VInt encoded postings) from a text collection and stores it in an HBase table.

Source

/*
 * Cloud9: A Hadoop toolkit for working with big data
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package edu.umd.shrawanraina;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.VIntWritable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import tl.lin.data.fd.Object2IntFrequencyDistribution;
import tl.lin.data.fd.Object2IntFrequencyDistributionEntry;
import tl.lin.data.pair.PairOfObjectInt;
import tl.lin.data.pair.PairOfStringInt;

public class BuildInvertedIndexHBaseCompressed extends Configured implements Tool {
    private static final Logger LOG = Logger.getLogger(BuildInvertedIndexHBaseCompressed.class);
    public static final String[] FAMILIES = { "p" };
    public static final byte[] CF = Bytes.toBytes(FAMILIES[0]);
    public static final byte[] COUNT = Bytes.toBytes("count");

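    // Mapper: counts term frequencies within each document, then emits one
    // ((term, docno), tf) pair per distinct term. With the default
    // TextInputFormat, the input key (the line's byte offset) serves as the
    // document number.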
    private static class MyMapper extends Mapper<LongWritable, Text, PairOfStringInt, VIntWritable> {
        private static final Object2IntFrequencyDistribution<String> COUNTS = new Object2IntFrequencyDistributionEntry<String>();
        private static final VIntWritable TF = new VIntWritable();

        @Override
        public void map(LongWritable docno, Text doc, Context context) throws IOException, InterruptedException {
            String text = doc.toString();
            COUNTS.clear();
            String[] terms = text.split("\\s+");
            // First build a histogram of the terms.
            for (String term : terms) {
                if (term == null || term.length() == 0) {
                    continue;
                }
                COUNTS.increment(term);
            }

            // Emit postings.
            for (PairOfObjectInt<String> e : COUNTS) {
                TF.set(e.getRightElement());
                context.write(new PairOfStringInt(e.getLeftElement(), (int) docno.get()), TF);
            }
        }
    }

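    // Reducer (file-output variant): accumulates each term's postings as
    // VInt-encoded (d-gap, tf) pairs and writes them out prefixed by the
    // document frequency. Keys arrive sorted by (term, docno), so each gap is
    // non-negative and the postings stay in document order.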
    private static class MyReducer extends Reducer<PairOfStringInt, VIntWritable, Text, BytesWritable> {
        private static String prevKey;
        private static int prevDocNo;
        private static int df;
        private static int tf;
        private static int dgap;
        private final static ByteArrayOutputStream postings = new ByteArrayOutputStream();
        private final static DataOutputStream stream = new DataOutputStream(postings);
        private final static Text KEY = new Text();

        @Override
        public void setup(Context context) {
            prevKey = null;
            prevDocNo = 0;
            df = 0;
            tf = 0;
            dgap = 0;
        }

        @Override
        public void reduce(PairOfStringInt key, Iterable<VIntWritable> values, Context context)
                throws IOException, InterruptedException {
            String term = key.getLeftElement();
            if (prevKey == null || term.equals(prevKey)) {
                prevKey = key.getLeftElement();
                //Iterate through the values
                Iterator<VIntWritable> iter = values.iterator();
                while (iter.hasNext()) {
                    df++;
                    tf = iter.next().get();
                    dgap = key.getRightElement() - prevDocNo;
                    WritableUtils.writeVInt(stream, dgap);
                    WritableUtils.writeVInt(stream, tf);
                    prevDocNo = key.getRightElement();
                }
            } else {
                KEY.set(prevKey);

                stream.flush();

                ByteArrayOutputStream dfpost = new ByteArrayOutputStream();
                DataOutputStream dfstream = new DataOutputStream(dfpost);
                WritableUtils.writeVInt(dfstream, df);
                dfstream.write(postings.toByteArray());
                dfstream.flush();
                context.write(KEY, new BytesWritable(dfpost.toByteArray()));

                prevDocNo = 0;
                df = 0;
                prevKey = key.getLeftElement();
                postings.reset();

                Iterator<VIntWritable> iter = values.iterator();
                while (iter.hasNext()) {
                    df++;
                    tf = iter.next().get();
                    dgap = key.getRightElement() - prevDocNo;

                    WritableUtils.writeVInt(stream, dgap);
                    WritableUtils.writeVInt(stream, tf);

                    prevDocNo = key.getRightElement();
                }
            }
        }

        @Override
        public void cleanup(Context context) throws IOException, InterruptedException {
            // Flush the postings of the final term, which the reduce method
            // alone never emits.
            if (prevKey != null) {
                KEY.set(prevKey);
                stream.flush();

                ByteArrayOutputStream dfpost = new ByteArrayOutputStream();
                DataOutputStream dfstream = new DataOutputStream(dfpost);
                WritableUtils.writeVInt(dfstream, df);
                dfstream.write(postings.toByteArray());
                dfstream.flush();
                context.write(KEY, new BytesWritable(dfpost.toByteArray()));
            }
        }
    }

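    // Table reducer: applies the same d-gap/VInt encoding as MyReducer, but
    // stores each term's packed postings in HBase under column family "p",
    // qualifier "count", with the term as the row key.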
    public static class MyTableReducer extends TableReducer<PairOfStringInt, VIntWritable, ImmutableBytesWritable> {
        private static String prevKey;
        private static int prevDocNo;
        private static int df;
        private static int tf;
        private static int dgap;
        private final static ByteArrayOutputStream postings = new ByteArrayOutputStream();
        private final static DataOutputStream stream = new DataOutputStream(postings);
        private final static Text KEY = new Text();

        @Override
        public void setup(Context context) {
            prevKey = null;
            prevDocNo = 0;
            df = 0;
            tf = 0;
            dgap = 0;
        }

        @Override
        public void reduce(PairOfStringInt key, Iterable<VIntWritable> values, Context context)
                throws IOException, InterruptedException {
            String term = key.getLeftElement();
            if (prevKey == null || term.equals(prevKey)) {
                prevKey = key.getLeftElement();
                //Iterate through the values
                Iterator<VIntWritable> iter = values.iterator();
                while (iter.hasNext()) {
                    df++;
                    tf = iter.next().get();
                    dgap = key.getRightElement() - prevDocNo;
                    WritableUtils.writeVInt(stream, dgap);
                    WritableUtils.writeVInt(stream, tf);
                    prevDocNo = key.getRightElement();
                }
            } else {
                KEY.set(prevKey);

                stream.flush();

                ByteArrayOutputStream dfpost = new ByteArrayOutputStream();
                DataOutputStream dfstream = new DataOutputStream(dfpost);
                WritableUtils.writeVInt(dfstream, df);
                dfstream.write(postings.toByteArray());
                dfstream.flush();
                // Write the packed postings to HBase rather than to a file record.
                Put put = new Put(Bytes.toBytes(KEY.toString()));
                put.add(CF, COUNT, dfpost.toByteArray());
                context.write(null, put);

                prevDocNo = 0;
                df = 0;
                prevKey = key.getLeftElement();
                postings.reset();

                Iterator<VIntWritable> iter = values.iterator();
                while (iter.hasNext()) {
                    df++;
                    tf = iter.next().get();
                    dgap = key.getRightElement() - prevDocNo;

                    WritableUtils.writeVInt(stream, dgap);
                    WritableUtils.writeVInt(stream, tf);

                    prevDocNo = key.getRightElement();
                }
            }
        }

        @Override
        public void cleanup(Context context) throws IOException, InterruptedException {
            // Flush the final term's postings; guard against an empty partition.
            if (prevKey == null) {
                return;
            }
            KEY.set(prevKey);

            stream.flush();

            ByteArrayOutputStream dfpost = new ByteArrayOutputStream();
            DataOutputStream dfstream = new DataOutputStream(dfpost);
            WritableUtils.writeVInt(dfstream, df);
            dfstream.write(postings.toByteArray());
            dfstream.flush();
            // Write the final term's postings to HBase.
            Put put = new Put(Bytes.toBytes(KEY.toString()));
            put.add(CF, COUNT, dfpost.toByteArray());
            context.write(null, put);

            postings.close();
            stream.close();
        }

    }

    private BuildInvertedIndexHBaseCompressed() {
    }

    private static final String INPUT = "input";
    private static final String OUTPUT = "output";
    private static final String NUM_REDUCERS = "numReducers";

    /**
     * Runs this tool.
     */
    @SuppressWarnings({ "static-access" })
    public int run(String[] args) throws Exception {
        Options options = new Options();
        options.addOption(OptionBuilder.withArgName("path").hasArg().withDescription("input path").create(INPUT));
        options.addOption(OptionBuilder.withArgName("path").hasArg().withDescription("output path").create(OUTPUT));
        options.addOption(OptionBuilder.withArgName("num").hasArg().withDescription("number of reducers")
                .create(NUM_REDUCERS));
        CommandLine cmdline;
        CommandLineParser parser = new GnuParser();

        try {
            cmdline = parser.parse(options, args);
        } catch (ParseException exp) {
            System.err.println("Error parsing command line: " + exp.getMessage());
            return -1;
        }

        if (!cmdline.hasOption(INPUT) || !cmdline.hasOption(OUTPUT)) {
            System.out.println("args: " + Arrays.toString(args));
            HelpFormatter formatter = new HelpFormatter();
            formatter.setWidth(120);
            formatter.printHelp(this.getClass().getName(), options);
            ToolRunner.printGenericCommandUsage(System.out);
            return -1;
        }

        String inputPath = cmdline.getOptionValue(INPUT);
        String outputTable = cmdline.getOptionValue(OUTPUT);
        int reduceTasks = cmdline.hasOption(NUM_REDUCERS) ? Integer.parseInt(cmdline.getOptionValue(NUM_REDUCERS))
                : 1;

        // Drop the output table if it already exists, then create it fresh.
        Configuration conf = getConf();
        conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

        Configuration hbaseConfig = HBaseConfiguration.create(conf);
        HBaseAdmin admin = new HBaseAdmin(hbaseConfig);

        if (admin.tableExists(outputTable)) {
            LOG.info(String.format("Table '%s' exists: dropping table and recreating.", outputTable));
            LOG.info(String.format("Disabling table '%s'", outputTable));
            admin.disableTable(outputTable);
            LOG.info(String.format("Droppping table '%s'", outputTable));
            admin.deleteTable(outputTable);
        }

        HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(outputTable));
        for (String family : FAMILIES) {
            tableDesc.addFamily(new HColumnDescriptor(family));
        }

        admin.createTable(tableDesc);
        LOG.info(String.format("Successfully created table '%s'", outputTable));

        admin.close();

        // Now we're ready to start running MapReduce.
        LOG.info("Tool: " + HBaseWordCount.class.getSimpleName());
        LOG.info(" - input path: " + inputPath);
        LOG.info(" - output table: " + outputTable);
        LOG.info(" - number of reducers: " + reduceTasks);

        Job job = Job.getInstance(conf);
        job.setJobName(BuildInvertedIndexHBaseCompressed.class.getSimpleName());
        job.setJarByClass(BuildInvertedIndexHBaseCompressed.class);

        job.setMapOutputKeyClass(PairOfStringInt.class);
        job.setMapOutputValueClass(VIntWritable.class);

        job.setMapperClass(MyMapper.class);
        // MyReducer applies only to file output; initTableReducerJob below swaps
        // in MyTableReducer for the HBase sink.
        job.setReducerClass(MyReducer.class);
        // Note: with more than one reducer, a partitioner on the term alone would
        // be needed so that all postings for a term reach the same reducer.
        job.setNumReduceTasks(reduceTasks);


        FileInputFormat.setInputPaths(job, new Path(inputPath));
        TableMapReduceUtil.initTableReducerJob(outputTable, MyTableReducer.class, job);

        // The job writes to HBase, but clear any stale HDFS path that shares the
        // table's name (e.g. left over from a file-based run of this tool).
        Path outputDir = new Path(outputTable);
        FileSystem.get(getConf()).delete(outputDir, true);

        long startTime = System.currentTimeMillis();
        boolean success = job.waitForCompletion(true);
        System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
        return success ? 0 : 1;
    }

    /**
     * Dispatches command-line arguments to the tool via the {@code ToolRunner}.
     */
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new BuildInvertedIndexHBaseCompressed(), args));
    }
}
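
Notes

Each value written by the reducers packs a term's document frequency, followed by its postings as (d-gap, term frequency) pairs, all VInt-encoded. The sketch below is not part of the original tool: the class name DecodePostings is hypothetical, but the byte layout it reads follows the reducers above.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class DecodePostings {
    // Decodes one postings value: VInt df, then df pairs of (VInt d-gap, VInt tf).
    public static void decode(byte[] value) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(value));
        int df = WritableUtils.readVInt(in);     // document frequency comes first
        int docno = 0;
        for (int i = 0; i < df; i++) {
            docno += WritableUtils.readVInt(in); // undo the d-gap encoding
            int tf = WritableUtils.readVInt(in); // term frequency in this document
            System.out.println("docno=" + docno + ", tf=" + tf);
        }
    }
}

A typical invocation passes the options defined in run(), for example (jar name and paths hypothetical): hadoop jar cloud9.jar edu.umd.shrawanraina.BuildInvertedIndexHBaseCompressed -input /path/to/collection -output indexTable -numReducers 4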