de.tudarmstadt.lt.n2n.hadoop.pipetests.GoogleSyntacticsJob4.java Source code

Java tutorial

Introduction

Here is the source code for de.tudarmstadt.lt.n2n.hadoop.pipetests.GoogleSyntacticsJob4.java

Source

/*
 *   Copyright 2012
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */
package de.tudarmstadt.lt.n2n.hadoop.pipetests;

import java.io.File;
import java.io.IOException;
import java.util.Collection;

import jobimtext.holing.extractor.JobimAnnotationExtractor;
import jobimtext.holing.extractor.JobimExtractorConfiguration;
import jobimtext.holing.type.JoBim;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.uima.UIMAFramework;
import org.apache.uima.analysis_engine.AnalysisEngine;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.fit.factory.AggregateBuilder;
import org.apache.uima.fit.factory.AnalysisEngineFactory;
import org.apache.uima.fit.util.JCasUtil;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import de.tudarmstadt.lt.n2n.annotators.JoBimPrinter;
import de.tudarmstadt.lt.n2n.pipelines.JoBimRelationPipeline;
import de.tudarmstadt.lt.n2n.utilities.SHARED_CONSTANTS;
import de.tudarmstadt.lt.utilities.types.RepeatedSentence;
import de.tudarmstadt.ukp.dkpro.core.api.metadata.type.DocumentMetaData;

/**
 * 
 * @author Steffen Remus
 */
/**
 * Map-only Hadoop job (old {@code mapred} API) that feeds each input line through a UIMA
 * holing pipeline ({@link JoBimRelationPipeline}) and emits one text line per extracted
 * JoBim pair per configured extractor. Output keys are {@link NullWritable}; there are
 * zero reducers, so mapper output is the final output.
 *
 * @author Steffen Remus
 */
public class GoogleSyntacticsJob4 extends Configured implements Tool {

    private static final Logger LOG = LoggerFactory.getLogger(GoogleSyntacticsJob4.class);

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new GoogleSyntacticsJob4(), args);
        System.exit(res);
    }

    /**
     * Configures and submits the job.
     *
     * @param args {@code args[0]} = input path, {@code args[1]} = output path
     * @return 0 on success, 1 on bad usage
     * @throws Exception if job submission fails
     */
    @Override
    public int run(String[] args) throws Exception {
        // Fail with a readable message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.format("Usage: %s <input-path> <output-path>%n",
                    GoogleSyntacticsJob4.class.getSimpleName());
            return 1;
        }

        JobConf conf = new JobConf(getConf(), GoogleSyntacticsJob4.class);
        conf.setJobName(GoogleSyntacticsJob4.class.getSimpleName());

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        // delete output path for testing purposes
        // FileSystem.get(conf).delete(new Path(args[1]), true);

        // Fall back to the default extractor configurations when none were supplied via -D.
        String extractorConfigurationFiles = conf.get(SHARED_CONSTANTS.PARAM_EXTRACTORCONFIGS);
        if (extractorConfigurationFiles == null) {
            extractorConfigurationFiles = StringUtils.join(SHARED_CONSTANTS.DEFAULT_EXTRACTOR_CONFIGURATIONS, ',');
            System.out.format("Extractorconfigurationfile parameter not set. Assuming -D%s=%s %n",
                    SHARED_CONSTANTS.PARAM_EXTRACTORCONFIGS, extractorConfigurationFiles);
            conf.set(SHARED_CONSTANTS.PARAM_EXTRACTORCONFIGS, extractorConfigurationFiles);
        }

        // Ship each extractor configuration file to the task nodes via the distributed cache.
        for (String extractorConfigurationFile : extractorConfigurationFiles.split(","))
            DistributedCache.addFileToClassPath(new Path(extractorConfigurationFile), conf);

        conf.setMapperClass(GoogleSyntacticsJob4Mapper.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setMapOutputKeyClass(NullWritable.class);
        conf.setMapOutputValueClass(Text.class);
        // Map-only job: with zero reduce tasks a combiner would never run, so none is configured.
        conf.setNumReduceTasks(0);

        JobClient.runJob(conf);
        return 0;
    }

    /**
     * Mapper: builds the UIMA analysis engine once per task in {@link #configure(JobConf)},
     * then processes each input line as a document and emits the concise old-format JoBim
     * strings for every {@link RepeatedSentence} found.
     */
    public static class GoogleSyntacticsJob4Mapper extends MapReduceBase
            implements Mapper<LongWritable, Text, NullWritable, Text> {

        AnalysisEngine _engine;
        JobimAnnotationExtractor[] _extractors;
        // CAS is created lazily on first use and reset()-reused across map() calls;
        // creating a fresh JCas per input record is needlessly expensive.
        JCas _cas;

        @Override
        public void configure(JobConf job) {
            try {
                String extractorConfigurationFiles = job.get(SHARED_CONSTANTS.PARAM_EXTRACTORCONFIGS);
                String[] extractorConfigurationFilesArr = extractorConfigurationFiles.split(",");
                // Map each configured file name onto its local distributed-cache copy.
                // getLocalCacheFiles() may return null (e.g. in the local job runner).
                Path[] localCacheFiles = DistributedCache.getLocalCacheFiles(job);
                if (localCacheFiles != null) {
                    for (int i = 0; i < extractorConfigurationFilesArr.length; i++) {
                        String extractorConfigurationFileName = new File(extractorConfigurationFilesArr[i]).getName();
                        for (Path p : localCacheFiles)
                            if (p.getName().contains(extractorConfigurationFileName))
                                extractorConfigurationFilesArr[i] = p.toString();
                    }
                }

                AggregateBuilder builder = new AggregateBuilder();
                // builder.add(AnalysisEngineFactory.createEngineDescription(MetaDataAnnotator.class));
                builder.add(AnalysisEngineFactory.createEngineDescription(JoBimRelationPipeline
                        .createGoogleSyntacticsRelationEngine(true/* create_tokens */, true/* create_sentences */,
                                true/* create_dependencies */, true/* create_new_relations */,
                                true/* create_dependency_path */, true/*ignore_nn_relations*/,
                                5/* dependecy_path_maxlength */, false/* create_detailed_output */,
                                null/* extractor_configuration */, null/* output_destination */)));

                _engine = UIMAFramework.produceAnalysisEngine(builder.createAggregateDescription()); // builder.createAggregate();

                try {
                    _extractors = new JobimAnnotationExtractor[extractorConfigurationFilesArr.length];
                    for (int i = 0; i < extractorConfigurationFilesArr.length; i++)
                        // NOTE(review): only the bare file name is passed here, discarding the
                        // local-cache paths resolved above — presumably the file is found via the
                        // task classpath (addFileToClassPath). Confirm; otherwise pass
                        // extractorConfigurationFilesArr[i] directly.
                        _extractors[i] = JobimExtractorConfiguration
                                .getExtractorFromXmlFile(new File(extractorConfigurationFilesArr[i]).getName());
                } catch (Exception e) {
                    throw new ResourceInitializationException(e);
                }

            } catch (Exception e) {
                // Mapper.configure cannot throw checked exceptions; fail the task loudly.
                throw new RuntimeException(e);
            }
        }

        // Reused output value to avoid per-record allocations.
        private Text _line = new Text();

        @Override
        public void map(LongWritable key, Text value, OutputCollector<NullWritable, Text> output, Reporter reporter)
                throws IOException {
            String casinput = value.toString();

            // Obtain a clean CAS: create once, then reset between records (equivalent to a
            // fresh CAS but far cheaper). Skip the record if the engine cannot supply one.
            try {
                if (_cas == null)
                    _cas = _engine.newJCas();
                else
                    _cas.reset();
            } catch (ResourceInitializationException e) {
                LOG.error("Could not initialize cas", e);
                return;
            }
            _cas.setDocumentText(casinput);
            DocumentMetaData meta = DocumentMetaData.create(_cas);
            // Use the byte offset of the line as the document id.
            meta.setDocumentId(key.toString());

            try {
                _engine.process(_cas);
            } catch (AnalysisEngineProcessException e) {
                // Log and skip the bad record instead of failing the whole task.
                LOG.error("Could not process cas", e);
                return;
            }

            // Emit one line per (JoBim pair x extractor) for every repeated sentence.
            Collection<RepeatedSentence> covering_annotations = JCasUtil.select(_cas, RepeatedSentence.class);
            for (RepeatedSentence covering_annotation : covering_annotations) {
                int repetitions = covering_annotation.getRepetitionCount();
                for (JoBim jb : JoBimPrinter.getJoBims(covering_annotation, false)) {
                    for (JobimAnnotationExtractor extractor : _extractors) {
                        _line.set(JoBimPrinter.get_concise_string_old_format(jb, covering_annotation, extractor,
                                repetitions));
                        output.collect(NullWritable.get(), _line);
                    }
                }
            }
        }
    }

}