Example usage for org.apache.hadoop.mapred FileInputFormat addInputPath

List of usage examples for org.apache.hadoop.mapred FileInputFormat addInputPath

Introduction

On this page you can find example usage for org.apache.hadoop.mapred FileInputFormat addInputPath.

Prototype

public static void addInputPath(JobConf conf, Path path) 

Document

Add a Path to the list of inputs for the map-reduce job.
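
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects on this page) showing how addInputPath accumulates input paths on a JobConf. The input paths are placeholder values.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;

public class AddInputPathSketch {
    public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Each call appends one more path to the job's input list; calling it
        // repeatedly builds up a comma-separated list rather than replacing it
        // (use FileInputFormat.setInputPaths to replace the whole list instead).
        FileInputFormat.addInputPath(conf, new Path("/data/input/2020"));
        FileInputFormat.addInputPath(conf, new Path("/data/input/2021"));
        // The accumulated list is stored in the job configuration
        // (under "mapred.input.dir" in the old mapred API).
        System.out.println(conf.get("mapred.input.dir"));
    }
}

The examples that follow show the same call as used in real projects, both for single paths and in loops over multiple configured inputs.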

Usage

From source file:gobblin.source.extractor.hadoop.OldApiHadoopFileInputSource.java

License:Apache License

@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    JobConf jobConf = new JobConf(new Configuration());
    for (String key : state.getPropertyNames()) {
        jobConf.set(key, state.getProp(key));
    }

    if (state.contains(HadoopFileInputSource.FILE_INPUT_PATHS_KEY)) {
        for (String inputPath : state.getPropAsList(HadoopFileInputSource.FILE_INPUT_PATHS_KEY)) {
            FileInputFormat.addInputPath(jobConf, new Path(inputPath));
        }
    }

    try {
        FileInputFormat<K, V> fileInputFormat = getFileInputFormat(state, jobConf);
        InputSplit[] fileSplits = fileInputFormat.getSplits(jobConf,
                state.getPropAsInt(HadoopFileInputSource.FILE_SPLITS_DESIRED_KEY,
                        HadoopFileInputSource.DEFAULT_FILE_SPLITS_DESIRED));
        if (fileSplits == null || fileSplits.length == 0) {
            return ImmutableList.of();
        }

        Extract.TableType tableType = state.contains(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY)
                ? Extract.TableType
                        .valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase())
                : null;
        String tableNamespace = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
        String tableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);

        List<WorkUnit> workUnits = Lists.newArrayListWithCapacity(fileSplits.length);
        for (InputSplit inputSplit : fileSplits) {
            // Create one WorkUnit per InputSplit
            FileSplit fileSplit = (FileSplit) inputSplit;
            Extract extract = createExtract(tableType, tableNamespace, tableName);
            WorkUnit workUnit = WorkUnit.create(extract);
            workUnit.setProp(HadoopFileInputSource.FILE_SPLIT_BYTES_STRING_KEY,
                    HadoopUtils.serializeToString(fileSplit));
            workUnit.setProp(HadoopFileInputSource.FILE_SPLIT_PATH_KEY, fileSplit.getPath().toString());
            workUnits.add(workUnit);
        }

        return workUnits;
    } catch (IOException ioe) {
        throw new RuntimeException("Failed to get workunits", ioe);
    }
}

From source file:it.isislab.sof.core.engine.hadoop.sshclient.utils.simulation.executor.SOF.java

License:Apache License

public static void main(String[] args) {

    /**
     * aids /home/michele/Scrivania/aids netlogo /home/michele/Scrivania/aids/aids.nlogo /home/michele/Scrivania/aids/input.tmp /home/michele/Scrivania/aids/output /home/michele/Scrivania/aids/output.xml false pepp ciao  
     *  
     */

    /*         try {//Runtime.getRuntime().exec("rm -r /home/lizard87/Desktop/mason_test/output");
        Runtime.getRuntime().exec("rm -r /home/michele/Scrivania/aids/output");
       } catch (IOException e) {e.printStackTrace();}*/

    if (args.length < 9 || args.length == 11 || args.length == 12 || args.length >= 15) {

        System.out.println("Usage:");
        System.out.println("java -jar SCUD.jar " + "<simulation_name> " + "<simulation_path_home> "
                + "<simulation_type[mason |netlogo |generic]>" + "<simulation_generic_interpreter_path>"
                + "<simultion_program_path> " + "<simulation_mapper_input_path> "
                + "<simulation_mapper_output_path> " + "<simulation_output_domain_xmlfile> "
                + "<simulation_input_path> " + "<<simulation_rating_path>>" + "<oneshot[one|loop]> "
                + "<author_name> " + "<simulation_description> " + "<path_interpreter_evaluate_file> "
                + "<evaluate_file_path>");
        System.exit(-1);
    }

    Configuration conf = null;
    JobConf job = null;

    String AUTHOR = null;/*author name*/
    String SIMULATION_NAME = null;/*simulation name*/
    String SIMULATION_HOME = null;/*path simulation*/
    String SIM_TYPE = null;/*mason, netlogo, generic*/
    String SIM_EXECUTABLE_SIMULATION_INTERPRETER_PATH = null;
    String SIM_EXECUTABLE_SIMULATION_PROGRAM = null; /*executable program *.jar | *.nlogo*/
    String SIM_EXECUTION_INPUT_DATA_MAPPER = null;/*input.data path */
    String SIM_EXECUTION_OUTPUT_MAPPER = null;/*output loop(i) path*/
    String SIM_DESCRIPTION_OUTPUT_XML_DOMAIN = null;/*path of domain file */
    String SIM_EXECUTION_INPUT_XML = null;/*execution input path*/
    boolean ISLOOP = false;/*false[one] | true[loop]*/
    //String DESCRIPTION=null;/*simulations' description*/
    String INTERPRETER_REMOTE_PATH_EVALUATION = null;/*remote program bin path for executing EvalFoo*/
    String EXECUTABLE_RATING_FILE = null;/*path of rating file*/
    String SIM_RATING_PATH = null;

    // aids /home/michele/Scrivania/aids netlogo /home/michele/Scrivania/aids/aids.nlogo /home/michele/Scrivania/aids/input.tmp /home/michele/Scrivania/aids/output /home/michele/Scrivania/aids/domain.xml /home/michele/Scrivania/aids/input loop pepp ciao /usr/bin/python /home/michele/Scrivania/aids/evaluate.py 

    if (args.length == 13) {
        SIMULATION_NAME = args[0];
        SIMULATION_HOME = args[1];
        SIM_TYPE = args[2];
        SIM_EXECUTABLE_SIMULATION_PROGRAM = args[3];
        SIM_EXECUTION_INPUT_DATA_MAPPER = args[4];
        SIM_EXECUTION_OUTPUT_MAPPER = args[5];
        SIM_DESCRIPTION_OUTPUT_XML_DOMAIN = args[6];
        SIM_EXECUTION_INPUT_XML = args[7];
        SIM_RATING_PATH = args[8];
        ISLOOP = Boolean.parseBoolean(args[9]);
        AUTHOR = args[10];
        //DESCRIPTION=args[11];
        INTERPRETER_REMOTE_PATH_EVALUATION = args[11];
        EXECUTABLE_RATING_FILE = args[12];
        //   System.out.println(DESCRIPTION);
        //System.out.println(INTERPRETER_REMOTE_PATH_EVALUATION);

    }

    else if (args.length == 9) {
        SIMULATION_NAME = args[0];
        SIMULATION_HOME = args[1];
        SIM_TYPE = args[2];
        SIM_EXECUTABLE_SIMULATION_PROGRAM = args[3];
        SIM_EXECUTION_INPUT_DATA_MAPPER = args[4];
        SIM_EXECUTION_OUTPUT_MAPPER = args[5];
        SIM_DESCRIPTION_OUTPUT_XML_DOMAIN = args[6];
        ISLOOP = Boolean.parseBoolean(args[7]);
        AUTHOR = args[8];
        //DESCRIPTION=args[9];
    }

    else if (args.length == 14) {
        SIMULATION_NAME = args[0];
        SIMULATION_HOME = args[1];
        SIM_TYPE = args[2];
        SIM_EXECUTABLE_SIMULATION_INTERPRETER_PATH = args[3];
        SIM_EXECUTABLE_SIMULATION_PROGRAM = args[4];
        SIM_EXECUTION_INPUT_DATA_MAPPER = args[5];
        SIM_EXECUTION_OUTPUT_MAPPER = args[6];
        SIM_DESCRIPTION_OUTPUT_XML_DOMAIN = args[7];
        SIM_EXECUTION_INPUT_XML = args[8];
        SIM_RATING_PATH = args[9];
        ISLOOP = Boolean.parseBoolean(args[10]);
        AUTHOR = args[11];
        //   DESCRIPTION=args[12];
        INTERPRETER_REMOTE_PATH_EVALUATION = args[12];
        EXECUTABLE_RATING_FILE = args[13];

    }

    else if (args.length == 10) {
        SIMULATION_NAME = args[0];
        SIMULATION_HOME = args[1];
        SIM_TYPE = args[2];
        SIM_EXECUTABLE_SIMULATION_INTERPRETER_PATH = args[3];
        SIM_EXECUTABLE_SIMULATION_PROGRAM = args[4];
        SIM_EXECUTION_INPUT_DATA_MAPPER = args[5];
        SIM_EXECUTION_OUTPUT_MAPPER = args[6];
        SIM_DESCRIPTION_OUTPUT_XML_DOMAIN = args[7];
        ISLOOP = Boolean.parseBoolean(args[8]);
        AUTHOR = args[9];
        //   DESCRIPTION=args[10];
    }

    if (!(SIM_TYPE.equalsIgnoreCase("mason") || SIM_TYPE.equalsIgnoreCase("netlogo")
            || SIM_TYPE.equalsIgnoreCase("generic"))) {
        System.exit(-2);
    }

    conf = new Configuration();
    job = new JobConf(conf, SOF.class);
    job.setJobName(SIMULATION_NAME/*SIMULATION NAME*/);
    job.set("simulation.home", SIMULATION_HOME);
    job.set("simulation.name", SIMULATION_NAME);
    job.set("simulation.type", SIM_TYPE);

    if (SIM_TYPE.equalsIgnoreCase("generic")) {
        job.set("simulation.interpreter.genericsim", SIM_EXECUTABLE_SIMULATION_INTERPRETER_PATH);
    }

    job.set("simulation.program.simulation", SIM_EXECUTABLE_SIMULATION_PROGRAM);
    job.set("simulation.executable.input", SIM_EXECUTION_INPUT_DATA_MAPPER);
    job.set("simulation.executable.output", SIM_EXECUTION_OUTPUT_MAPPER);
    job.setBoolean("simulation.executable.mode", ISLOOP);
    //job.set("simulation.executable.mode", ISLOOP);
    job.set("simulation.executable.author", AUTHOR);
    //job.set("simulation.executable.description", DESCRIPTION);
    job.set("simulation.description.output.domain", SIM_DESCRIPTION_OUTPUT_XML_DOMAIN);

    /**
     * GENERATES THE .TMP FILE
     * COMMENT OUT THE LINE
     * LOCAL TEST
     * FOR LOCAL USE ONLY
     */
    //XmlToText.convertXmlFileToFileText(conf,"/home/lizard87/Desktop/mason_test/input.xml");
    //XmlToText.convertXmlFileToFileText(conf,"/home/lizard87/Desktop/input.xml");
    //XmlToText.convertXmlFileToFileText(conf,"/home/lizard87/Desktop/aids/input.xml");

    if (ISLOOP) {
        job.set("simulation.description.input", SIM_EXECUTION_INPUT_XML);
        job.set("simulation.program.rating", EXECUTABLE_RATING_FILE);
        //job.set("simulation.interpreter.selection", INTERPRETER_REMOTE_PATH_SELECTION);
        job.set("simulation.interpreter.rating", INTERPRETER_REMOTE_PATH_EVALUATION);
        job.set("simulation.executable.loop.rating", SIM_RATING_PATH);
    }

    FileInputFormat.addInputPath(job, new Path(SIM_EXECUTION_INPUT_DATA_MAPPER)/*DIRECTORY INPUT*/);
    FileOutputFormat.setOutputPath(job, new Path(SIM_EXECUTION_OUTPUT_MAPPER));

    if (SIM_TYPE.equalsIgnoreCase("mason")) {
        job.setMapperClass(SOFMapperMason.class);
        job.setReducerClass(SOFReducerMason.class);

    } else if (SIM_TYPE.equalsIgnoreCase("netlogo")) {

        job.setMapperClass(SOFMapperNetLogo.class);
        job.setReducerClass(SOFReducerNetLogo.class);
    } else if (SIM_TYPE.equalsIgnoreCase("generic")) {
        job.setMapperClass(SOFMapperGeneric.class);
        job.setReducerClass(SOFReducerGeneric.class);
    }

    job.setOutputKeyClass(org.apache.hadoop.io.Text.class);
    job.setOutputValueClass(org.apache.hadoop.io.Text.class);

    JobClient jobc;

    try {
        jobc = new JobClient(job);
        System.out.println(jobc + " " + job);
        RunningJob runjob;
        runjob = JobClient.runJob(job);
        while (runjob.getJobStatus().equals(JobStatus.SUCCEEDED)) {
        }
        System.exit(0);
    } catch (IOException e) {

        e.printStackTrace();
    }

}

From source file:ivory.core.index.MergeGlobalStatsAcrossIndexSegments.java

License:Apache License

public int runTool() throws Exception {

    JobConf conf = new JobConf(getConf(), MergeGlobalStatsAcrossIndexSegments.class);
    FileSystem fs = FileSystem.get(conf);

    String collectionName = conf.get("Ivory.CollectionName");
    String indexPaths = conf.get("Ivory.IndexPaths");
    String dataOutputPath = conf.get("Ivory.DataOutputPath");
    int dfThreshold = conf.getInt("Ivory.DfThreshold", 0);

    // first, compute size of global term space
    Path tmpPaths = new Path("/tmp/index-paths.txt");

    FSDataOutputStream out = fs.create(tmpPaths, true);
    for (String s : indexPaths.split(",")) {
        out.write(new String(s + "\n").getBytes());
    }
    out.close();

    LOG.info("Job: ComputeNumberOfTermsAcrossIndexSegments");
    conf.setJobName("ComputeNumberOfTermsAcrossIndexSegments:" + collectionName);

    FileInputFormat.addInputPath(conf, tmpPaths);

    conf.setNumMapTasks(1);
    conf.setNumReduceTasks(1);

    conf.set("mapred.child.java.opts", "-Xmx2048m");

    conf.setInputFormat(NLineInputFormat.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(PairOfIntLong.class);
    conf.setOutputFormat(NullOutputFormat.class);

    conf.setMapperClass(MyMapper.class);
    conf.setReducerClass(IdentityReducer.class);

    long startTime = System.currentTimeMillis();
    RunningJob job = JobClient.runJob(conf);
    LOG.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    Counters counters = job.getCounters();

    long totalNumTerms = counters.findCounter("org.apache.hadoop.mapred.Task$Counter", 6, "REDUCE_INPUT_GROUPS")
            .getCounter();

    LOG.info("total number of terms in global dictionary = " + totalNumTerms);

    // now build the dictionary
    fs.delete(new Path(dataOutputPath), true);

    conf = new JobConf(getConf(), MergeGlobalStatsAcrossIndexSegments.class);

    LOG.info("Job: MergeGlobalStatsAcrossIndexSegments");
    conf.setJobName("MergeGlobalStatsAcrossIndexSegments:" + collectionName);

    FileInputFormat.addInputPath(conf, tmpPaths);

    conf.setNumMapTasks(1);
    conf.setNumReduceTasks(1);

    conf.set("mapred.child.java.opts", "-Xmx2048m");

    conf.setInputFormat(NLineInputFormat.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(PairOfIntLong.class);
    conf.setOutputFormat(NullOutputFormat.class);

    conf.setMapperClass(MyMapper.class);
    conf.setReducerClass(MyReducer.class);

    conf.setLong("Ivory.IndexNumberOfTerms", (int) totalNumTerms);

    startTime = System.currentTimeMillis();
    job = JobClient.runJob(conf);
    LOG.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // compute some # docs, collection length, avg doc length
    long collectionLength = 0;
    int docCount = 0;
    for (String index : indexPaths.split(",")) {
        LOG.info("reading stats for " + index);

        RetrievalEnvironment env = new RetrievalEnvironment(index, fs);

        long l = env.readCollectionLength();
        int n = env.readCollectionDocumentCount();

        LOG.info(" - CollectionLength: " + l);
        LOG.info(" - CollectionDocumentCount: " + n);

        collectionLength += l;
        docCount += n;
    }

    float avgdl = (float) collectionLength / docCount;

    LOG.info("all index segments: ");
    LOG.info(" - CollectionLength: " + collectionLength);
    LOG.info(" - CollectionDocumentCount: " + docCount);
    LOG.info(" - AverageDocumentLenght: " + avgdl);

    RetrievalEnvironment env = new RetrievalEnvironment(dataOutputPath, fs);

    env.writeCollectionAverageDocumentLength(avgdl);
    env.writeCollectionLength(collectionLength);
    env.writeCollectionDocumentCount(docCount);

    return 0;
}

From source file:ivory.preprocess.BuildTermDocVectors.java

License:Apache License

@SuppressWarnings("unchecked")
public int runTool() throws Exception {
    // create a new JobConf, inheriting from the configuration of this
    // PowerTool
    JobConf conf = new JobConf(getConf(), BuildTermDocVectors.class);
    FileSystem fs = FileSystem.get(conf);

    String indexPath = conf.get("Ivory.IndexPath");
    int mapTasks = conf.getInt("Ivory.NumMapTasks", 0);

    String collectionName = conf.get("Ivory.CollectionName");
    String collectionPath = conf.get("Ivory.CollectionPath");
    String inputFormat = conf.get("Ivory.InputFormat");
    String tokenizer = conf.get("Ivory.Tokenizer");
    String mappingClass = conf.get("Ivory.DocnoMappingClass");

    sLogger.info("PowerTool: BuildTermDocVectors");
    sLogger.info(" - CollectionName: " + collectionName);
    sLogger.info(" - CollectionPath: " + collectionPath);
    sLogger.info(" - InputputFormat: " + inputFormat);
    sLogger.info(" - Tokenizer: " + tokenizer);
    sLogger.info(" - DocnoMappingClass: " + mappingClass);
    sLogger.info(" - NumMapTasks: " + mapTasks);
    sLogger.info(" - NumReduceTasks: " + 0);

    RetrievalEnvironment env = new RetrievalEnvironment(indexPath, fs);
    Path mappingFile = env.getDocnoMappingData();

    if (!fs.exists(mappingFile)) {
        sLogger.error("Error, docno mapping data file " + mappingFile + "doesn't exist!");
        return 0;
    }

    DistributedCache.addCacheFile(mappingFile.toUri(), conf);

    conf.setJobName("BuildTermDocVectors:" + collectionName);

    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(0);

    if (collectionPath.indexOf(",") == -1) {
        FileInputFormat.setInputPaths(conf, new Path(collectionPath));
        sLogger.info("Adding input path " + collectionPath);
    } else {
        String[] paths = collectionPath.split(",");
        for (String p : paths) {
            FileInputFormat.addInputPath(conf, new Path(p));
            sLogger.info("Adding input path " + p);
        }
    }

    Path outputPath = new Path(env.getTermDocVectorsDirectory());
    if (fs.exists(outputPath)) {
        sLogger.info("TermDocVectors already exist: Skipping!");
    } else {
        env.writeCollectionName(collectionName);
        env.writeCollectionPath(collectionPath);
        env.writeInputFormat(inputFormat);
        env.writeDocnoMappingClass(mappingClass);
        env.writeTokenizerClass(tokenizer);

        conf.set("mapred.child.java.opts", "-Xmx2048m");
        conf.setInt("mapred.task.timeout", 60000000);

        FileOutputFormat.setOutputPath(conf, outputPath);

        conf.setInputFormat((Class<? extends InputFormat>) Class.forName(inputFormat));
        conf.setOutputFormat(SequenceFileOutputFormat.class);
        SequenceFileOutputFormat.setOutputCompressionType(conf, SequenceFile.CompressionType.RECORD);

        conf.setMapOutputKeyClass(IntWritable.class);
        conf.setMapOutputValueClass(LazyTermDocVector.class);
        conf.setOutputKeyClass(IntWritable.class);
        conf.setOutputValueClass(LazyTermDocVector.class);

        conf.setMapperClass(MyMapper.class);

        long startTime = System.currentTimeMillis();
        RunningJob job = JobClient.runJob(conf);
        sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

        Counters counters = job.getCounters();

        // write out number of postings
        int collectionDocCount = (int) counters.findCounter(Docs.Total).getCounter();
        env.writeCollectionDocumentCount(collectionDocCount);
    }

    if (fs.exists(env.getDoclengthsData())) {
        sLogger.info("DocLength data exists: Skipping!");
        return 0;
    }

    int collectionDocCount = env.readCollectionDocumentCount();
    long startTime = System.currentTimeMillis();
    writeDoclengthsData(collectionDocCount);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
    return 0;
}

From source file:mapreducecountersoldapi.MapReduceCountersOldApi.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage: MapReduceCounters <input path> <output path>");
        System.exit(-1);
    }

    //Job job = new Job();
    JobConf conf = new JobConf(MapReduceCountersOldApi.class);
    //job.setJarByClass(MapReduceCountersOldApi.class);
    conf.setJobName("Clics old Api");
    //int Reducers = parseInt(args[2]);
    //job.setNumReduceTasks(Reducers);
    FileInputFormat.addInputPath(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    conf.setMapperClass(MaxCountMapperOldApi.class);
    conf.setReducerClass(MaxCountReducerOldApi.class);

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(LongWritable.class);

    //job.setNumReduceTasks(tasks);
    //          FileSystem fs = FileSystem.newInstance(job.getConfiguration());
    //
    //        Path inputFilePath = new Path(args[0]);
    //   Path outputFilePath = new Path(args[1]);
    //   if (fs.exists(outputFilePath)) {
    //            fs.delete(outputFilePath, true);
    //   }

    //System.exit(job.waitForCompletion(true) ? 0 : 1);
    JobClient.runJob(conf);
}

From source file:net.peacesoft.nutch.crawl.ReCrawlDb.java

License:Apache License

public void update(Path crawlDb, Path[] segments, boolean normalize, boolean filter, boolean additionsAllowed,
        boolean force) throws IOException {
    try {
        FileSystem fs = FileSystem.get(getConf());
        Path lock = new Path(crawlDb, LOCK_NAME);
        try {
            LockUtil.createLockFile(fs, lock, force);
        } catch (Exception ex) {
        }
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long start = System.currentTimeMillis();

        JobConf job = ReCrawlDb.createJob(getConf(), crawlDb);
        job.setBoolean(CRAWLDB_ADDITIONS_ALLOWED, additionsAllowed);
        job.setBoolean(CrawlDbFilter.URL_FILTERING, filter);
        job.setBoolean(CrawlDbFilter.URL_NORMALIZING, normalize);

        boolean url404Purging = job.getBoolean(CRAWLDB_PURGE_404, false);

        if (LOG.isInfoEnabled()) {
            LOG.info("CrawlDb update: starting at " + sdf.format(start));
            LOG.info("CrawlDb update: db: " + crawlDb);
            LOG.info("CrawlDb update: segments: " + Arrays.asList(segments));
            LOG.info("CrawlDb update: additions allowed: " + additionsAllowed);
            LOG.info("CrawlDb update: URL normalizing: " + normalize);
            LOG.info("CrawlDb update: URL filtering: " + filter);
            LOG.info("CrawlDb update: 404 purging: " + url404Purging);
        }

        for (int i = 0; i < segments.length; i++) {
            Path fetch = new Path(segments[i], CrawlDatum.FETCH_DIR_NAME);
            Path parse = new Path(segments[i], CrawlDatum.PARSE_DIR_NAME);
            if (fs.exists(fetch) && fs.exists(parse)) {
                FileInputFormat.addInputPath(job, fetch);
                FileInputFormat.addInputPath(job, parse);
            } else {
                LOG.info(" - skipping invalid segment " + segments[i]);
            }
        }

        if (LOG.isInfoEnabled()) {
            LOG.info("ReCrawlDb update: Merging segment data into db.");
        }
        try {
            JobClient.runJob(job);
        } catch (IOException e) {
            LockUtil.removeLockFile(fs, lock);
            Path outPath = FileOutputFormat.getOutputPath(job);
            if (fs.exists(outPath)) {
                fs.delete(outPath, true);
            }
            throw e;
        }

        ReCrawlDb.install(job, crawlDb);
        long end = System.currentTimeMillis();
        LOG.info("CrawlDb update: finished at " + sdf.format(end) + ", elapsed: "
                + TimingUtil.elapsedTime(start, end));
    } catch (Exception ex) {
        LOG.error("ReCrawlDb update error: " + ex.toString(), ex);
    }
}

From source file:net.peacesoft.nutch.crawl.ReCrawlDb.java

License:Apache License

public static JobConf createJob(Configuration config, Path crawlDb) throws IOException {
    Path newCrawlDb = new Path(crawlDb, Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));

    JobConf job = new NutchJob(config);
    job.setJobName("crawldb " + crawlDb);

    Path current = new Path(crawlDb, CURRENT_NAME);
    if (FileSystem.get(job).exists(current)) {
        FileInputFormat.addInputPath(job, current);
    }
    job.setInputFormat(SequenceFileInputFormat.class);

    job.setMapperClass(CrawlDbFilter.class);
    job.setReducerClass(CrawlDbReducer.class);

    FileOutputFormat.setOutputPath(job, newCrawlDb);
    job.setOutputFormat(MapFileOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(CrawlDatum.class);

    // https://issues.apache.org/jira/browse/NUTCH-1110
    job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);

    return job;
}

From source file:net.peacesoft.nutch.crawl.ReFetcher.java

License:Apache License

public void fetch(Path segment, int threads) throws IOException {

    checkConfiguration();

    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    long start = System.currentTimeMillis();
    if (LOG.isInfoEnabled()) {
        LOG.info("Fetcher: starting at " + sdf.format(start));
        LOG.info("Fetcher: segment: " + segment);
    }

    // set the actual time for the timelimit relative
    // to the beginning of the whole job and not of a specific task
    // otherwise it keeps trying again if a task fails
    long timelimit = getConf().getLong("ReFetcher.timelimit.mins", -1);
    if (timelimit != -1) {
        timelimit = System.currentTimeMillis() + (timelimit * 60 * 1000);
        LOG.info("Fetcher Timelimit set for : " + timelimit);
        getConf().setLong("fetcher.timelimit", timelimit);
    }

    // Set the time limit after which the throughput threshold feature is enabled
    timelimit = getConf().getLong("fetcher.throughput.threshold.check.after", 10);
    timelimit = System.currentTimeMillis() + (timelimit * 60 * 1000);
    getConf().setLong("fetcher.throughput.threshold.check.after", timelimit);

    int maxOutlinkDepth = getConf().getInt("fetcher.follow.outlinks.depth", -1);
    if (maxOutlinkDepth > 0) {
        LOG.info("Fetcher: following outlinks up to depth: " + Integer.toString(maxOutlinkDepth));

        int maxOutlinkDepthNumLinks = getConf().getInt("fetcher.follow.outlinks.num.links", 4);
        int outlinksDepthDivisor = getConf().getInt("fetcher.follow.outlinks.depth.divisor", 2);

        int totalOutlinksToFollow = 0;
        for (int i = 0; i < maxOutlinkDepth; i++) {
            totalOutlinksToFollow += (int) Math.floor(outlinksDepthDivisor / (i + 1) * maxOutlinkDepthNumLinks);
        }

        LOG.info("Fetcher: maximum outlinks to follow: " + Integer.toString(totalOutlinksToFollow));
    }

    JobConf job = new NutchJob(getConf());
    job.setJobName("fetch " + segment);

    job.setInt("fetcher.threads.fetch", threads);
    job.set(Nutch.SEGMENT_NAME_KEY, segment.getName());

    // for politeness, don't permit parallel execution of a single task
    job.setSpeculativeExecution(false);

    FileInputFormat.addInputPath(job, new Path(segment, CrawlDatum.GENERATE_DIR_NAME));
    job.setInputFormat(ReFetcher.InputFormat.class);

    job.setMapRunnerClass(ReFetcher.class);

    FileOutputFormat.setOutputPath(job, segment);
    job.setOutputFormat(FetcherOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NutchWritable.class);

    JobClient.runJob(job);

    long end = System.currentTimeMillis();
    LOG.info("Fetcher: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}

From source file:net.peacesoft.nutch.crawl.ReGenerator.java

License:Apache License

/**
 * Generate fetchlists in one or more segments. Whether to filter URLs or
 * not is read from the crawl.generate.filter property in the configuration
 * files. If the property is not found, the URLs are filtered. Same for the
 * normalisation.
 *
 * @param dbDir Crawl database directory
 * @param segments Segments directory
 * @param numLists Number of reduce tasks
 * @param topN Number of top URLs to be selected
 * @param curTime Current time in milliseconds
 *
 * @return Path to generated segment or null if no entries were selected
 *
 * @throws IOException When an I/O error occurs
 */
public Path[] generate(Path dbDir, Path segments, int numLists, long topN, long curTime, boolean filter,
        boolean norm, boolean force, int maxNumSegments) throws IOException {
    try {
        Path tempDir = new Path(
                getConf().get("mapred.temp.dir", ".") + "/generate-temp-" + System.currentTimeMillis());

        Path lock = new Path(dbDir, CrawlDb.LOCK_NAME);
        FileSystem fs = FileSystem.get(getConf());
        LockUtil.createLockFile(fs, lock, force);

        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long start = System.currentTimeMillis();
        LOG.info("ReGenerator: starting at " + sdf.format(start));
        LOG.info("ReGenerator: Selecting best-scoring urls due for fetch.");
        LOG.info("ReGenerator: filtering: " + filter);
        LOG.info("ReGenerator: normalizing: " + norm);
        if (topN != Long.MAX_VALUE) {
            LOG.info("ReGenerator: topN: " + topN);
        }

        if ("true".equals(getConf().get(GENERATE_MAX_PER_HOST_BY_IP))) {
            LOG.info(
                    "ReGenerator: GENERATE_MAX_PER_HOST_BY_IP will be ignored, use partition.url.mode instead");
        }

        // map to inverted subset due for fetch, sort by score
        JobConf job = new NutchJob(getConf());
        job.setJobName("generate: select from " + dbDir);

        if (numLists == -1) { // for politeness make
            numLists = job.getNumMapTasks(); // a partition per fetch task
        }
        if ("local".equals(job.get("mapred.job.tracker")) && numLists != 1) {
            // override
            LOG.info("ReGenerator: jobtracker is 'local', generating exactly one partition.");
            numLists = 1;
        }
        job.setLong(GENERATOR_CUR_TIME, curTime);
        // record real generation time
        long generateTime = System.currentTimeMillis();
        job.setLong(Nutch.GENERATE_TIME_KEY, generateTime);
        job.setLong(GENERATOR_TOP_N, topN);
        job.setBoolean(GENERATOR_FILTER, filter);
        job.setBoolean(GENERATOR_NORMALISE, norm);
        job.setInt(GENERATOR_MAX_NUM_SEGMENTS, maxNumSegments);

        FileInputFormat.addInputPath(job, new Path(dbDir, CrawlDb.CURRENT_NAME));
        job.setInputFormat(SequenceFileInputFormat.class);

        job.setMapperClass(Selector.class);
        job.setPartitionerClass(Selector.class);
        job.setReducerClass(Selector.class);

        FileOutputFormat.setOutputPath(job, tempDir);
        job.setOutputFormat(SequenceFileOutputFormat.class);
        job.setOutputKeyClass(FloatWritable.class);
        job.setOutputKeyComparatorClass(DecreasingFloatComparator.class);
        job.setOutputValueClass(SelectorEntry.class);
        job.setOutputFormat(GeneratorOutputFormat.class);

        try {
            JobClient.runJob(job);
        } catch (IOException e) {
            throw e;
        }

        // read the subdirectories generated in the temp
        // output and turn them into segments
        List<Path> generatedSegments = new ArrayList<Path>();

        FileStatus[] status = fs.listStatus(tempDir);
        try {
            for (FileStatus stat : status) {
                Path subfetchlist = stat.getPath();
                if (!subfetchlist.getName().startsWith("fetchlist-")) {
                    continue;
                }
                // start a new partition job for this segment
                Path newSeg = partitionSegment(fs, segments, subfetchlist, numLists);
                generatedSegments.add(newSeg);
            }
        } catch (Exception e) {
            LOG.warn("ReGenerator: exception while partitioning segments, exiting ...");
            fs.delete(tempDir, true);
            return null;
        }

        if (generatedSegments.size() == 0) {
            LOG.warn("ReGenerator: 0 records selected for fetching, exiting ...");
            LockUtil.removeLockFile(fs, lock);
            fs.delete(tempDir, true);
            return null;
        }

        if (getConf().getBoolean(GENERATE_UPDATE_CRAWLDB, false)) {
            // update the db from tempDir
            Path tempDir2 = new Path(
                    getConf().get("mapred.temp.dir", ".") + "/generate-temp-" + System.currentTimeMillis());

            job = new NutchJob(getConf());
            job.setJobName("generate: updatedb " + dbDir);
            job.setLong(Nutch.GENERATE_TIME_KEY, generateTime);
            for (Path segmpaths : generatedSegments) {
                Path subGenDir = new Path(segmpaths, CrawlDatum.GENERATE_DIR_NAME);
                FileInputFormat.addInputPath(job, subGenDir);
            }
            FileInputFormat.addInputPath(job, new Path(dbDir, CrawlDb.CURRENT_NAME));
            job.setInputFormat(SequenceFileInputFormat.class);
            job.setMapperClass(CrawlDbUpdater.class);
            job.setReducerClass(CrawlDbUpdater.class);
            job.setOutputFormat(MapFileOutputFormat.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(CrawlDatum.class);
            FileOutputFormat.setOutputPath(job, tempDir2);
            try {
                JobClient.runJob(job);
                CrawlDb.install(job, dbDir);
            } catch (IOException e) {
                LockUtil.removeLockFile(fs, lock);
                fs.delete(tempDir, true);
                fs.delete(tempDir2, true);
                throw e;
            }
            fs.delete(tempDir2, true);
        }

        LockUtil.removeLockFile(fs, lock);
        fs.delete(tempDir, true);

        long end = System.currentTimeMillis();
        LOG.info("ReGenerator: finished at " + sdf.format(end) + ", elapsed: "
                + TimingUtil.elapsedTime(start, end));

        Path[] patharray = new Path[generatedSegments.size()];
        return generatedSegments.toArray(patharray);
    } catch (Exception ex) {
        LOG.error("ReGenerator generate error: " + ex.toString(), ex);
        return null;
    }
}

From source file:net.peacesoft.nutch.crawl.ReGenerator.java

License:Apache License

private Path partitionSegment(FileSystem fs, Path segmentsDir, Path inputDir, int numLists) throws IOException {
    // invert again, partition by host/domain/IP, sort by url hash
    if (LOG.isInfoEnabled()) {
        LOG.info("ReGenerator: Partitioning selected urls for politeness.");
    }
    Path segment = new Path(segmentsDir, generateSegmentName());
    Path output = new Path(segment, CrawlDatum.GENERATE_DIR_NAME);

    LOG.info("ReGenerator: segment: " + segment);

    NutchJob job = new NutchJob(getConf());
    job.setJobName("generate: partition " + segment);

    job.setInt("partition.url.seed", new Random().nextInt());

    FileInputFormat.addInputPath(job, inputDir);
    job.setInputFormat(SequenceFileInputFormat.class);

    job.setMapperClass(SelectorInverseMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(SelectorEntry.class);
    job.setPartitionerClass(URLPartitioner.class);
    job.setReducerClass(PartitionReducer.class);
    job.setNumReduceTasks(numLists);

    FileOutputFormat.setOutputPath(job, output);
    job.setOutputFormat(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(CrawlDatum.class);
    job.setOutputKeyComparatorClass(HashComparator.class);
    JobClient.runJob(job);
    return segment;
}