Example usage for org.apache.hadoop.mapreduce Job isComplete

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce Job isComplete.

Prototype

public boolean isComplete() throws IOException 

Document

Check if the job is finished or not.
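
As a quick orientation before the examples, here is a minimal sketch of the polling pattern they all share: submit the job, poll isComplete() until it returns true, then check isSuccessful(). The class name and sleep interval below are illustrative, not part of the Hadoop API.

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public class JobCompletionExample {
    // Submits the job, polls isComplete() until the job finishes,
    // then reports whether it finished successfully.
    public static boolean submitAndWait(Job job)
            throws IOException, InterruptedException, ClassNotFoundException {
        job.submit();
        while (!job.isComplete()) {
            Thread.sleep(500); // modest polling interval to avoid hammering the cluster
        }
        return job.isSuccessful();
    }
}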

Usage

From source file:org.apache.parquet.hadoop.DeprecatedInputFormatTest.java

License:Apache License

private void waitForJob(Job job) throws InterruptedException, IOException {
    while (!job.isComplete()) {
        System.out.println("waiting for job " + job.getJobName());
        sleep(100);
    }
    System.out.println(
            "status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file:org.apache.parquet.hadoop.example.TestInputOutputFormat.java

License:Apache License

private void waitForJob(Job job) throws InterruptedException, IOException {
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(100);
    }
    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file:org.apache.parquet.hadoop.TestInputFormatColumnProjection.java

License:Apache License

private void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        sleep(100);
    }
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file:org.apache.parquet.hadoop.thrift.TestInputOutputFormat.java

License:Apache License

public static void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(100);
    }
    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file:org.apache.parquet.proto.utils.WriteUsingMR.java

License:Apache License

static void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(50);
    }
    LOG.debug("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file:org.apache.solr.hadoop.MorphlineBasicMiniMRTest.java

License:Apache License

@Test
public void mrRun() throws Exception {
    FileSystem fs = dfsCluster.getFileSystem();
    Path inDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/input"));
    fs.delete(inDir, true);
    String DATADIR = "/user/testing/testMapperReducer/data";
    Path dataDir = fs.makeQualified(new Path(DATADIR));
    fs.delete(dataDir, true);
    Path outDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/output"));
    fs.delete(outDir, true);

    assertTrue(fs.mkdirs(inDir));
    Path INPATH = new Path(inDir, "input.txt");
    OutputStream os = fs.create(INPATH);
    Writer wr = new OutputStreamWriter(os, "UTF-8");
    wr.write(DATADIR + "/" + inputAvroFile);
    wr.close();

    assertTrue(fs.mkdirs(dataDir));
    fs.copyFromLocalFile(new Path(DOCUMENTS_DIR, inputAvroFile), dataDir);

    JobConf jobConf = getJobConf();
    if (ENABLE_LOCAL_JOB_RUNNER) { // enable Hadoop LocalJobRunner; this makes it possible to run in a debugger and set breakpoints
        jobConf.set("mapred.job.tracker", "local");
    }
    jobConf.setMaxMapAttempts(1);
    jobConf.setMaxReduceAttempts(1);
    jobConf.setJar(SEARCH_ARCHIVES_JAR);
    jobConf.setBoolean(ExtractingParams.IGNORE_TIKA_EXCEPTION, false);

    int shards = 2;
    int maxReducers = Integer.MAX_VALUE;
    if (ENABLE_LOCAL_JOB_RUNNER) {
        // local job runner has a couple of limitations: only one reducer is supported and the DistributedCache doesn't work.
        // see http://blog.cloudera.com/blog/2009/07/advice-on-qa-testing-your-mapreduce-jobs/
        maxReducers = 1;
        shards = 1;
    }

    String[] args = new String[] {
            "--morphline-file=" + RESOURCES_DIR + "/test-morphlines/solrCellDocumentTypes.conf",
            "--morphline-id=morphline1", "--solr-home-dir=" + MINIMR_CONF_DIR.getAbsolutePath(),
            "--output-dir=" + outDir.toString(), "--shards=" + shards, "--verbose",
            numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
            numRuns % 3 == 0 ? "--reducers=" + shards
                    : (numRuns % 3 == 1 ? "--reducers=-1" : "--reducers=" + Math.min(8, maxReducers)) };
    if (numRuns % 3 == 2) {
        args = concat(args, new String[] { "--fanout=2" });
    }
    if (numRuns == 0) {
        // force (slow) MapReduce based randomization to get coverage for that as well
        args = concat(new String[] { "-D", MapReduceIndexerTool.MAIN_MEMORY_RANDOMIZATION_THRESHOLD + "=-1" },
                args);
    }
    MapReduceIndexerTool tool = createTool();
    int res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);
    Job job = tool.job;
    assertTrue(job.isComplete());
    assertTrue(job.isSuccessful());

    if (numRuns % 3 != 2) {
        // Only run this check if mtree merge is disabled.
        // With mtree merge enabled the BatchWriter counters aren't available anymore because 
        // variable "job" now refers to the merge job rather than the indexing job
        assertEquals(
                "Invalid counter " + SolrRecordWriter.class.getName() + "." + SolrCounters.DOCUMENTS_WRITTEN,
                count,
                job.getCounters()
                        .findCounter(SolrCounters.class.getName(), SolrCounters.DOCUMENTS_WRITTEN.toString())
                        .getValue());
    }

    // Check the output is as expected
    outDir = new Path(outDir, MapReduceIndexerTool.RESULTS_DIR);
    Path[] outputFiles = FileUtil.stat2Paths(fs.listStatus(outDir));

    System.out.println("outputfiles:" + Arrays.toString(outputFiles));

    TestUtils.validateSolrServerDocumentCount(MINIMR_CONF_DIR, fs, outDir, count, shards);

    // run again with --dry-run mode:
    tool = createTool();
    args = concat(args, new String[] { "--dry-run" });
    res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);

    numRuns++;
}

From source file:org.kiji.mapreduce.TestMapReduceJob.java

License:Apache License

@Test
public void testSubmit() throws ClassNotFoundException, IOException, InterruptedException {
    Job hadoopJob = createMock(Job.class);

    // Expect that the job is submitted and that it is successful.
    hadoopJob.submit();
    expect(hadoopJob.isComplete()).andReturn(false);
    expect(hadoopJob.isComplete()).andReturn(true);
    expect(hadoopJob.isSuccessful()).andReturn(true);

    replay(hadoopJob);

    MapReduceJob job = new ConcreteMapReduceJob(hadoopJob);
    MapReduceJob.Status jobStatus = job.submit();
    assertFalse(jobStatus.isComplete());
    assertTrue(jobStatus.isComplete());
    assertTrue(jobStatus.isSuccessful());

    verify(hadoopJob);
}

From source file:org.mrgeo.mapreduce.MapReduceUtils.java

License:Apache License

public static boolean runJob(Job job, Progress progress, JobListener jl)
        throws JobFailedException, JobCancelledException {
    boolean success = false;
    if (jl != null) {
        //append job id to the job name for easy identification
        job.setJobName("ID_" + jl.getUserJobId() + "_" + job.getJobName());
        jl.addJob(job);
    }

    long start = System.currentTimeMillis();
    log.info("Running job {}", job.getJobName());

    try {
        job.submit();
        log.info("Job {} startup: {}ms", job.getJobName(), (System.currentTimeMillis() - start));
        if (progress == null) {
            job.waitForCompletion(true);
        } else {
            float initP = progress.get();
            float percentP = 100 - initP;
            while (!job.isComplete()) {
                float p = job.mapProgress() * .9f + job.reduceProgress() * .1f;
                progress.set(p * percentP + initP);
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    log.info("Job Cancelled by user");
                    throw new JobCancelledException("Job Cancelled by user.");
                }
            }
        }

        log.info("Job {} time: {}ms", job.getJobName(), (System.currentTimeMillis() - start));

        if (!job.isSuccessful()) {
            throw new JobFailedException("Job failed: " + job.getTrackingURL());
        }
        success = job.isSuccessful();
    } catch (InterruptedException e) {
        e.printStackTrace();
        throw new JobFailedException(e.getMessage());
    } catch (ClassNotFoundException e) {
        e.printStackTrace();
        throw new JobFailedException(e.getMessage());
    }
    // when submitting jobs under JBoss, Exception doesn't appear to be caught
    catch (Throwable e) {
        e.printStackTrace();
        throw new JobFailedException(e.getMessage());
    }
    return success;
}

From source file:org.mrgeo.mapreduce.MapReduceUtils.java

License:Apache License

/**
 * Check on the progress of a job and return true if the job has completed. Note
 * that a return value of true does not mean the job was successful, just that
 * it completed.
 *
 * @param job the job to check
 * @param progress optional progress tracker to update while the job runs
 * @param jl optional job listener used to detect user cancellation
 * @return true if the job has completed
 * @throws IOException
 * @throws JobFailedException
 * @throws JobCancelledException
 */
public static boolean checkJobProgress(Job job, Progress progress, JobListener jl)
        throws IOException, JobFailedException, JobCancelledException {
    boolean result = job.isComplete();
    if (progress != null) {
        float initP = progress.get();
        float percentP = 100 - initP;
        if (!result) {
            float p = job.mapProgress() * .9f + job.reduceProgress() * .1f;
            progress.set(p * percentP + initP);
        }
    }

    if (result) {
        if (!job.isSuccessful()) {
            if (jl != null && jl.isCancelled()) {
                throw new JobCancelledException(job.getJobName() + " - Job Cancelled by user");
            }
            throw new JobFailedException("Job failed: " + job.getTrackingURL());
        }
    }
    return result;
}

From source file:water.hadoop.h2odriver.java

public static void killJobAndWait(Job job) {
    boolean killed = false;

    try {
        System.out.println("Attempting to clean up hadoop job...");
        job.killJob();
        for (int i = 0; i < 5; i++) {
            if (job.isComplete()) {
                System.out.println("Killed.");
                killed = true;
                break;
            }

            Thread.sleep(1000);
        }
    } catch (Exception ignore) {
    } finally {
        if (!killed) {
            System.out.println("Kill attempt failed, please clean up job manually.");
        }
    }
}