Example usage for org.apache.hadoop.mapreduce Job getStatus

List of usage examples for org.apache.hadoop.mapreduce Job getStatus

Introduction

This page collects example usages of the org.apache.hadoop.mapreduce Job getStatus method, drawn from the Apache Falcon and Apache Ignite projects.

Prototype

public JobStatus getStatus() throws IOException, InterruptedException 
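
Before the project-specific examples, here is a minimal sketch of the typical pattern: submit a job, poll getStatus() while it runs, then check the final state and, on failure, getFailureInfo(). The job name, paths, and class below are illustrative placeholders, not taken from any example on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class JobStatusExample {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "status-example");

        // Placeholder paths; substitute real input/output locations.
        FileInputFormat.setInputPaths(job, new Path("/tmp/status-example/input"));
        FileOutputFormat.setOutputPath(job, new Path("/tmp/status-example/output"));

        job.submit();

        // getStatus() refreshes the status from the cluster as needed,
        // so it can be polled while the job is running.
        while (!job.isComplete()) {
            JobStatus status = job.getStatus();
            System.out.printf("state=%s setup=%.2f map=%.2f reduce=%.2f%n",
                    status.getState(), status.getSetupProgress(),
                    status.getMapProgress(), status.getReduceProgress());
            Thread.sleep(2000);
        }

        // After completion the final state is available; on failure,
        // getFailureInfo() carries the diagnostic message.
        JobStatus finalStatus = job.getStatus();
        if (finalStatus.getState() != JobStatus.State.SUCCEEDED) {
            System.err.println("Job failed: " + finalStatus.getFailureInfo());
        }
    }
}

The examples that follow use these same calls: comparing getState() with JobStatus.State.SUCCEEDED after completion, reading getFailureInfo() on failure, and polling the setup/map/reduce progress values mid-run.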

Usage

From source file: org.apache.falcon.hive.HiveDRTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length < 1) {
        usage();
        return -1;
    }

    try {
        init(args);
    } catch (Throwable e) {
        LOG.error("Invalid arguments: ", e);
        System.err.println("Invalid arguments: " + e.getMessage());
        usage();
        return -1;
    }

    try {
        Job job = execute();
        if ((job != null) && (inputOptions.getExecutionStage()
                .equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name()))) {
            if ((job.getStatus().getState() == JobStatus.State.SUCCEEDED)
                    && (job.getConfiguration().get("counterLogDir") != null)) {
                LOG.info("Obtaining job replication counters for Hive DR job");
                Path counterFile = new Path(job.getConfiguration().get("counterLogDir"), "counter.txt");
                JobCounters hiveReplicationCounters = JobCountersHandler
                        .getCountersType(JobType.HIVEREPLICATION.name());
                hiveReplicationCounters.obtainJobCounters(job.getConfiguration(), job, false);
                hiveReplicationCounters.storeJobCounters(job.getConfiguration(), counterFile);
            }
        }
    } catch (Exception e) {
        System.err.println("Exception encountered " + e.getMessage());
        e.printStackTrace();
        LOG.error("Exception encountered, cleaning up staging dirs", e);
        cleanup();
        return -1;
    }

    if (inputOptions.getExecutionStage().equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
        cleanup();
    }

    return 0;
}

From source file: org.apache.falcon.hive.HiveDRTool.java

License: Apache License

public Job execute() throws Exception {
    assert inputOptions != null;
    assert getConf() != null;
    executionStage = inputOptions.getExecutionStage();
    LOG.info("Executing Workflow stage : {}", executionStage);
    if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.LASTEVENTS.name())) {
        String lastEventsIdFile = getLastEvents(jobConf);
        LOG.info("Last successfully replicated Event file : {}", lastEventsIdFile);
        return null;
    } else if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.EXPORT.name())) {
        createStagingDirectory();
        eventsMetaFile = sourceEvents();
        LOG.info("Sourced Events meta file : {}", eventsMetaFile);
        if (StringUtils.isEmpty(eventsMetaFile)) {
            LOG.info("No events to process");
            return null;
        } else {
            /*
             * eventsMetaFile contains the events to be processed by HiveDr. This file should be available
             * for the import action as well. Persist the file at a location common to both export and import.
             */
            persistEventsMetafileLocation(eventsMetaFile);
        }
    } else if (executionStage.equalsIgnoreCase(HiveDRUtils.ExecutionStage.IMPORT.name())) {
        // read the location of eventsMetaFile from hdfs
        eventsMetaFile = getEventsMetaFileLocation();
        if (StringUtils.isEmpty(eventsMetaFile)) {
            LOG.info("No events to process");
            return null;
        }
    } else {
        throw new HiveReplicationException("Invalid Execution stage : " + inputOptions.getExecutionStage());
    }

    Job job = createJob();
    job.submit();

    String jobID = job.getJobID().toString();
    job.getConfiguration().set("HIVEDR_JOB_ID", jobID);

    LOG.info("HiveDR job-id: {}", jobID);
    if (inputOptions.shouldBlock() && !job.waitForCompletion(true)) {
        throw new IOException(
                "HiveDR failure: Job " + jobID + " has failed: " + job.getStatus().getFailureInfo());
    }

    return job;
}

From source file: org.apache.falcon.hive.util.EventUtils.java

License: Apache License

public void invokeCopy() throws Exception {
    DistCpOptions options = getDistCpOptions();
    DistCp distCp = new DistCp(conf, options);
    LOG.info("Started DistCp with source Path: {} \ttarget path: {}", sourceStagingUri, targetStagingUri);

    Job distcpJob = distCp.execute();
    LOG.info("Distp Hadoop job: {}", distcpJob.getJobID().toString());
    LOG.info("Completed DistCp");
    if (distcpJob.getStatus().getState() == JobStatus.State.SUCCEEDED) {
        countersMap = HiveDRUtils.fetchReplicationCounters(conf, distcpJob);
    }
}

From source file: org.apache.falcon.replication.FeedReplicator.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    CommandLine cmd = getCommand(args);

    Configuration conf = this.getConf();
    // inject wf configs
    Path confPath = new Path("file:///" + System.getProperty("oozie.action.conf.xml"));

    LOG.info("{} found conf ? {}", confPath, confPath.getFileSystem(conf).exists(confPath));
    conf.addResource(confPath);

    String includePathConf = conf.get("falcon.include.path");
    final boolean includePathSet = (includePathConf != null) && !IGNORE.equalsIgnoreCase(includePathConf);

    DistCpOptions options = getDistCpOptions(cmd, includePathSet);

    String availabilityFlagOpt = cmd.getOptionValue("availabilityFlag");
    if (StringUtils.isEmpty(availabilityFlagOpt)) {
        availabilityFlagOpt = "NA";
    }
    String availabilityFlag = EntityUtil.SUCCEEDED_FILE_NAME;
    if (cmd.getOptionValue("falconFeedStorageType").equals(Storage.TYPE.FILESYSTEM.name())) {
        availabilityFlag = "NA".equals(availabilityFlagOpt) ? availabilityFlag : availabilityFlagOpt;
    }

    conf.set("falcon.feed.availability.flag", availabilityFlag);
    DistCp distCp = (includePathSet) ? new CustomReplicator(conf, options) : new DistCp(conf, options);
    LOG.info("Started DistCp with options :" + options);
    Job job = distCp.execute();

    if (cmd.hasOption("counterLogDir") && job.getStatus().getState() == JobStatus.State.SUCCEEDED) {
        LOG.info("Gathering counters for the the Feed Replication job");
        Path counterFile = new Path(cmd.getOptionValue("counterLogDir"), "counter.txt");
        JobCounters fsReplicationCounters = JobCountersHandler.getCountersType(JobType.FSREPLICATION.name());
        if (fsReplicationCounters != null) {
            fsReplicationCounters.obtainJobCounters(conf, job, true);
            fsReplicationCounters.storeJobCounters(conf, counterFile);
        }
    }

    if (includePathSet) {
        executePostProcessing(conf, options); // this only applies for FileSystem Storage.
    }

    LOG.info("Completed DistCp");
    return 0;
}

From source file: org.apache.ignite.client.hadoop.GridHadoopClientProtocolSelfTest.java

License: Apache License

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n"
                + "gamma\n");
    }

    Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestCountingMapper.class);
    job.setReducerClass(TestCountingReducer.class);
    job.setCombinerClass(TestCountingCombiner.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

    assertEquals(0, cntr.getValue());

    cntr.increment(10);

    assertEquals(10, cntr.getValue());

    // Transferring to map phase.
    setupLockFile.delete();

    // Transferring to reduce phase.
    mapLockFile.delete();

    job.waitForCompletion(false);

    assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

    final Counters counters = job.getCounters();

    assertNotNull("counters cannot be null", counters);
    assertEquals("wrong counters count", 3, counters.countCounters());
    assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
}

From source file: org.apache.ignite.client.hadoop.GridHadoopClientProtocolSelfTest.java

License: Apache License

/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("word");
    }

    Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setJobName(JOB_NAME);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestMapper.class);
    job.setReducerClass(TestReducer.class);

    if (!noCombiners)
        job.setCombinerClass(TestCombiner.class);

    if (noReducers)
        job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TestOutputFormat.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    JobID jobId = job.getJobID();

    // Setup phase.
    JobStatus jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
    assert jobStatus.getMapProgress() == 0.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    JobStatus recentJobStatus = job.getStatus();

    assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old="
            + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

    // Transferring to map phase.
    setupLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getSetupProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    // Map phase.
    jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    recentJobStatus = job.getStatus();

    assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress()
            + ", new=" + recentJobStatus.getMapProgress();

    // Transferring to reduce phase.
    mapLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getMapProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    if (!noReducers) {
        // Reduce phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

        // Ensure that reduce progress increases.
        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old="
                + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

        reduceLockFile.delete();
    }

    job.waitForCompletion(false);

    jobStatus = job.getStatus();
    checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() == 1.0f;
    assert jobStatus.getReduceProgress() == 1.0f;

    dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
}

From source file: org.apache.ignite.client.hadoop.HadoopClientProtocolSelfTest.java

License: Apache License

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n"
                + "gamma\n");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestCountingMapper.class);
    job.setReducerClass(TestCountingReducer.class);
    job.setCombinerClass(TestCountingCombiner.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

    assertEquals(0, cntr.getValue());

    cntr.increment(10);

    assertEquals(10, cntr.getValue());

    // Transferring to map phase.
    setupLockFile.delete();

    // Transferring to reduce phase.
    mapLockFile.delete();

    job.waitForCompletion(false);

    assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

    final Counters counters = job.getCounters();

    assertNotNull("counters cannot be null", counters);
    assertEquals("wrong counters count", 3, counters.countCounters());
    assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
}

From source file: org.apache.ignite.client.hadoop.HadoopClientProtocolSelfTest.java

License: Apache License

/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("word");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setJobName(JOB_NAME);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestMapper.class);
    job.setReducerClass(TestReducer.class);

    if (!noCombiners)
        job.setCombinerClass(TestCombiner.class);

    if (noReducers)
        job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TestOutputFormat.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    JobID jobId = job.getJobID();

    // Setup phase.
    JobStatus jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
    assert jobStatus.getMapProgress() == 0.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    JobStatus recentJobStatus = job.getStatus();

    assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old="
            + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

    // Transferring to map phase.
    setupLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getSetupProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    // Map phase.
    jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    recentJobStatus = job.getStatus();

    assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress()
            + ", new=" + recentJobStatus.getMapProgress();

    // Transferring to reduce phase.
    mapLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getMapProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    if (!noReducers) {
        // Reduce phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

        // Ensure that reduce progress increases.
        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old="
                + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

        reduceLockFile.delete();
    }

    job.waitForCompletion(false);

    jobStatus = job.getStatus();
    checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() == 1.0f;
    assert jobStatus.getReduceProgress() == 1.0f;

    dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.client.HadoopClientProtocolMultipleServersSelfTest.java

License: Apache License

/**
 * Test job submission.
 *
 * @param conf Hadoop configuration.
 * @throws Exception If failed.
 */
private void checkJobSubmit(Configuration conf) throws Exception {
    final Job job = Job.getInstance(conf);

    try {
        job.setJobName(JOB_NAME);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(OutFormat.class);

        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);

        job.setNumReduceTasks(0);

        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));

        job.submit();

        job.waitForCompletion(false);

        assert job.getStatus().getState() == JobStatus.State.SUCCEEDED : job.getStatus().getState();
    } finally {
        job.getCluster().close();
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.client.HadoopClientProtocolSelfTest.java

License: Apache License

/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n"
                + "gamma\n");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);

        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));

        job.submit();

        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

        assertEquals(0, cntr.getValue());

        cntr.increment(10);

        assertEquals(10, cntr.getValue());

        // Transferring to map phase.
        setupLockFile.delete();

        // Transferring to reduce phase.
        mapLockFile.delete();

        job.waitForCompletion(false);

        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

        final Counters counters = job.getCounters();

        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    } catch (Throwable t) {
        log.error("Unexpected exception", t);
    } finally {
        job.getCluster().close();
    }
}