Example usage for org.apache.hadoop.conf Configuration setClass

Introduction

This page collects usage examples for org.apache.hadoop.conf.Configuration.setClass.

Prototype

public void setClass(String name, Class<?> theClass, Class<?> xface) 

Document

Set the value of the name property to the name of theClass, which must implement the given interface xface.
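
A minimal sketch of the round trip (the Codec interface, FastCodec class, and the "example.codec.class" key are hypothetical, used only for illustration): setClass stores the class name under the property and throws a RuntimeException if the class does not implement the interface, and getClass performs the matching checked lookup.

import org.apache.hadoop.conf.Configuration;

public class SetClassExample {
    // Hypothetical interface/implementation pair, used only for illustration.
    public interface Codec {}
    public static class FastCodec implements Codec {}

    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Stores FastCodec's class name under the key; setClass throws a
        // RuntimeException if FastCodec did not implement Codec.
        conf.setClass("example.codec.class", FastCodec.class, Codec.class);

        // Checked read-back: returns the stored class, verified against
        // Codec, or the default (null here) if the key is unset.
        Class<? extends Codec> codecClass =
                conf.getClass("example.codec.class", null, Codec.class);
        System.out.println(codecClass.getName());
    }
}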

Usage

From source file: org.apache.pig.backend.hadoop.executionengine.tez.TezDagBuilder.java

License: Apache License
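
Pig's TezDagBuilder picks a RawComparator implementation for the key type and registers it under Tez's TEZ_RUNTIME_KEY_COMPARATOR_CLASS setting; setClass verifies the comparator against the RawComparator interface: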

void selectOutputComparator(byte keyType, Configuration conf, TezOperator tezOp) throws JobCreationException {
    // TODO: Handle sorting like in JobControlCompiler
    // TODO: Group comparators as in JobControlCompiler
    if (tezOp != null && tezOp.isUseSecondaryKey()) {
        conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_COMPARATOR_CLASS,
                PigSecondaryKeyComparator.class.getName());
        setGroupingComparator(conf, PigSecondaryKeyGroupComparator.class.getName());
    } else {
        if (tezOp != null && tezOp.isSkewedJoin()) {
            // TODO: PigGroupingPartitionWritableComparator only used as Group comparator in MR.
            // What should be TEZ_RUNTIME_KEY_COMPARATOR_CLASS if same as MR?
            conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_COMPARATOR_CLASS,
                    PigGroupingPartitionWritableComparator.class.getName());
            setGroupingComparator(conf, PigGroupingPartitionWritableComparator.class.getName());
        } else {
            boolean hasOrderby = hasOrderby(tezOp);
            conf.setClass(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_COMPARATOR_CLASS,
                    comparatorForKeyType(keyType, hasOrderby), RawComparator.class);
            if (!hasOrderby) {
                setGroupingComparator(conf, getGroupingComparatorForKeyType(keyType).getName());
            }
        }
    }
}

From source file: org.apache.solr.hadoop.hack.MiniMRYarnCluster.java

License: Apache License
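
Solr's vendored copy of Hadoop's MiniMRYarnCluster uses setClass twice: to register ShuffleHandler as a NodeManager auxiliary Service, and to select DefaultContainerExecutor as the ContainerExecutor: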

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir/").getAbsolutePath());
    }

    // By default, VMEM monitoring is disabled and PMEM monitoring is enabled.
    if (!conf.getBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
            MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
        conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
        conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    }

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use an absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using an absolute path avoids this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        // mkdir the staging directory so that the right permissions are set when running as a proxy user
        fc.mkdir(stagingPath, null, true);
        // mkdir the done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test"); // the default is "local", under which no shuffle happens

    // Configure the shuffle service in the NM.
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(String.format(Locale.ENGLISH, YarnConfiguration.NM_AUX_SERVICE_FMT,
            ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);

    super.serviceInit(conf);
}

From source file: org.apache.sqoop.mapreduce.AccumuloImportJob.java

License: Apache License
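
Sqoop's Accumulo import plugs AccumuloMutationProcessor into the delegating output format, checked against the FieldMapProcessor interface: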

@Override
protected void configureOutputFormat(Job job, String tableName, String tableClassName)
        throws ClassNotFoundException, IOException {

    // Use the DelegatingOutputFormat with the AccumuloMutationProcessor.
    job.setOutputFormatClass(getOutputFormatClass());

    Configuration conf = job.getConfiguration();
    conf.setClass("sqoop.output.delegate.field.map.processor.class", AccumuloMutationProcessor.class,
            FieldMapProcessor.class);

    // Set the Accumulo parameters (table, column family, row key):
    conf.set(AccumuloConstants.ZOOKEEPERS, options.getAccumuloZookeepers());
    conf.set(AccumuloConstants.ACCUMULO_INSTANCE, options.getAccumuloInstance());
    conf.set(AccumuloConstants.ACCUMULO_USER_NAME, options.getAccumuloUser());
    String pw = options.getAccumuloPassword();
    if (null == pw) {
        pw = "";//from ww  w .  ja  v a2  s  .c  o m
    }
    conf.set(AccumuloConstants.ACCUMULO_PASSWORD, pw);
    conf.set(AccumuloConstants.TABLE_NAME_KEY, options.getAccumuloTable());
    conf.set(AccumuloConstants.COL_FAMILY_KEY, options.getAccumuloColFamily());
    conf.setLong(AccumuloConstants.BATCH_SIZE, options.getAccumuloBatchSize());
    conf.setLong(AccumuloConstants.MAX_LATENCY, options.getAccumuloMaxLatency());

    // What column of the input becomes the row key?
    String rowKeyCol = options.getAccumuloRowKeyColumn();
    if (null == rowKeyCol) {
        // User didn't explicitly set one. If there's a split-by column set,
        // use that.
        rowKeyCol = options.getSplitByCol();
    }

    if (null == rowKeyCol) {
        // No split-by column is explicitly set.
        // If the table has a primary key, use that.
        ConnManager manager = getContext().getConnManager();
        rowKeyCol = manager.getPrimaryKey(tableName);
    }

    if (null == rowKeyCol) {
        // Give up here if this is still unset.
        throw new IOException("Could not determine the row-key column. "
                + "Use --accumulo-row-key to specify the input column that " + "names each row.");
    }

    conf.set(AccumuloConstants.ROW_KEY_COLUMN_KEY, rowKeyCol);
}

From source file: org.apache.sqoop.mapreduce.odps.HdfsOdpsImportJob.java

License: Apache License
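
The HDFS-to-ODPS import job wires in its OdpsUploadProcessor the same way, again validated against FieldMapProcessor: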

@Override
protected void configureOutputFormat(Job job, String tableName, String tableClassName)
        throws ClassNotFoundException {
    job.setOutputFormatClass(getOutputFormatClass());
    Configuration conf = job.getConfiguration();
    conf.setClass("sqoop.output.delegate.field.map.processor.class", OdpsUploadProcessor.class,
            FieldMapProcessor.class);

    conf.setStrings(OdpsConstants.INPUT_COL_NAMES, options.getColumns());

    String odpsTableName = options.getOdpsTable();
    if (odpsTableName == null) {
        odpsTableName = tableName;
    }
    conf.set(OdpsConstants.TABLE_NAME, odpsTableName);
    conf.set(OdpsConstants.ACCESS_ID, options.getOdpsAccessID());
    conf.set(OdpsConstants.ACCESS_KEY, options.getOdpsAccessKey());
    conf.set(OdpsConstants.ENDPOINT, options.getOdpsEndPoint());

    String tunnelEndPoint = options.getOdpsTunnelEndPoint();
    if (tunnelEndPoint != null) {
        conf.set(OdpsConstants.TUNNEL_ENDPOINT, options.getOdpsTunnelEndPoint());
    }

    conf.set(OdpsConstants.PROJECT, options.getOdpsProject());

    String partitionKey = options.getOdpsPartitionKey();
    String partitionValue = options.getOdpsPartitionValue();
    if (partitionKey != null && partitionValue != null) {
        conf.set(OdpsConstants.PARTITION_KEY, partitionKey);
        conf.set(OdpsConstants.PARTITION_VALUE, partitionValue);
    }
    conf.setBoolean(OdpsConstants.CREATE_TABLE, options.isOdpsCreateTable());
    String dateFormat = options.getOdpsInputDateFormat();
    if (dateFormat != null) {
        conf.set(OdpsConstants.DATE_FORMAT, dateFormat);
    }
    conf.setInt(OdpsConstants.RETRY_COUNT, options.getOdpsRetryCount());
    conf.setInt(OdpsConstants.BATCH_SIZE, options.getOdpsBatchSize());
    conf.setBoolean(OdpsConstants.USE_COMPRESS_IN_UPLOAD, options.isOdpsUseCompressInUpload());

    job.getConfiguration().set(ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY, tableClassName);
}

From source file: org.apache.sqoop.mapreduce.odps.OdpsImportJob.java

License: Apache License
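
OdpsImportJob follows the same pattern, differing from HdfsOdpsImportJob mainly in how the input column names are obtained and in which upload options it sets: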

@Override
protected void configureOutputFormat(Job job, String tableName, String tableClassName)
        throws ClassNotFoundException {
    job.setOutputFormatClass(getOutputFormatClass());
    Configuration conf = job.getConfiguration();
    conf.setClass("sqoop.output.delegate.field.map.processor.class", OdpsUploadProcessor.class,
            FieldMapProcessor.class);

    conf.setStrings(OdpsConstants.INPUT_COL_NAMES, getColumnNames());

    String odpsTableName = options.getOdpsTable();
    if (odpsTableName == null) {
        odpsTableName = tableName;
    }
    conf.set(OdpsConstants.TABLE_NAME, odpsTableName);
    conf.set(OdpsConstants.ACCESS_ID, options.getOdpsAccessID());
    conf.set(OdpsConstants.ACCESS_KEY, options.getOdpsAccessKey());
    conf.set(OdpsConstants.ENDPOINT, options.getOdpsEndPoint());

    String tunnelEndPoint = options.getOdpsTunnelEndPoint();
    if (tunnelEndPoint != null) {
        conf.set(OdpsConstants.TUNNEL_ENDPOINT, options.getOdpsTunnelEndPoint());
    }

    conf.set(OdpsConstants.PROJECT, options.getOdpsProject());

    String partitionKey = options.getOdpsPartitionKey();
    String partitionValue = options.getOdpsPartitionValue();
    if (partitionKey != null && partitionValue != null) {
        conf.set(OdpsConstants.PARTITION_KEY, partitionKey);
        conf.set(OdpsConstants.PARTITION_VALUE, partitionValue);
    }
    conf.setBoolean(OdpsConstants.CREATE_TABLE, options.isOdpsCreateTable());
    String dateFormat = options.getOdpsInputDateFormat();
    if (dateFormat != null) {
        conf.set(OdpsConstants.DATE_FORMAT, dateFormat);
    }
    conf.setInt(OdpsConstants.RETRY_COUNT, options.getOdpsRetryCount());
    conf.setInt(OdpsConstants.BATCH_SIZE, options.getOdpsBatchSize());
}

From source file: org.apache.tajo.MiniTajoYarnCluster.java

License: Apache License
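
Tajo's mini YARN cluster registers PullServerAuxService as the NodeManager's shuffle auxiliary service and selects DefaultContainerExecutor, both through setClass: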

@Override
public void init(Configuration conf) {

    conf.setSocketAddr(YarnConfiguration.RM_ADDRESS, new InetSocketAddress("127.0.0.1", 0));
    conf.setSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, new InetSocketAddress("127.0.0.1", 0));

    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir/").getAbsolutePath());
    }
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        // mkdir the staging directory so that the right permissions are set when running as a proxy user
        fc.mkdir(stagingPath, null, true);
        // mkdir the done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test"); // the default is "local", under which no shuffle happens

    // Configure the shuffle service in the NM.
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, PullServerAuxService.PULLSERVER_SERVICEID);
    conf.setClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, PullServerAuxService.PULLSERVER_SERVICEID),
            PullServerAuxService.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(TajoConf.ConfVars.PULLSERVER_PORT.name(), 0);

    // local directory
    conf.set(TajoConf.ConfVars.WORKER_TEMPORAL_DIR.name(), "/tmp/tajo-localdir");

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);

    conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 600);

    super.init(conf);
}

From source file: org.apache.tez.dag.app.dag.impl.TestTaskAttempt.java

License: Apache License
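
The Tez TaskAttempt tests below use setClass to substitute a stubbed FileSystem (StubbedFS) for the default local file system, with file-system caching disabled so the stub is actually instantiated: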

@Test(timeout = 5000)
// Ensure the DAG does not go into an error state if an attempt kill is
// received while STARTING
public void testLaunchFailedWhileKilling() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
    TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 0);

    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

    Configuration taskConf = new Configuration();
    taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    taskConf.setBoolean("fs.file.impl.disable.cache", true);

    TaskLocationHint locationHint = TaskLocationHint
            .createTaskLocationHint(new HashSet<String>(Arrays.asList(new String[] { "127.0.0.1" })), null);
    Resource resource = Resource.newInstance(1024, 1);

    AppContext mockAppContext = mock(AppContext.class);
    doReturn(new ClusterInfo()).when(mockAppContext).getClusterInfo();

    TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler, taListener, taskConf,
            new SystemClock(), mock(TaskHeartbeatHandler.class), mockAppContext, locationHint, false, resource,
            createFakeContainerContext(), false);

    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);

    taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
    // At state STARTING.
    taImpl.handle(new TaskAttemptEventKillRequest(taskAttemptID, null,
            TaskAttemptTerminationCause.TERMINATED_BY_CLIENT));
    // At some KILLING state.
    taImpl.handle(new TaskAttemptEventKillRequest(taskAttemptID, null,
            TaskAttemptTerminationCause.TERMINATED_BY_CLIENT));
    // taImpl.handle(new TaskAttemptEventContainerTerminating(taskAttemptID, null));
    assertFalse(eventHandler.internalError);
}

From source file: org.apache.tez.dag.app.dag.impl.TestTaskAttempt.java

License: Apache License
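
The same StubbedFS substitution appears here while the test drives an attempt from RUNNING through TERMINATING and TERMINATED: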

@Test(timeout = 5000)
// Ensure ContainerTerminating and ContainerTerminated are handled correctly by
// the TaskAttempt
public void testContainerTerminationWhileRunning() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);

    MockEventHandler eventHandler = spy(new MockEventHandler());
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

    Configuration taskConf = new Configuration();
    taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    taskConf.setBoolean("fs.file.impl.disable.cache", true);

    TaskLocationHint locationHint = TaskLocationHint
            .createTaskLocationHint(new HashSet<String>(Arrays.asList(new String[] { "127.0.0.1" })), null);
    Resource resource = Resource.newInstance(1024, 1);

    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");

    AppContext appCtx = mock(AppContext.class);
    AMContainerMap containers = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appCtx);
    containers.addContainerIfNew(container);

    doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
    doReturn(containers).when(appCtx).getAllContainers();

    TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class);
    TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler, taListener, taskConf,
            new SystemClock(), mockHeartbeatHandler, appCtx, locationHint, false, resource,
            createFakeContainerContext(), false);
    TezTaskAttemptID taskAttemptID = taImpl.getID();
    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);

    taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
    // At state STARTING.
    taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID, contId, null));
    assertEquals("Task attempt is not in the RUNNING state", taImpl.getState(), TaskAttemptState.RUNNING);
    verify(mockHeartbeatHandler).register(taskAttemptID);

    int expectedEventsAtRunning = 3;
    verify(eventHandler, times(expectedEventsAtRunning)).handle(arg.capture());

    taImpl.handle(new TaskAttemptEventContainerTerminating(taskAttemptID, "Terminating",
            TaskAttemptTerminationCause.APPLICATION_ERROR));
    assertFalse("InternalError occurred trying to handle TA_CONTAINER_TERMINATING", eventHandler.internalError);
    verify(mockHeartbeatHandler).unregister(taskAttemptID);
    assertEquals("Task attempt is not in the  FAILED state", taImpl.getState(), TaskAttemptState.FAILED);

    assertEquals(1, taImpl.getDiagnostics().size());
    assertEquals("Terminating", taImpl.getDiagnostics().get(0));
    assertEquals(TaskAttemptTerminationCause.APPLICATION_ERROR, taImpl.getTerminationCause());

    int expectedEventsAfterTerminating = expectedEventsAtRunning + 3;
    arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(expectedEventsAfterTerminating)).handle(arg.capture());

    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            TaskEventTAUpdate.class, 1);
    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            AMSchedulerEventTAEnded.class, 1);
    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            DAGEventCounterUpdate.class, 1);

    taImpl.handle(new TaskAttemptEventContainerTerminated(taskAttemptID, "Terminated",
            TaskAttemptTerminationCause.CONTAINER_EXITED));
    // verify unregister is not invoked again
    verify(mockHeartbeatHandler, times(1)).unregister(taskAttemptID);
    int expectedEventsAfterTerminated = expectedEventsAfterTerminating + 0; // no additional events expected
    arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(expectedEventsAfterTerminated)).handle(arg.capture());

    assertEquals(2, taImpl.getDiagnostics().size());
    assertEquals("Terminated", taImpl.getDiagnostics().get(1));

    // check that original error cause is retained
    assertEquals(TaskAttemptTerminationCause.APPLICATION_ERROR, taImpl.getTerminationCause());
}

From source file: org.apache.tez.dag.app.dag.impl.TestTaskAttempt.java

License: Apache License
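
Again with StubbedFS swapped in, this test terminates a running attempt directly: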

@Test(timeout = 5000)
// Ensure ContainerTerminated is handled correctly by the TaskAttempt
public void testContainerTerminatedWhileRunning() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);

    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

    Configuration taskConf = new Configuration();
    taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    taskConf.setBoolean("fs.file.impl.disable.cache", true);

    TaskLocationHint locationHint = TaskLocationHint
            .createTaskLocationHint(new HashSet<String>(Arrays.asList(new String[] { "127.0.0.1" })), null);
    Resource resource = Resource.newInstance(1024, 1);

    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");

    AppContext appCtx = mock(AppContext.class);
    AMContainerMap containers = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appCtx);
    containers.addContainerIfNew(container);

    doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
    doReturn(containers).when(appCtx).getAllContainers();

    TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class);
    TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler, taListener, taskConf,
            new SystemClock(), mockHeartbeatHandler, appCtx, locationHint, false, resource,
            createFakeContainerContext(), false);
    TezTaskAttemptID taskAttemptID = taImpl.getID();
    taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
    // At state STARTING.
    taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID, contId, null));
    assertEquals("Task attempt is not in running state", taImpl.getState(), TaskAttemptState.RUNNING);
    verify(mockHeartbeatHandler).register(taskAttemptID);

    taImpl.handle(new TaskAttemptEventContainerTerminated(taskAttemptID, "Terminated",
            TaskAttemptTerminationCause.CONTAINER_EXITED));
    assertFalse("InternalError occurred trying to handle TA_CONTAINER_TERMINATED", eventHandler.internalError);
    verify(mockHeartbeatHandler).unregister(taskAttemptID);
    assertEquals("Terminated", taImpl.getDiagnostics().get(0));
    assertEquals(TaskAttemptTerminationCause.CONTAINER_EXITED, taImpl.getTerminationCause());
    // TODO Ensure TA_TERMINATING after this is ignored.
}

From source file: org.apache.tez.dag.app.dag.impl.TestTaskAttempt.java

License: Apache License
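
The same setup checks that a container termination arriving after the attempt has already succeeded leaves its diagnostics and error cause untouched: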

@Test(timeout = 5000)
// Ensure ContainerTerminating and ContainerTerminated are handled correctly by
// the TaskAttempt
public void testContainerTerminatedAfterSuccess() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);

    MockEventHandler eventHandler = spy(new MockEventHandler());
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

    Configuration taskConf = new Configuration();
    taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    taskConf.setBoolean("fs.file.impl.disable.cache", true);

    TaskLocationHint locationHint = TaskLocationHint
            .createTaskLocationHint(new HashSet<String>(Arrays.asList(new String[] { "127.0.0.1" })), null);
    Resource resource = Resource.newInstance(1024, 1);

    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");

    AppContext appCtx = mock(AppContext.class);
    AMContainerMap containers = new AMContainerMap(mock(ContainerHeartbeatHandler.class),
            mock(TaskAttemptListener.class), new ContainerContextMatcher(), appCtx);
    containers.addContainerIfNew(container);

    doReturn(new ClusterInfo()).when(appCtx).getClusterInfo();
    doReturn(containers).when(appCtx).getAllContainers();

    TaskHeartbeatHandler mockHeartbeatHandler = mock(TaskHeartbeatHandler.class);
    TaskAttemptImpl taImpl = new MockTaskAttemptImpl(taskID, 1, eventHandler, taListener, taskConf,
            new SystemClock(), mockHeartbeatHandler, appCtx, locationHint, false, resource,
            createFakeContainerContext(), false);
    TezTaskAttemptID taskAttemptID = taImpl.getID();
    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);

    taImpl.handle(new TaskAttemptEventSchedule(taskAttemptID, 0, 0));
    // At state STARTING.
    taImpl.handle(new TaskAttemptEventStartedRemotely(taskAttemptID, contId, null));
    assertEquals("Task attempt is not in the RUNNING state", taImpl.getState(), TaskAttemptState.RUNNING);
    verify(mockHeartbeatHandler).register(taskAttemptID);

    int expectedEventsAtRunning = 3;
    verify(eventHandler, times(expectedEventsAtRunning)).handle(arg.capture());

    taImpl.handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_DONE));

    assertEquals("Task attempt is not in the  SUCCEEDED state", taImpl.getState(), TaskAttemptState.SUCCEEDED);
    verify(mockHeartbeatHandler).unregister(taskAttemptID);
    assertEquals(0, taImpl.getDiagnostics().size());

    int expectedEventsAfterTerminating = expectedEventsAtRunning + 3;
    arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(expectedEventsAfterTerminating)).handle(arg.capture());

    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            TaskEventTAUpdate.class, 1);
    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            AMSchedulerEventTAEnded.class, 1);
    verifyEventType(arg.getAllValues().subList(expectedEventsAtRunning, expectedEventsAfterTerminating),
            DAGEventCounterUpdate.class, 1);

    taImpl.handle(new TaskAttemptEventContainerTerminated(taskAttemptID, "Terminated",
            TaskAttemptTerminationCause.CONTAINER_EXITED));
    // verify unregister is not invoked again
    verify(mockHeartbeatHandler, times(1)).unregister(taskAttemptID);
    int expectedEventsAfterTerminated = expectedEventsAfterTerminating + 0; // no additional events expected
    arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(expectedEventsAfterTerminated)).handle(arg.capture());

    // Verify that the diagnostic message included in the Terminated event is not
    // captured - the TA already succeeded, and the error cause keeps its default value.
    assertEquals(0, taImpl.getDiagnostics().size());
    assertEquals(TaskAttemptTerminationCause.UNKNOWN_ERROR, taImpl.getTerminationCause());
}