Example usage for java.util Set clear

List of usage examples for java.util Set clear

Introduction

On this page you can find example usages of java.util.Set.clear().

Prototype

void clear();

Source Link

Document

Removes all of the elements from this set (optional operation).

Usage

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testCreateHostWithInvalidRequests() throws AmbariException {
    // Exercises three host-creation requests that must all be rejected:
    //   1. a host unknown to the system
    //   2. a host mapped to a cluster that does not exist yet
    //   3. duplicate entries for the same host in a single request

    Set<HostRequest> requests = new HashSet<HostRequest>();

    // 1. "h1" has not been registered yet.
    try {
        requests.clear();
        requests.add(new HostRequest("h1", null, null));
        HostResourceProviderTest.createHosts(controller, requests);
        fail("Expected failure for invalid host");
    } catch (Exception expected) {
        // rejected as intended
    }

    clusters.addHost("h1");

    String clusterName = "c1";

    // 2. The host exists now, but cluster "c1" does not.
    try {
        requests.clear();
        requests.add(new HostRequest("h1", clusterName, null));
        HostResourceProviderTest.createHosts(controller, requests);
        fail("Expected failure for invalid cluster");
    } catch (Exception expected) {
        // rejected as intended
    }

    clusters.addCluster("c1");

    // 3. Two requests for the same host in one batch.
    try {
        requests.clear();
        requests.add(new HostRequest("h1", clusterName, null));
        requests.add(new HostRequest("h1", clusterName, null));
        HostResourceProviderTest.createHosts(controller, requests);
        fail("Expected failure for dup requests");
    } catch (Exception expected) {
        // rejected as intended
    }
}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testCreateServicesWithInvalidRequest() throws AmbariException {
    // Negative tests for service creation. Covers:
    //   - requests with missing cluster and/or service names
    //   - a cluster name that does not exist
    //   - duplicate services in one request
    //   - a single request spanning multiple clusters
    //   - re-creating a service that already exists

    Set<ServiceRequest> set1 = new HashSet<ServiceRequest>();

    // Cluster and service names both null -> invalid request.
    try {
        set1.clear();
        ServiceRequest rInvalid = new ServiceRequest(null, null, null);
        set1.add(rInvalid);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // Service name null -> invalid request.
    try {
        set1.clear();
        ServiceRequest rInvalid = new ServiceRequest("foo", null, null);
        set1.add(rInvalid);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // Cluster "foo" does not exist yet -> ParentObjectNotFoundException.
    try {
        set1.clear();
        ServiceRequest rInvalid = new ServiceRequest("foo", "bar", null);
        set1.add(rInvalid);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for invalid cluster");
    } catch (AmbariException e) {
        // Expected
        Assert.assertTrue(checkExceptionType(e, ParentObjectNotFoundException.class));
    }

    clusters.addCluster("foo");
    clusters.addCluster("bar");
    clusters.getCluster("foo").setDesiredStackVersion(new StackId("HDP-0.1"));
    clusters.getCluster("bar").setDesiredStackVersion(new StackId("HDP-0.1"));

    // Two identical HDFS requests for the same cluster -> duplicate.
    try {
        set1.clear();
        ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null);
        ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null);
        set1.add(valid1);
        set1.add(valid2);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // "bar" is not a recognized service name -> invalid service.
    try {
        set1.clear();
        ServiceRequest valid1 = new ServiceRequest("foo", "bar", null);
        set1.add(valid1);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for invalid service");
    } catch (Exception e) {
        // Expected
    }

    // One request batch must not target two different clusters.
    try {
        set1.clear();
        ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null);
        ServiceRequest valid2 = new ServiceRequest("bar", "HDFS", null);
        set1.add(valid1);
        set1.add(valid2);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for multiple clusters");
    } catch (Exception e) {
        // Expected
    }

    // None of the failed attempts above should have created a service.
    Assert.assertNotNull(clusters.getCluster("foo"));
    Assert.assertEquals(0, clusters.getCluster("foo").getServices().size());

    // A single valid request succeeds.
    set1.clear();
    ServiceRequest valid = new ServiceRequest("foo", "HDFS", null);
    set1.add(valid);
    ServiceResourceProviderTest.createServices(controller, set1);

    // Re-creating the now-existing HDFS service must fail.
    try {
        set1.clear();
        ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null);
        ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null);
        set1.add(valid1);
        set1.add(valid2);
        ServiceResourceProviderTest.createServices(controller, set1);
        fail("Expected failure for existing service");
    } catch (Exception e) {
        // Expected
    }

    // Exactly the one successfully created service remains.
    Assert.assertEquals(1, clusters.getCluster("foo").getServices().size());

}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testServiceUpdateInvalidRequest() throws AmbariException {
    // Negative tests for service updates. Covers:
    //   - an update batch spanning multiple clusters
    //   - duplicate services within one batch
    //   - conflicting desired end states in one batch

    String clusterName1 = "foo1";
    createCluster(clusterName1);
    String clusterName2 = "foo2";
    createCluster(clusterName2);
    String serviceName1 = "HDFS";
    createService(clusterName1, serviceName1, null);
    String serviceName2 = "HBASE";
    String serviceName3 = "HBASE";

    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "Called from a test");

    // HBASE cannot be created while the cluster is on stack 0.1.
    try {
        createService(clusterName2, serviceName3, null);
        fail("Expected fail for invalid service for stack 0.1");
    } catch (Exception e) {
        // Expected
    }

    // Move both clusters to HDP-0.2, after which the HBASE services can be created.
    clusters.getCluster(clusterName1).setDesiredStackVersion(new StackId("HDP-0.2"));
    clusters.getCluster(clusterName2).setDesiredStackVersion(new StackId("HDP-0.2"));
    createService(clusterName1, serviceName2, null);
    createService(clusterName2, serviceName3, null);

    Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
    ServiceRequest req1, req2;
    // One update batch must not span two clusters.
    try {
        reqs.clear();
        req1 = new ServiceRequest(clusterName1, serviceName1, State.INSTALLED.toString());
        req2 = new ServiceRequest(clusterName2, serviceName2, State.INSTALLED.toString());
        reqs.add(req1);
        reqs.add(req2);
        ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
        fail("Expected failure for multi cluster update");
    } catch (Exception e) {
        // Expected
    }

    // The same service must not appear twice in one batch.
    try {
        reqs.clear();
        req1 = new ServiceRequest(clusterName1, serviceName1, State.INSTALLED.toString());
        req2 = new ServiceRequest(clusterName1, serviceName1, State.INSTALLED.toString());
        reqs.add(req1);
        reqs.add(req2);
        ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
        fail("Expected failure for dups services");
    } catch (Exception e) {
        // Expected
    }

    clusters.getCluster(clusterName1).getService(serviceName2).setDesiredState(State.INSTALLED);

    // Mixing different target states (INSTALLED vs STARTED) in one batch is rejected.
    try {
        reqs.clear();
        req1 = new ServiceRequest(clusterName1, serviceName1, State.INSTALLED.toString());
        req2 = new ServiceRequest(clusterName1, serviceName2, State.STARTED.toString());
        reqs.add(req1);
        reqs.add(req2);
        ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
        fail("Expected failure for different states");
    } catch (Exception e) {
        // Expected
    }

}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testDeleteHostComponentInVariousStates() throws Exception {
    // Verifies host-component deletion rules: a component whose live state is
    // STARTED cannot be deleted, while components in UNKNOWN, INIT,
    // INSTALL_FAILED, DISABLED and INSTALLED states can all be deleted.
    String clusterName = "foo1";
    createCluster(clusterName);
    clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-1.3.1"));
    String serviceName = "HDFS";
    String mapred = "MAPREDUCE";
    createService(clusterName, serviceName, null);
    createService(clusterName, mapred, null);
    String componentName1 = "NAMENODE";
    String componentName2 = "DATANODE";
    String componentName3 = "HDFS_CLIENT";
    String componentName4 = "JOBTRACKER";
    String componentName5 = "TASKTRACKER";
    String componentName6 = "MAPREDUCE_CLIENT";

    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
    createServiceComponent(clusterName, mapred, componentName4, State.INIT);
    createServiceComponent(clusterName, mapred, componentName5, State.INIT);
    createServiceComponent(clusterName, mapred, componentName6, State.INIT);

    String host1 = "h1";

    addHost(host1, clusterName);

    createServiceComponentHost(clusterName, serviceName, componentName1, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
    createServiceComponentHost(clusterName, mapred, componentName4, host1, null);
    createServiceComponentHost(clusterName, mapred, componentName5, host1, null);
    createServiceComponentHost(clusterName, mapred, componentName6, host1, null);

    // Install both services.
    installService(clusterName, serviceName, false, false);
    installService(clusterName, mapred, false, false);

    Cluster cluster = clusters.getCluster(clusterName);
    Service s1 = cluster.getService(serviceName);
    Service s2 = cluster.getService(mapred);
    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
    // Mark the NAMENODE host component as running.
    sc1.getServiceComponentHosts().values().iterator().next().setState(State.STARTED);

    Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
    // Deleting a STARTED host component must be rejected.
    schRequests.clear();
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null));
    try {
        controller.deleteHostComponents(schRequests);
        Assert.fail("Expect failure while deleting.");
    } catch (Exception ex) {
        Assert.assertTrue(ex.getMessage().contains("Host Component cannot be removed"));
    }

    // Put each component into a different state from which deletion is allowed.
    sc1.getServiceComponentHosts().values().iterator().next().setDesiredState(State.STARTED);
    sc1.getServiceComponentHosts().values().iterator().next().setState(State.UNKNOWN);
    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
    sc2.getServiceComponentHosts().values().iterator().next().setState(State.INIT);
    ServiceComponent sc3 = s1.getServiceComponent(componentName3);
    sc3.getServiceComponentHosts().values().iterator().next().setState(State.INSTALL_FAILED);
    ServiceComponent sc4 = s2.getServiceComponent(componentName4);
    sc4.getServiceComponentHosts().values().iterator().next().setDesiredState(State.INSTALLED);
    sc4.getServiceComponentHosts().values().iterator().next().setState(State.DISABLED);
    ServiceComponent sc5 = s2.getServiceComponent(componentName5);
    sc5.getServiceComponentHosts().values().iterator().next().setState(State.INSTALLED);
    ServiceComponent sc6 = s2.getServiceComponent(componentName6);
    sc6.getServiceComponentHosts().values().iterator().next().setState(State.INIT);

    // All six components are now deletable in a single request.
    schRequests.clear();
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName2, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName3, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, mapred, componentName4, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, mapred, componentName5, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, mapred, componentName6, host1, null));
    controller.deleteHostComponents(schRequests);
}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Ignore
@Test
public void testServiceComponentHostUpdateStackIdError() throws Exception {
    // Exercises the error paths of a host-component upgrade request
    // (setDesiredStackId), then finishes with two requests that are accepted
    // and return a null response.
    String clusterName = "foo1";
    createCluster(clusterName);
    String serviceName1 = "HDFS";
    createService(clusterName, serviceName1, null);
    String componentName1 = "NAMENODE";
    createServiceComponent(clusterName, serviceName1, componentName1, State.INIT);
    String host1 = "h1";
    String host2 = "h2";
    addHost(host1, clusterName);
    addHost(host2, clusterName);

    Set<ServiceComponentHostRequest> set1 = new HashSet<ServiceComponentHostRequest>();
    ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1,
            host1, State.INIT.toString());
    ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1,
            host2, State.INIT.toString());

    set1.add(r1);
    set1.add(r2);
    controller.createHostComponents(set1);

    Cluster c1 = clusters.getCluster(clusterName);
    Service s1 = c1.getService(serviceName1);
    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
    ServiceComponentHost sch2 = sc1.getServiceComponentHost(host2);

    s1.setDesiredState(State.INIT);
    sc1.setDesiredState(State.INIT);

    ServiceComponentHostRequest req1;
    ServiceComponentHostRequest req2;
    Set<ServiceComponentHostRequest> reqs = new HashSet<ServiceComponentHostRequest>();

    // An unparsable desired stack id is rejected outright.
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("invalid stack id");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "Invalid desired stack id");

    // With no current cluster stack version set, an upgrade cannot proceed.
    c1.setCurrentStackVersion(null);
    sch1.setStackVersion(new StackId("HDP-0.1"));
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "Cluster has not been upgraded yet");

    // Requested stack name (HDP) differs from the deployed stack name (HDP2).
    c1.setCurrentStackVersion(new StackId("HDP2-0.1"));
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "Deployed stack name and requested stack names");

    // Requested version (0.3) differs from the cluster's current version (0.2).
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("HDP-0.3");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "Component host can only be upgraded to the same version");

    // A STARTED component is not in a valid state to begin an upgrade.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.STARTED);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "Component host is in an invalid state for upgrade");

    // While UPGRADING, a desired state of STARTED is not acceptable.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.UPGRADING);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.STARTED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "The desired state for an upgrade request must be");

    // Same check with no desired state supplied at all.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.UPGRADING);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1, null);
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    updateHostAndCompareExpectedFailure(reqs, "The desired state for an upgrade request must be");

    // An upgrade request cannot be batched with a plain state-change request.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.INSTALLED);
    sch1.setDesiredState(State.INSTALLED);
    sch2.setState(State.INSTALLED);
    sch2.setDesiredState(State.INSTALLED);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.INSTALLED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    req2 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host2,
            State.STARTED.toString());
    reqs.add(req2);
    updateHostAndCompareExpectedFailure(reqs, "An upgrade request cannot be combined with other");

    // Component already on the requested stack, no desired state -> accepted, null response.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.INSTALLED);
    sch1.setStackVersion(new StackId("HDP-0.2"));
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1, null);
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);

    RequestStatusResponse resp = updateHostComponents(reqs, Collections.<String, String>emptyMap(), true);
    Assert.assertNull(resp);

    // Same, with the desired state explicitly INSTALLED -> also accepted.
    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
    sch1.setState(State.INSTALLED);
    sch1.setStackVersion(new StackId("HDP-0.2"));
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.INSTALLED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    resp = updateHostComponents(reqs, Collections.<String, String>emptyMap(), true);
    Assert.assertNull(resp);
}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testCreateServiceComponentWithInvalidRequest() throws AmbariException {
    // Negative tests for service-component creation. Covers:
    //   - null/partial request fields
    //   - nonexistent cluster and nonexistent service
    //   - duplicate components in one request
    //   - a request spanning multiple clusters
    //   - re-creating an existing component
    // Finishes by checking only the successfully created components exist.

    Set<ServiceComponentRequest> set1 = new HashSet<ServiceComponentRequest>();

    // All fields null -> invalid request.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest(null, null, null, null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // Service and component names null -> invalid request.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest("c1", null, null, null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // Component name null -> invalid request.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest("c1", "s1", null, null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for invalid requests");
    } catch (Exception e) {
        // Expected
    }

    // Cluster "c1" does not exist yet.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest("c1", "s1", "sc1", null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for invalid cluster");
    } catch (ParentObjectNotFoundException e) {
        // Expected
    }

    clusters.addCluster("c1");
    clusters.addCluster("c2");

    // The HDFS service has not been added to the cluster yet.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for invalid service");
    } catch (ParentObjectNotFoundException e) {
        // Expected
    }

    // Set up cluster c1 with HDFS and MAPREDUCE services on stack HDP-0.1.
    Cluster c1 = clusters.getCluster("c1");
    StackId stackId = new StackId("HDP-0.1");
    c1.setDesiredStackVersion(stackId);
    helper.getOrCreateRepositoryVersion(stackId.getStackName(), stackId.getStackVersion());
    c1.createClusterVersion(stackId.getStackName(), stackId.getStackVersion(), "admin",
            RepositoryVersionState.UPGRADING);
    Service s1 = serviceFactory.createNew(c1, "HDFS");
    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
    c1.addService(s1);
    c1.addService(s2);
    s1.persist();
    s2.persist();

    // Three valid component requests succeed.
    set1.clear();
    ServiceComponentRequest valid1 = new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null);
    ServiceComponentRequest valid2 = new ServiceComponentRequest("c1", "MAPREDUCE", "JOBTRACKER", null);
    ServiceComponentRequest valid3 = new ServiceComponentRequest("c1", "MAPREDUCE", "TASKTRACKER", null);
    set1.add(valid1);
    set1.add(valid2);
    set1.add(valid3);
    ComponentResourceProviderTest.createComponents(controller, set1);

    // The same component twice in one request -> duplicate.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid1 = new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null);
        ServiceComponentRequest rInvalid2 = new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null);
        set1.add(rInvalid1);
        set1.add(rInvalid2);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for dups in requests");
    } catch (Exception e) {
        // Expected
    }

    // One request batch must not target two different clusters.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid1 = new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null);
        ServiceComponentRequest rInvalid2 = new ServiceComponentRequest("c2", "HDFS", "HDFS_CLIENT", null);
        set1.add(rInvalid1);
        set1.add(rInvalid2);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for multiple clusters");
    } catch (Exception e) {
        // Expected
    }

    // NAMENODE was already created above -> must not be created again.
    try {
        set1.clear();
        ServiceComponentRequest rInvalid = new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null);
        set1.add(rInvalid);
        ComponentResourceProviderTest.createComponents(controller, set1);
        fail("Expected failure for already existing component");
    } catch (Exception e) {
        // Expected
    }

    // Only the three successfully created components should exist.
    Assert.assertEquals(1, s1.getServiceComponents().size());
    Assert.assertNotNull(s1.getServiceComponent("NAMENODE"));
    Assert.assertEquals(2, s2.getServiceComponents().size());
    Assert.assertNotNull(s2.getServiceComponent("JOBTRACKER"));
    Assert.assertNotNull(s2.getServiceComponent("TASKTRACKER"));

}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testDeleteHost() throws Exception {
    // End-to-end host-deletion test: a host with installed components cannot
    // be deleted; once its components are removed it can be detached from the
    // cluster and then deleted entirely; deleting unknown hosts raises
    // HostNotFoundException.
    String clusterName = "foo1";

    createCluster(clusterName);

    Cluster cluster = clusters.getCluster(clusterName);
    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));

    String serviceName = "HDFS";
    createService(clusterName, serviceName, null);
    String componentName1 = "NAMENODE";
    String componentName2 = "DATANODE";
    String componentName3 = "HDFS_CLIENT";

    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);

    String host1 = "h1";

    // h2 is registered but never mapped to the cluster.
    String host2 = "h2";
    clusters.addHost(host2);
    setOsFamily(clusters.getHost("h2"), "redhat", "6.3");
    clusters.getHost("h2").persist();

    // h3 is never added at all.
    String host3 = "h3";

    addHost(host1, clusterName);

    // NOTE(review): service name is deliberately null for NAMENODE here —
    // presumably resolved from the component name; confirm against the controller.
    createServiceComponentHost(clusterName, null, componentName1, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);

    // Install
    installService(clusterName, serviceName, false, false);

    // make them believe they are up
    Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName)
            .getServiceComponent(componentName1).getServiceComponentHosts();
    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
        ServiceComponentHost cHost = entry.getValue();
        cHost.handleEvent(
                new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(),
                        System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis()));
    }
    hostComponents = cluster.getService(serviceName).getServiceComponent(componentName2)
            .getServiceComponentHosts();
    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
        ServiceComponentHost cHost = entry.getValue();
        cHost.handleEvent(
                new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(),
                        System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis()));
    }

    Set<HostRequest> requests = new HashSet<HostRequest>();
    // delete from cluster must fail while components are still present
    requests.clear();
    requests.add(new HostRequest(host1, clusterName, null));
    try {
        HostResourceProviderTest.deleteHosts(controller, requests);
        fail("Expect failure deleting hosts when components exist.");
    } catch (Exception e) {
        // expected
    }

    Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
    // disable HC for non-clients
    schRequests
            .add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, "DISABLED"));
    schRequests
            .add(new ServiceComponentHostRequest(clusterName, serviceName, componentName2, host1, "DISABLED"));
    updateHostComponents(schRequests, new HashMap<String, String>(), false);

    // delete HC
    schRequests.clear();
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName2, host1, null));
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName3, host1, null));
    controller.deleteHostComponents(schRequests);

    Assert.assertEquals(0, cluster.getServiceComponentHosts(host1).size());

    // delete, which should fail since it is part of a cluster
    requests.clear();
    requests.add(new HostRequest(host1, null, null));
    try {
        HostResourceProviderTest.deleteHosts(controller, requests);
        fail("Expect failure when removing from host when it is part of a cluster.");
    } catch (Exception e) {
        // expected
    }

    // delete host from cluster
    requests.clear();
    requests.add(new HostRequest(host1, clusterName, null));
    HostResourceProviderTest.deleteHosts(controller, requests);

    // host is no longer part of the cluster
    Assert.assertFalse(clusters.getHostsForCluster(clusterName).containsKey(host1));
    Assert.assertFalse(clusters.getClustersForHost(host1).contains(cluster));

    // delete entirely
    requests.clear();
    requests.add(new HostRequest(host1, null, null));
    HostResourceProviderTest.deleteHosts(controller, requests);

    // verify host does not exist
    try {
        clusters.getHost(host1);
        Assert.fail("Expected a HostNotFoundException.");
    } catch (HostNotFoundException e) {
        // expected
    }

    // remove host2
    requests.clear();
    requests.add(new HostRequest(host2, null, null));
    HostResourceProviderTest.deleteHosts(controller, requests);

    // verify host does not exist
    try {
        clusters.getHost(host2);
        Assert.fail("Expected a HostNotFoundException.");
    } catch (HostNotFoundException e) {
        // expected
    }

    // try removing a host that never existed
    requests.clear();
    requests.add(new HostRequest(host3, null, null));
    try {
        HostResourceProviderTest.deleteHosts(controller, requests);
        Assert.fail("Expected a HostNotFoundException trying to remove a host that was never added.");
    } catch (HostNotFoundException e) {
        // expected
    }

}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

@Test
public void testServiceUpdateBasic() throws AmbariException {
    // Creates a cluster with an HDFS service in INIT state, verifies the
    // initial service attributes, then checks that INIT -> INSTALLING is
    // rejected while INIT -> INSTALLED is accepted and returns no tracked
    // action.
    final String clusterName = "foo1";
    final String serviceName = "HDFS";

    createCluster(clusterName);

    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "Called from a test");

    clusters.getCluster("foo1").setDesiredStackVersion(new StackId("HDP-0.2"));
    createService(clusterName, serviceName, State.INIT);

    Service service = clusters.getCluster(clusterName).getService(serviceName);
    Assert.assertNotNull(service);
    Assert.assertEquals(serviceName, service.getName());
    Assert.assertEquals(State.INIT, service.getDesiredState());
    Assert.assertEquals(clusterName, service.getCluster().getClusterName());

    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();

    // INIT -> INSTALLING is not a legal desired-state transition.
    try {
        requests.clear();
        requests.add(new ServiceRequest(clusterName, serviceName, State.INSTALLING.toString()));
        ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
        fail("Expected fail for invalid state transition");
    } catch (Exception expected) {
        // rejected as intended
    }

    // INIT -> INSTALLED is legal; the update returns no tracked action.
    requests.clear();
    requests.add(new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString()));
    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, requests,
            mapRequestProps, true, false);
    Assert.assertNull(trackAction);
}

From source file:com.opengamma.masterdb.batch.DbBatchWriter.java

/**
 * Writes the results of a single view-computation cycle to the batch database, inside the
 * caller's already-open transaction.
 *
 * <p>For each calculation configuration in {@code resultModel}, the computed values are
 * partitioned into successes and failures, batched into SQL parameter sources, and inserted.
 * Each insert category is wrapped in its own savepoint so a failed insert can be rolled back
 * without abandoning the whole transaction; if the success-insert fails, every "success" is
 * converted into a failure row instead. Finally the per-target status entries are updated.
 *
 * @param transactionStatus the active transaction; used only to create/rollback savepoints
 * @param runId object id of the risk run these results belong to, not null
 * @param resultModel the computed results of one cycle, not null
 */
@SuppressWarnings("unchecked")
public synchronized void addJobResultsInTransaction(TransactionStatus transactionStatus, ObjectId runId,
        ViewComputationResultModel resultModel) {
    ArgumentChecker.notNull(runId, "runId");
    ArgumentChecker.notNull(resultModel, "resultModel");

    final long riskRunId = extractOid(runId);
    ArgumentChecker.notNull(riskRunId, "riskRunId");

    // Per-run caches keyed by run id; presumably populated when the run was started --
    // TODO confirm; a missing entry here would NPE further down.
    Map<ComputeFailureKey, ComputeFailure> computeFailureCache = _computeFailureCacheByRunId.get(riskRunId);
    Map<Pair<Long, Long>, StatusEntry> statusCache = _statusCacheByRunId.get(riskRunId);

    Map<ValueSpecification, BatchResultWriterFailure> errorCache = populateErrorCache(computeFailureCache,
            resultModel.getAllResults());

    RiskRun run = _riskRunsByIds.get(riskRunId);
    // In write-through mode the cycle's market data is persisted immediately alongside results.
    if (run.getSnapshotMode().equals(SnapshotMode.WRITE_THROUGH)) {
        addComputedValuesToMarketDataInTransaction(run.getMarketData().getObjectId(),
                resultModel.getAllMarketData());
    }

    for (String calcConfigName : resultModel.getCalculationConfigurationNames()) {
        ViewCalculationResultModel viewCalculationResultModel = resultModel
                .getCalculationResult(calcConfigName);

        final Set<ComputationTargetSpecification> successfulTargets = newHashSet();
        final Set<ComputationTargetSpecification> failedTargets = newHashSet();

        // Batched SQL parameter rows, flushed once per calculation configuration.
        List<SqlParameterSource> targetProperties = newArrayList();
        List<SqlParameterSource> successes = newArrayList();
        List<SqlParameterSource> failures = newArrayList();
        List<SqlParameterSource> failureReasons = newArrayList();

        Instant evalInstant = Instant.now();

        long calcConfId = _calculationConfigurations.get(calcConfigName);

        for (final ComputationTargetSpecification targetSpec : viewCalculationResultModel.getAllTargets()) {
            final long computationTargetId = _computationTargets.get(targetSpec);
            // Tracks whether any value on this target failed; decides the target's status below.
            boolean specFailures = false;
            for (final ComputedValueResult computedValue : viewCalculationResultModel
                    .getAllValues(targetSpec)) {
                // A null converter (missing value, or no registered converter) routes the
                // value down the failure path below.
                ResultConverter<Object> resultConverter = null;
                if (!(computedValue.getValue() instanceof MissingValue)) {
                    try {
                        resultConverter = (ResultConverter<Object>) _resultConverterCache
                                .getConverter(computedValue.getValue());
                    } catch (IllegalArgumentException e) {
                        s_logger.info("No converter for value of type " + computedValue.getValue().getClass()
                                + " for " + computedValue.getSpecification());
                    }
                }

                final ValueSpecification specification = computedValue.getSpecification();
                // Results the run was not configured to capture are logged and dropped.
                if (!_riskValueSpecifications.containsKey(specification)) {
                    s_logger.error("Unexpected result specification " + specification
                            + ". Result cannot be written. Result value was " + computedValue.getValue());
                    continue;
                }
                final long valueSpecificationId = _riskValueSpecifications.get(specification);
                final long functionUniqueId = getFunctionUniqueIdInTransaction(
                        specification.getFunctionUniqueId()).getId();
                final long computeNodeId = getOrCreateComputeNode(computedValue.getComputeNodeId()).getId();

                if (resultConverter != null
                        && computedValue.getInvocationResult() == InvocationResult.SUCCESS) {
                    s_logger.debug("Writing value {} for value spec {}", computedValue.getValue(),
                            specification);
                    // One converted result may expand to several named double columns,
                    // each written as its own success row.
                    Map<String, Double> valueAsDoublesMap = resultConverter
                            .convert(computedValue.getSpecification().getValueName(), computedValue.getValue());
                    for (Map.Entry<String, Double> valueEntry : valueAsDoublesMap.entrySet()) {
                        final String valueName = valueEntry.getKey();
                        final Double doubleValue = ensureDatabasePrecision(valueEntry.getValue());
                        final long successId = nextId(RSK_SEQUENCE_NAME);
                        successes.add(getSuccessArgs(successId, riskRunId, evalInstant, calcConfId,
                                computationTargetId, valueSpecificationId, functionUniqueId, computeNodeId,
                                valueName, doubleValue));
                    }
                } else {
                    // Failure path: either the invocation failed or no converter was found.
                    s_logger.info("Writing failure for {} with invocation result {}, {} ",
                            newArray(computedValue.getSpecification(), computedValue.getInvocationResult(),
                                    computedValue.getAggregatedExecutionLog()));
                    specFailures = true;

                    final long failureId = nextId(RSK_SEQUENCE_NAME);
                    failures.add(getFailureArgs(failureId, riskRunId, evalInstant, calcConfId,
                            computationTargetId, valueSpecificationId, functionUniqueId, computeNodeId,
                            specification.getValueName()));

                    // Link the failure row to any cached compute-failure reasons for this spec.
                    BatchResultWriterFailure cachedFailure = errorCache.get(specification);
                    if (cachedFailure != null) {
                        for (long computeFailureId : cachedFailure.getComputeFailureIds()) {
                            ArgumentChecker.notNull(computeFailureId, "computeFailureId");
                            final long failureReasonId = nextId(RSK_SEQUENCE_NAME);
                            failureReasons
                                    .add(getFailureReasonArgs(failureReasonId, failureId, computeFailureId));
                        }
                    }
                }
            }
            // A target is failed if any of its values failed now, or it was already failed
            // in a previous batch (a once-failed target never becomes successful).
            StatusEntry.Status status = getStatus(statusCache, calcConfigName, targetSpec);
            if (specFailures || status == StatusEntry.Status.FAILURE) {
                successfulTargets.remove(targetSpec);
                failedTargets.add(targetSpec);
            } else {
                successfulTargets.add(targetSpec);
            }

            // storing target data: Bean targets have their properties flattened into rows.
            ComputationTarget computationTarget = _computationTargetResolver.resolve(targetSpec,
                    VersionCorrection.LATEST);
            Object targetValue = computationTarget.getValue();
            if (targetValue instanceof Bean) {
                Bean bean = (Bean) targetValue;
                for (String propertyName : bean.propertyNames()) {
                    Property<Object> property = bean.property(propertyName);
                    final long targetPropertyId = nextId(RSK_SEQUENCE_NAME);
                    targetProperties.add(getTargetPropertyArgs(targetPropertyId, computationTargetId,
                            propertyName, property.get() == null ? "NULL" : property.get().toString()));
                }
            }
        }

        if (successes.isEmpty() && failures.isEmpty() && failureReasons.isEmpty() && successfulTargets.isEmpty()
                && failedTargets.isEmpty()) {
            s_logger.debug("Nothing to write to DB for {}", resultModel);
            // NOTE(review): this returns from the whole method, not just this calculation
            // configuration, so any remaining configs in the iteration are skipped. If an
            // empty config does not imply the rest are empty too, a 'continue' may have
            // been intended -- confirm.
            return;
        }

        // Insert successes under a savepoint: if the batch insert fails, roll back to the
        // savepoint and convert every pending success into a failure row instead.
        Object preSuccessSavepoint = transactionStatus.createSavepoint();
        try {
            getJdbcTemplate().batchUpdate(getElSqlBundle().getSql("InsertRiskSuccess"),
                    successes.toArray(new DbMapSqlParameterSource[successes.size()]));
        } catch (Exception e) {
            s_logger.error("Failed to write successful calculations to batch database. Converting to failures.",
                    e);
            transactionStatus.rollbackToSavepoint(preSuccessSavepoint);
            if (!successes.isEmpty()) {
                // Record the insert exception itself as the failure reason for each row.
                String exceptionClass = e.getClass().getName();
                String exceptionMsg = e.getMessage();
                final StringBuilder buffer = new StringBuilder();
                for (StackTraceElement element : e.getStackTrace()) {
                    buffer.append(element.toString()).append("\n");
                }
                final String stackTrace = buffer.toString();
                for (SqlParameterSource success : successes) {
                    failures.add(convertSuccessToFailure(success));
                    final long failureId = getId(success);
                    final long functionId = getFunctionId(success);
                    ComputeFailureKey computeFailureKey = new ComputeFailureKey(String.valueOf(functionId),
                            exceptionClass, exceptionMsg, stackTrace);
                    ComputeFailure computeFailure = getComputeFailureFromDb(computeFailureCache,
                            computeFailureKey);
                    final long failureReasonId = nextId(RSK_SEQUENCE_NAME);
                    failureReasons
                            .add(getFailureReasonArgs(failureReasonId, failureId, computeFailure.getId()));
                }
                // All previously-successful targets are now failed; drop the stale batches.
                failedTargets.addAll(successfulTargets);
                successes.clear();
                successfulTargets.clear();
                targetProperties.clear();
            }
        }
        // Target properties are best-effort: a failed insert is logged and rolled back
        // without affecting the rest of the write.
        Object preTargetPropertiesFailureSavepoint = transactionStatus.createSavepoint();
        try {
            getJdbcTemplate().batchUpdate(getElSqlBundle().getSql("InsertTargetProperties"),
                    targetProperties.toArray(new DbMapSqlParameterSource[targetProperties.size()]));
        } catch (Exception e) {
            s_logger.error("Failed to write target properties to batch database", e);
            transactionStatus.rollbackToSavepoint(preTargetPropertiesFailureSavepoint);
        }
        // Failures and their reasons are written together under one savepoint.
        Object preFailureSavepoint = transactionStatus.createSavepoint();
        try {
            getJdbcTemplate().batchUpdate(getElSqlBundle().getSql("InsertRiskFailure"),
                    failures.toArray(new DbMapSqlParameterSource[failures.size()]));
            getJdbcTemplate().batchUpdate(getElSqlBundle().getSql("InsertRiskFailureReason"),
                    failureReasons.toArray(new DbMapSqlParameterSource[failureReasons.size()]));
        } catch (Exception e) {
            s_logger.error("Failed to write failures to batch database", e);
            transactionStatus.rollbackToSavepoint(preFailureSavepoint);
        }

        // Finally, push the per-target outcome into the status cache / DB.
        updateStatusEntries(riskRunId, statusCache, calcConfigName, StatusEntry.Status.SUCCESS,
                successfulTargets);
        updateStatusEntries(riskRunId, statusCache, calcConfigName, StatusEntry.Status.FAILURE, failedTargets);
    }
}

From source file:org.apache.ambari.server.controller.AmbariManagementControllerTest.java

/**
 * Verifies per-host override of the {@code rca_enabled} global config: the cluster-level
 * value "true" must be preserved in the execution commands sent to the host running the
 * JOBTRACKER (h1) but rewritten to "false" for every other host (h2).
 */
@Test
public void testRcaOnJobtrackerHost() throws AmbariException {
    String clusterName = "foo1";
    createCluster(clusterName);
    Cluster cluster = clusters.getCluster(clusterName);
    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
    String serviceName = "MAPREDUCE";
    createService(clusterName, serviceName, null);
    String componentName1 = "JOBTRACKER";
    String componentName2 = "TASKTRACKER";
    String componentName3 = "MAPREDUCE_CLIENT";

    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "Called from a test");

    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);

    String host1 = "h1";
    String host2 = "h2";

    addHost(host1, clusterName);
    addHost(host2, clusterName);

    // JOBTRACKER lives only on host1; both hosts get TASKTRACKER and the client.
    createServiceComponentHost(clusterName, serviceName, componentName1, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host2, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host2, null);

    // Cluster-level "global" config v1 enables RCA for the whole cluster.
    Map<String, String> configs = new HashMap<String, String>();
    configs.put("a", "b");
    configs.put("rca_enabled", "true");

    ClusterRequest cr = new ClusterRequest(cluster.getClusterId(), clusterName, null, null);
    cr.setDesiredConfig(
            Collections.singletonList(new ConfigurationRequest(clusterName, "global", "v1", configs, null)));
    controller.updateClusters(Collections.singleton(cr), Collections.<String, String>emptyMap());

    // Install the service to generate execution commands.
    // (Removed an unused configVersions map and a redundant clear() of a
    // freshly constructed set.)
    Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
    sReqs.add(new ServiceRequest(clusterName, serviceName, State.INSTALLED.name()));
    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, sReqs,
            mapRequestProps, true, false);

    // Every command for the JOBTRACKER host keeps rca_enabled=true; every command
    // for the other host must have it forced to false.
    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
    for (ExecutionCommandWrapper cmd : stages.get(0).getExecutionCommands(host1)) {
        assertEquals("true", cmd.getExecutionCommand().getConfigurations().get("global").get("rca_enabled"));
    }
    for (ExecutionCommandWrapper cmd : stages.get(0).getExecutionCommands(host2)) {
        assertEquals("false", cmd.getExecutionCommand().getConfigurations().get("global").get("rca_enabled"));
    }
}