List of usage examples for java.util.Set.clear()
void clear();

Removes all of the elements from this set; the set will be empty after this call returns.
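Before the collected examples, here is a minimal, self-contained sketch of the pattern most of them use: a single Set instance is filled, consumed, cleared with clear(), and then reused for the next batch instead of allocating a new set. The class and method names (SetClearSketch, submit) and the "request-N" strings are illustrative only and do not come from the examples below.

import java.util.HashSet;
import java.util.Set;

public class SetClearSketch {

    public static void main(String[] args) {
        // One set instance is reused for successive batches of work.
        Set<String> batch = new HashSet<String>();

        batch.add("request-1");
        batch.add("request-2");
        submit(batch);                        // first batch: two elements

        batch.clear();                        // empty the set instead of creating a new one
        System.out.println(batch.isEmpty());  // prints: true

        batch.add("request-3");
        submit(batch);                        // second batch: one element
    }

    // Stand-in for whatever consumes the batch (a controller call in the examples
    // below); included only to keep the sketch self-contained and runnable.
    private static void submit(Set<String> batch) {
        System.out.println("submitting " + batch.size() + " request(s): " + batch);
    }
}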
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Ignore
@Test
public void testServiceComponentHostUpdateStackId() throws Exception {
    String clusterName = "foo1";
    createCluster(clusterName);
    String serviceName1 = "HDFS";
    createService(clusterName, serviceName1, null);
    String componentName1 = "NAMENODE";
    String componentName2 = "DATANODE";
    createServiceComponent(clusterName, serviceName1, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName1, componentName2, State.INIT);
    String host1 = "h1";
    String host2 = "h2";
    addHost(host1, clusterName);
    addHost(host2, clusterName);
    Set<ServiceComponentHostRequest> set1 = new HashSet<ServiceComponentHostRequest>();
    ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1,
            host1, State.INIT.toString());
    ServiceComponentHostRequest r2 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1,
            host2, State.INIT.toString());
    ServiceComponentHostRequest r3 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName2,
            host1, State.INIT.toString());
    set1.add(r1);
    set1.add(r2);
    set1.add(r3);
    controller.createHostComponents(set1);
    Cluster c1 = clusters.getCluster(clusterName);
    Service s1 = c1.getService(serviceName1);
    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
    ServiceComponentHost sch2 = sc1.getServiceComponentHost(host2);
    ServiceComponentHost sch3 = sc2.getServiceComponentHost(host1);
    s1.setDesiredState(State.INSTALLED);
    sc1.setDesiredState(State.INSTALLED);
    sc2.setDesiredState(State.INSTALLED);
    ServiceComponentHostRequest req1;
    ServiceComponentHostRequest req2;
    ServiceComponentHostRequest req3;
    Set<ServiceComponentHostRequest> reqs = new HashSet<ServiceComponentHostRequest>();
    StackId newStack = new StackId("HDP-0.2");
    StackId oldStack = new StackId("HDP-0.1");
    c1.setCurrentStackVersion(newStack);
    c1.setDesiredStackVersion(newStack);
    sch1.setState(State.INSTALLED);
    sch2.setState(State.UPGRADING);
    sch1.setDesiredState(State.INSTALLED);
    sch2.setDesiredState(State.INSTALLED);
    sch1.setStackVersion(oldStack);
    sch2.setStackVersion(oldStack);
    sch1.setDesiredStackVersion(newStack);
    sch2.setDesiredStackVersion(oldStack);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.INSTALLED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    req2 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host2,
            State.INSTALLED.toString());
    req2.setDesiredStackId("HDP-0.2");
    reqs.add(req2);
    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "testServiceComponentHostUpdateStackId");
    RequestStatusResponse resp = updateHostComponents(reqs, mapRequestProps, true);
    List<Stage> stages = actionDB.getAllStages(resp.getRequestId());
    Assert.assertEquals(1, stages.size());
    Assert.assertEquals(2, stages.get(0).getOrderedHostRoleCommands().size());
    Assert.assertEquals("testServiceComponentHostUpdateStackId", stages.get(0).getRequestContext());
    Assert.assertEquals(State.UPGRADING, sch1.getState());
    Assert.assertEquals(State.UPGRADING, sch2.getState());
    sch1.refresh();
    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
    sch2.refresh();
    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
    for (HostRoleCommand command : stages.get(0).getOrderedHostRoleCommands()) {
        ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
        Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
        Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
        Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
    }
    sch1.setState(State.INSTALLED);
    sch1.setDesiredState(State.INSTALLED);
    sch2.setState(State.UPGRADING);
    sch2.setDesiredState(State.INSTALLED);
    sch3.setState(State.UPGRADING);
    sch3.setDesiredState(State.INSTALLED);
    sch3.setStackVersion(oldStack);
    sch3.setDesiredStackVersion(newStack);
    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host1,
            State.INSTALLED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    req2 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName1, host2,
            State.INSTALLED.toString());
    req2.setDesiredStackId("HDP-0.2");
    reqs.add(req2);
    req3 = new ServiceComponentHostRequest(clusterName, serviceName1, componentName2, host1,
            State.INSTALLED.toString());
    req3.setDesiredStackId("HDP-0.2");
    reqs.add(req3);
    resp = updateHostComponents(reqs, Collections.<String, String>emptyMap(), true);
    stages = actionDB.getAllStages(resp.getRequestId());
    Assert.assertEquals(2, stages.size());
    Assert.assertEquals(2, stages.get(0).getOrderedHostRoleCommands().size());
    Assert.assertEquals("", stages.get(0).getRequestContext());
    Assert.assertEquals(State.UPGRADING, sch1.getState());
    Assert.assertEquals(State.UPGRADING, sch2.getState());
    Assert.assertEquals(State.UPGRADING, sch3.getState());
    sch1.refresh();
    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
    sch2.refresh();
    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
    sch3.refresh();
    Assert.assertTrue(sch3.getDesiredStackVersion().compareTo(newStack) == 0);
    for (Stage stage : stages) {
        for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
            ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
            Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
            Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
            Assert.assertEquals("{\"stackName\":\"HDP\",\"stackVersion\":\"0.2\"}",
                    execCommand.getCommandParams().get("target_stack_version"));
            Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
        }
    }
}
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test
public void testDeleteComponentsOnHost() throws Exception {
    String clusterName = "foo1";
    createCluster(clusterName);
    Cluster cluster = clusters.getCluster(clusterName);
    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
    String serviceName = "HDFS";
    createService(clusterName, serviceName, null);
    String componentName1 = "NAMENODE";
    String componentName2 = "DATANODE";
    String componentName3 = "HDFS_CLIENT";
    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
    String host1 = "h1";
    addHost(host1, clusterName);
    createServiceComponentHost(clusterName, null, componentName1, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
    // Install
    installService(clusterName, serviceName, false, false);
    // make them believe they are up
    Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName)
            .getServiceComponent(componentName1).getServiceComponentHosts();
    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
        ServiceComponentHost cHost = entry.getValue();
        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis()));
    }
    hostComponents = cluster.getService(serviceName).getServiceComponent(componentName2)
            .getServiceComponentHosts();
    for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
        ServiceComponentHost cHost = entry.getValue();
        cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
        cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(),
                cHost.getHostName(), System.currentTimeMillis()));
    }
    ServiceComponentHost sch = cluster.getService(serviceName).getServiceComponent(componentName2)
            .getServiceComponentHost(host1);
    Assert.assertNotNull(sch);
    sch.handleEvent(new ServiceComponentHostStartEvent(sch.getServiceComponentName(), sch.getHostName(),
            System.currentTimeMillis()));
    sch.handleEvent(new ServiceComponentHostStartedEvent(sch.getServiceComponentName(), sch.getHostName(),
            System.currentTimeMillis()));
    Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
    schRequests.add(new ServiceComponentHostRequest(clusterName, null, null, host1, null));
    try {
        controller.deleteHostComponents(schRequests);
        fail("Expected exception while deleting all host components.");
    } catch (AmbariException e) {
    }
    Assert.assertEquals(3, cluster.getServiceComponentHosts(host1).size());
    sch.handleEvent(new ServiceComponentHostStopEvent(sch.getServiceComponentName(), sch.getHostName(),
            System.currentTimeMillis()));
    sch.handleEvent(new ServiceComponentHostStoppedEvent(sch.getServiceComponentName(), sch.getHostName(),
            System.currentTimeMillis()));
    schRequests.clear();
    // disable HC, DN was already stopped
    schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, "DISABLED"));
    updateHostComponents(schRequests, new HashMap<String, String>(), false);
    // delete HC
    schRequests.clear();
    schRequests.add(new ServiceComponentHostRequest(clusterName, null, null, host1, null));
    controller.deleteHostComponents(schRequests);
    Assert.assertEquals(0, cluster.getServiceComponentHosts(host1).size());
}
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test
public void testServiceStopWhileStopping() throws Exception {
    String clusterName = "foo1";
    createCluster(clusterName);
    clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1"));
    String serviceName = "HDFS";
    createService(clusterName, serviceName, null);
    String componentName1 = "NAMENODE";
    String componentName2 = "DATANODE";
    String componentName3 = "HDFS_CLIENT";
    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName2, State.INIT);
    createServiceComponent(clusterName, serviceName, componentName3, State.INIT);
    String host1 = "h1";
    String host2 = "h2";
    addHost(host1, clusterName);
    addHost(host2, clusterName);
    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "Called from a test");
    // null service should work
    createServiceComponentHost(clusterName, null, componentName1, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName2, host2, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
    createServiceComponentHost(clusterName, serviceName, componentName3, host2, null);
    Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
            .getServiceComponent(componentName1).getServiceComponentHost(host1));
    Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
            .getServiceComponent(componentName2).getServiceComponentHost(host1));
    Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
            .getServiceComponent(componentName2).getServiceComponentHost(host2));
    Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
            .getServiceComponent(componentName3).getServiceComponentHost(host1));
    Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
            .getServiceComponent(componentName3).getServiceComponentHost(host2));
    // Install
    ServiceRequest r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString());
    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
    requests.add(r);
    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
    Assert.assertEquals(State.INSTALLED,
            clusters.getCluster(clusterName).getService(serviceName).getDesiredState());
    // manually change live state to installed as no running action manager
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            sch.setState(State.INSTALLED);
        }
    }
    // Start
    r = new ServiceRequest(clusterName, serviceName, State.STARTED.toString());
    requests.clear();
    requests.add(r);
    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
    // manually change live state to started as no running action manager
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            if (!sch.getServiceComponentName().equals("HDFS_CLIENT")) {
                sch.setState(State.STARTED);
            }
        }
    }
    Assert.assertEquals(State.STARTED,
            clusters.getCluster(clusterName).getService(serviceName).getDesiredState());
    // Set Current state to stopping
    clusters.getCluster(clusterName).getService(serviceName).setDesiredState(State.STOPPING);
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            if (!sch.getServiceComponentName().equals("HDFS_CLIENT")) {
                Assert.assertEquals(State.STARTED, sch.getDesiredState());
                sch.setState(State.STOPPING);
            } else if (sch.getServiceComponentName().equals("DATANODE")) {
                ServiceComponentHostRequest r1 = new ServiceComponentHostRequest(clusterName, serviceName,
                        sch.getServiceComponentName(), sch.getHostName(), State.INSTALLED.name());
                Set<ServiceComponentHostRequest> reqs1 = new HashSet<ServiceComponentHostRequest>();
                reqs1.add(r1);
                updateHostComponents(reqs1, Collections.<String, String>emptyMap(), true);
                Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
            }
        }
    }
    // Stop all services
    r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString());
    requests.clear();
    requests.add(r);
    ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            if (!sch.getServiceComponentName().equals("HDFS_CLIENT")) {
                Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
            }
        }
    }
}
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test public void testUpdateConfigForRunningService() throws Exception { String clusterName = "foo1"; createCluster(clusterName);// w w w. j a v a2 s. c o m clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1")); String serviceName = "HDFS"; createService(clusterName, serviceName, null); String componentName1 = "NAMENODE"; String componentName2 = "DATANODE"; String componentName3 = "HDFS_CLIENT"; Map<String, String> mapRequestProps = new HashMap<String, String>(); mapRequestProps.put("context", "Called from a test"); createServiceComponent(clusterName, serviceName, componentName1, State.INIT); createServiceComponent(clusterName, serviceName, componentName2, State.INIT); createServiceComponent(clusterName, serviceName, componentName3, State.INIT); String host1 = "h1"; String host2 = "h2"; addHost(host1, clusterName); addHost(host2, clusterName); // null service should work createServiceComponentHost(clusterName, null, componentName1, host1, null); createServiceComponentHost(clusterName, serviceName, componentName2, host1, null); createServiceComponentHost(clusterName, serviceName, componentName2, host2, null); createServiceComponentHost(clusterName, serviceName, componentName3, host1, null); createServiceComponentHost(clusterName, serviceName, componentName3, host2, null); Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName) .getServiceComponent(componentName1).getServiceComponentHost(host1)); Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName) .getServiceComponent(componentName2).getServiceComponentHost(host1)); Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName) .getServiceComponent(componentName2).getServiceComponentHost(host2)); Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName) .getServiceComponent(componentName3).getServiceComponentHost(host1)); Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName) .getServiceComponent(componentName3).getServiceComponentHost(host2)); // Install ServiceRequest r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString()); Set<ServiceRequest> requests = new HashSet<ServiceRequest>(); requests.add(r); ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false); Assert.assertEquals(State.INSTALLED, clusters.getCluster(clusterName).getService(serviceName).getDesiredState()); // manually change live state to installed as no running action manager for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents() .values()) { for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) { sch.setState(State.INSTALLED); } } // Start r = new ServiceRequest(clusterName, serviceName, State.STARTED.toString()); requests.clear(); requests.add(r); ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false); // manually change live state to started as no running action manager for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents() .values()) { for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) { sch.setState(State.STARTED); } } Assert.assertEquals(State.STARTED, clusters.getCluster(clusterName).getService(serviceName).getDesiredState()); for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents() .values()) { if (sc.getName().equals("HDFS_CLIENT")) { 
Assert.assertEquals(State.INSTALLED, sc.getDesiredState()); } else { Assert.assertEquals(State.STARTED, sc.getDesiredState()); } for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) { if (sch.getServiceComponentName().equals("HDFS_CLIENT")) { Assert.assertEquals(State.INSTALLED, sch.getDesiredState()); } else { Assert.assertEquals(State.STARTED, sch.getDesiredState()); } } } Map<String, String> configs = new HashMap<String, String>(); configs.put("a", "b"); ConfigurationRequest cr1, cr2, cr3, cr4, cr5, cr6, cr7, cr8; cr1 = new ConfigurationRequest(clusterName, "typeA", "v1", configs, null); cr2 = new ConfigurationRequest(clusterName, "typeB", "v1", configs, null); cr3 = new ConfigurationRequest(clusterName, "typeC", "v1", configs, null); cr4 = new ConfigurationRequest(clusterName, "typeD", "v1", configs, null); cr5 = new ConfigurationRequest(clusterName, "typeA", "v2", configs, null); cr6 = new ConfigurationRequest(clusterName, "typeB", "v2", configs, null); cr7 = new ConfigurationRequest(clusterName, "typeC", "v2", configs, null); cr8 = new ConfigurationRequest(clusterName, "typeE", "v1", configs, null); controller.createConfiguration(cr1); controller.createConfiguration(cr2); controller.createConfiguration(cr3); controller.createConfiguration(cr4); controller.createConfiguration(cr5); controller.createConfiguration(cr6); controller.createConfiguration(cr7); controller.createConfiguration(cr8); Cluster cluster = clusters.getCluster(clusterName); Service s = cluster.getService(serviceName); ServiceComponent sc1 = s.getServiceComponent(componentName1); ServiceComponent sc2 = s.getServiceComponent(componentName2); ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1); Set<ServiceComponentHostRequest> schReqs = new HashSet<ServiceComponentHostRequest>(); Set<ServiceComponentRequest> scReqs = new HashSet<ServiceComponentRequest>(); Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>(); Map<String, String> configVersions = new HashMap<String, String>(); // update configs at SCH and SC level configVersions.clear(); configVersions.put("typeA", "v1"); configVersions.put("typeB", "v1"); configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null)); Assert.assertNull(updateHostComponents(schReqs, Collections.<String, String>emptyMap(), true)); configVersions.clear(); configVersions.put("typeC", "v1"); configVersions.put("typeD", "v1"); scReqs.clear(); scReqs.add(new ServiceComponentRequest(clusterName, serviceName, componentName2, null)); Assert.assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.<String, String>emptyMap(), true)); // update configs at service level configVersions.clear(); configVersions.put("typeA", "v2"); configVersions.put("typeC", "v2"); configVersions.put("typeE", "v1"); sReqs.clear(); sReqs.add(new ServiceRequest(clusterName, serviceName, null)); Assert.assertNull( ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false)); // update configs at SCH level configVersions.clear(); configVersions.put("typeA", "v1"); configVersions.put("typeB", "v1"); configVersions.put("typeC", "v1"); schReqs.clear(); schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, null)); Assert.assertNull(updateHostComponents(schReqs, Collections.<String, String>emptyMap(), true)); // update configs at SC level configVersions.clear(); configVersions.put("typeC", "v2"); 
configVersions.put("typeD", "v1"); scReqs.clear(); scReqs.add(new ServiceComponentRequest(clusterName, serviceName, componentName1, null)); Assert.assertNull(ComponentResourceProviderTest.updateComponents(controller, scReqs, Collections.<String, String>emptyMap(), true)); }
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test
public void testClientServiceSmokeTests() throws AmbariException {
    String clusterName = "foo1";
    createCluster(clusterName);
    clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1"));
    String serviceName = "PIG";
    createService(clusterName, serviceName, null);
    String componentName1 = "PIG";
    createServiceComponent(clusterName, serviceName, componentName1, State.INIT);
    String host1 = "h1";
    String host2 = "h2";
    addHost(host1, clusterName);
    addHost(host2, clusterName);
    Map<String, String> mapRequestProps = new HashMap<String, String>();
    mapRequestProps.put("context", "Called from a test");
    // null service should work
    createServiceComponentHost(clusterName, null, componentName1, host1, null);
    createServiceComponentHost(clusterName, null, componentName1, host2, null);
    ServiceRequest r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString());
    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
    requests.add(r);
    RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, requests,
            mapRequestProps, true, false);
    Assert.assertEquals(State.INSTALLED,
            clusters.getCluster(clusterName).getService(serviceName).getDesiredState());
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
            Assert.assertEquals(State.INIT, sch.getState());
        }
    }
    List<ShortTaskStatus> taskStatuses = trackAction.getTasks();
    Assert.assertEquals(2, taskStatuses.size());
    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
    Assert.assertEquals(1, stages.size());
    Assert.assertEquals("Called from a test", stages.get(0).getRequestContext());
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            sch.setState(State.INSTALLED);
        }
    }
    r = new ServiceRequest(clusterName, serviceName, State.STARTED.toString());
    requests.clear();
    requests.add(r);
    injector.getInstance(ActionMetadata.class).addServiceCheckAction("PIG");
    trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
    Assert.assertNotNull(trackAction);
    Assert.assertEquals(State.INSTALLED,
            clusters.getCluster(clusterName).getService(serviceName).getDesiredState());
    for (ServiceComponent sc : clusters.getCluster(clusterName).getService(serviceName).getServiceComponents()
            .values()) {
        Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
            Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
            Assert.assertEquals(State.INSTALLED, sch.getState());
        }
    }
    stages = actionDB.getAllStages(trackAction.getRequestId());
    for (Stage s : stages) {
        LOG.info("Stage dump : " + s.toString());
    }
    Assert.assertEquals(1, stages.size());
    taskStatuses = trackAction.getTasks();
    Assert.assertEquals(1, taskStatuses.size());
    Assert.assertEquals(Role.PIG_SERVICE_CHECK.toString(), taskStatuses.get(0).getRole());
}
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test public void testDisableAndDeleteStates() throws Exception { Map<String, String> mapRequestProps = new HashMap<String, String>(); Injector injector = Guice.createInjector(new AbstractModule() { @Override/* w ww . j av a 2 s . c om*/ protected void configure() { Properties properties = new Properties(); properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory"); properties.setProperty(Configuration.METADETA_DIR_PATH, "src/test/resources/stacks"); properties.setProperty(Configuration.SERVER_VERSION_FILE, "src/test/resources/version"); properties.setProperty(Configuration.OS_VERSION_KEY, "centos5"); properties.setProperty(Configuration.SHARED_RESOURCES_DIR_KEY, "src/test/resources/"); try { install(new ControllerModule(properties)); // ambari events interfere with the workflow of this test bind(AmbariEventPublisher.class).toInstance(EasyMock.createMock(AmbariEventPublisher.class)); } catch (Exception e) { throw new RuntimeException(e); } } }); injector.getInstance(GuiceJpaInitializer.class); try { AmbariManagementController amc = injector.getInstance(AmbariManagementController.class); Clusters clusters = injector.getInstance(Clusters.class); Gson gson = new Gson(); clusters.addHost("host1"); clusters.addHost("host2"); clusters.addHost("host3"); Host host = clusters.getHost("host1"); setOsFamily(host, "redhat", "5.9"); host.persist(); host = clusters.getHost("host2"); setOsFamily(host, "redhat", "5.9"); host.persist(); host = clusters.getHost("host3"); setOsFamily(host, "redhat", "5.9"); host.persist(); ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null); amc.createCluster(clusterRequest); Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null)); serviceRequests.add(new ServiceRequest("c1", "HIVE", null)); ServiceResourceProviderTest.createServices(amc, serviceRequests); Type confType = new TypeToken<Map<String, String>>() { }.getType(); ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1", gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType), null); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1", gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType), null); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "global", "version1", gson.<Map<String, String>>fromJson("{ \"hive.server2.enable.doAs\" : \"true\"}", confType), null); amc.createConfiguration(configurationRequest); Assert.assertTrue(clusters.getCluster("c1").getDesiredConfigs().containsKey("hive-site")); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null)); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>(); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); Set<HostRequest> hostRequests = new 
HashSet<HostRequest>(); hostRequests.add(new HostRequest("host1", "c1", null)); hostRequests.add(new HostRequest("host2", "c1", null)); hostRequests.add(new HostRequest("host3", "c1", null)); HostResourceProviderTest.createHosts(amc, hostRequests); Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null)); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null)); amc.createHostComponents(componentHostRequests); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", "INSTALLED")); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); Cluster cluster = clusters.getCluster("c1"); Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE") .getServiceComponentHosts(); org.junit.Assert.assertEquals(1, namenodes.size()); ServiceComponentHost componentHost = namenodes.get("host1"); Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS") .getServiceComponent("DATANODE").getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE") .getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "DISABLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.DISABLED, componentHost.getState()); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.INSTALLED, 
componentHost.getState()); componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "DISABLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); Assert.assertEquals(State.DISABLED, componentHost.getState()); componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null)); amc.createHostComponents(componentHostRequests); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); Assert.assertEquals(2, namenodes.size()); componentHost = namenodes.get("host2"); componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); componentHost .handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis())); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", "STARTED")); RequestStatusResponse response = ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); for (ShortTaskStatus shortTaskStatus : response.getTasks()) { assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole())); } componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null)); amc.deleteHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); org.junit.Assert.assertEquals(1, namenodes.size()); // testing the behavior for runSmokeTest flag // piggybacking on this test to avoid setting up the mock cluster testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests); // should be able to add the host component back componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null)); amc.createHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(2, namenodes.size()); // make INSTALLED again componentHost = namenodes.get("host1"); componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); componentHost .handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis())); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "INSTALLED")); updateHostComponents(amc, componentHostRequests, mapRequestProps, true); assertEquals(State.INSTALLED, namenodes.get("host1").getState()); // make unknown ServiceComponentHost sch = null; for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) { if (tmp.getServiceComponentName().equals("DATANODE")) { tmp.setState(State.UNKNOWN); sch = tmp; } } assertNotNull(sch); // make disabled componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", "DISABLED")); 
updateHostComponents(amc, componentHostRequests, mapRequestProps, false); org.junit.Assert.assertEquals(State.DISABLED, sch.getState()); // State should not be changed if componentHostRequests are empty componentHostRequests.clear(); mapRequestProps.put(RequestOperationLevel.OPERATION_CLUSTER_ID, "c1"); updateHostComponents(amc, componentHostRequests, mapRequestProps, false); org.junit.Assert.assertEquals(State.DISABLED, sch.getState()); mapRequestProps.clear(); // ServiceComponentHost remains in disabled after service stop assertEquals(sch.getServiceComponentName(), "DATANODE"); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", "INSTALLED")); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); assertEquals(State.DISABLED, sch.getState()); // ServiceComponentHost remains in disabled after service start serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", "STARTED")); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); assertEquals(State.DISABLED, sch.getState()); // confirm delete componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null)); amc.deleteHostComponents(componentHostRequests); sch = null; for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) { if (tmp.getServiceComponentName().equals("DATANODE")) { sch = tmp; } } org.junit.Assert.assertNull(sch); /* *Test remove service */ serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", "INSTALLED")); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", null, null)); org.junit.Assert.assertEquals(2, ServiceResourceProviderTest.getServices(amc, serviceRequests).size()); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null)); serviceRequests.add(new ServiceRequest("c1", "HIVE", null)); ServiceResourceProviderTest.deleteServices(amc, serviceRequests); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", null, null)); org.junit.Assert.assertEquals(0, ServiceResourceProviderTest.getServices(amc, serviceRequests).size()); /* *Test add service again */ serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null)); ServiceResourceProviderTest.createServices(amc, serviceRequests); org.junit.Assert.assertEquals(1, ServiceResourceProviderTest.getServices(amc, serviceRequests).size()); //Create new configs configurationRequest = new ConfigurationRequest("c1", "core-site", "version2", gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType), null); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2", gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType), null); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "global", "version2", gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType), null); amc.createConfiguration(configurationRequest); //Add configs to service serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null)); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); //Crate service components 
serviceComponentRequests = new HashSet<ServiceComponentRequest>(); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); //Create ServiceComponentHosts componentHostRequests = new HashSet<ServiceComponentHostRequest>(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null)); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null)); amc.createHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); org.junit.Assert.assertEquals(1, namenodes.size()); Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE") .getServiceComponentHosts(); org.junit.Assert.assertEquals(3, datanodes.size()); Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS") .getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts(); org.junit.Assert.assertEquals(1, namenodes2.size()); } finally { injector.getInstance(PersistService.class).stop(); } }
From source file: org.apache.ambari.server.controller.AmbariManagementControllerTest.java
@Test public void testScheduleSmokeTest() throws Exception { final String HOST1 = "host1"; final String OS_TYPE = "centos5"; final String STACK_ID = "HDP-2.0.1"; final String CLUSTER_NAME = "c1"; final String HDFS_SERVICE_CHECK_ROLE = "HDFS_SERVICE_CHECK"; final String MAPREDUCE2_SERVICE_CHECK_ROLE = "MAPREDUCE2_SERVICE_CHECK"; final String YARN_SERVICE_CHECK_ROLE = "YARN_SERVICE_CHECK"; Map<String, String> mapRequestProps = Collections.emptyMap(); Injector injector = Guice.createInjector(new AbstractModule() { @Override/* ww w . j a va2 s .c om*/ protected void configure() { Properties properties = new Properties(); properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory"); properties.setProperty(Configuration.METADETA_DIR_PATH, "src/test/resources/stacks"); properties.setProperty(Configuration.SERVER_VERSION_FILE, "../version"); properties.setProperty(Configuration.OS_VERSION_KEY, OS_TYPE); properties.setProperty(Configuration.SHARED_RESOURCES_DIR_KEY, "src/test/resources/"); try { install(new ControllerModule(properties)); } catch (Exception e) { throw new RuntimeException(e); } } }); injector.getInstance(GuiceJpaInitializer.class); try { AmbariManagementController amc = injector.getInstance(AmbariManagementController.class); Clusters clusters = injector.getInstance(Clusters.class); clusters.addHost(HOST1); Host host = clusters.getHost(HOST1); setOsFamily(host, "redhat", "5.9"); host.persist(); ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null); amc.createCluster(clusterRequest); Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>(); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null)); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null)); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null)); ServiceResourceProviderTest.createServices(amc, serviceRequests); Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>(); serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null)); serviceComponentRequests .add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "SECONDARY_NAMENODE", null)); serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "DATANODE", null)); serviceComponentRequests .add(new ServiceComponentRequest(CLUSTER_NAME, "MAPREDUCE2", "HISTORYSERVER", null)); serviceComponentRequests .add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "RESOURCEMANAGER", null)); serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "YARN", "NODEMANAGER", null)); ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests); Set<HostRequest> hostRequests = new HashSet<HostRequest>(); hostRequests.add(new HostRequest(HOST1, CLUSTER_NAME, null)); HostResourceProviderTest.createHosts(amc, hostRequests); Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>(); componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "DATANODE", HOST1, null)); componentHostRequests.add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NAMENODE", HOST1, null)); componentHostRequests .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "SECONDARY_NAMENODE", HOST1, null)); componentHostRequests .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "HISTORYSERVER", HOST1, null)); componentHostRequests .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "RESOURCEMANAGER", HOST1, null)); 
componentHostRequests .add(new ServiceComponentHostRequest(CLUSTER_NAME, null, "NODEMANAGER", HOST1, null)); amc.createHostComponents(componentHostRequests); //Install services serviceRequests.clear(); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", State.INSTALLED.name())); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", State.INSTALLED.name())); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", State.INSTALLED.name())); ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); Cluster cluster = clusters.getCluster(CLUSTER_NAME); for (String serviceName : cluster.getServices().keySet()) { for (String componentName : cluster.getService(serviceName).getServiceComponents().keySet()) { Map<String, ServiceComponentHost> serviceComponentHosts = cluster.getService(serviceName) .getServiceComponent(componentName).getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : serviceComponentHosts.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), STACK_ID)); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } } } //Start services serviceRequests.clear(); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", State.STARTED.name())); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", State.STARTED.name())); serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", State.STARTED.name())); RequestStatusResponse response = ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false); Collection<?> hdfsSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(HDFS_SERVICE_CHECK_ROLE)); //Ensure that smoke test task was created for HDFS org.junit.Assert.assertEquals(1, hdfsSmokeTasks.size()); Collection<?> mapreduce2SmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(MAPREDUCE2_SERVICE_CHECK_ROLE)); //Ensure that smoke test task was created for MAPREDUCE2 org.junit.Assert.assertEquals(1, mapreduce2SmokeTasks.size()); Collection<?> yarnSmokeTasks = CollectionUtils.select(response.getTasks(), new RolePredicate(YARN_SERVICE_CHECK_ROLE)); //Ensure that smoke test task was created for YARN org.junit.Assert.assertEquals(1, yarnSmokeTasks.size()); } finally { injector.getInstance(PersistService.class).stop(); } }
From source file: org.atomserver.core.AbstractAtomCollection.java
/** * {@inheritDoc}//from w ww . j ava2 s .co m */ public java.util.Collection<UpdateCreateOrDeleteEntry> updateEntries(final RequestContext request) throws AtomServerException { Document<Feed> document; try { document = request.getDocument(); } catch (IOException e) { throw new AtomServerException(e); } if (document.getRoot().getEntries().size() > getMaxFullEntriesPerPage()) { throw new BadRequestException(MessageFormat.format("too many entries ({0}) in batch - max is {1}", document.getRoot().getEntries().size(), getMaxFullEntriesPerPage())); } final List<EntryTarget> entriesToUpdate = new ArrayList<EntryTarget>(); final List<EntryTarget> entriesToDelete = new ArrayList<EntryTarget>(); final EntryMap<String> entryXmlMap = new EntryMap<String>(); final Map<EntryTarget, Entry> entryMap = new HashMap<EntryTarget, Entry>(); final HashMap<EntryTarget, Integer> orderMap = new HashMap<EntryTarget, Integer>(); Operation defaultOperationExtension = document.getRoot().getExtension(AtomServerConstants.OPERATION); String defaultOperation = defaultOperationExtension == null ? "update" : defaultOperationExtension.getType(); List<Entry> entries = document.getRoot().getEntries(); UpdateCreateOrDeleteEntry[] updateEntries = new UpdateCreateOrDeleteEntry[entries.size()]; Set<RelaxedEntryTarget> relaxedEntryTargetSet = new HashSet<RelaxedEntryTarget>(); int order = 0; for (Entry entry : entries) { try { IRI baseIri = new IRI(getServiceBaseUri()); IRI iri = baseIri.relativize(entry.getLink("edit").getHref()); EntryTarget entryTarget = null; try { // The request is always as PUT, so we will get back a FeedTarget when we want an insert URITarget uriTarget = getURIHandler().parseIRI(request, iri); if (uriTarget instanceof FeedTarget) { entryTarget = new EntryTarget((FeedTarget) uriTarget); // determine if we are creating the entryId -- i.e. if this was a POST if (getEntryIdGenerator() == null) { throw new AtomServerException("No EntryIdGenerator was wired into the Collection (" + entryTarget.toString() + ")"); } else { entryTarget.setEntryId(getEntryIdGenerator().generateId()); } } else { entryTarget = (EntryTarget) uriTarget; } } catch (Exception e) { throw new BadRequestException("Bad request URI: " + iri, e); } if (entryTarget == null) { throw new BadRequestException("Bad request URI: " + iri); } String collection = entryTarget.getCollection(); ensureCollectionExists(collection); // Verify that we do not have multiple <operation> elements List<Operation> operationExtensions = entry.getExtensions(AtomServerConstants.OPERATION); if (operationExtensions != null && operationExtensions.size() > 1) { throw new BadRequestException("Multiple operations applied to one entry"); } // Set to the default operation if none is set. String operation = operationExtensions == null || operationExtensions.isEmpty() ? defaultOperation : operationExtensions.get(0).getType(); if (log.isDebugEnabled()) { log.debug("operation : " + operation); } // We do not allow an Entry to occur twice in the batch. // NOTE: the first one wins !! RelaxedEntryTarget relaxedEntryTarget = new RelaxedEntryTarget(entryTarget); if (relaxedEntryTargetSet.contains(relaxedEntryTarget)) { throw new BadRequestException( "You may not include the same Entry twice (" + entryTarget + ")."); } else { relaxedEntryTargetSet.add(relaxedEntryTarget); } entryMap.put(entryTarget, entry); // Add to the processing lists. 
if ("delete".equalsIgnoreCase(operation)) { entriesToDelete.add(entryTarget); orderMap.put(entryTarget, order); } else if ("update".equalsIgnoreCase(operation) || "insert".equalsIgnoreCase(operation)) { String entryXml = validateAndPreprocessEntryContents(entry, entryTarget); entriesToUpdate.add(entryTarget); entryXmlMap.put(entryTarget, entryXml); orderMap.put(entryTarget, order); setTargetContentHashCode(entryTarget, entry, entryXml); } } catch (AtomServerException e) { UpdateCreateOrDeleteEntry.CreateOrUpdateEntry updateEntry = new UpdateCreateOrDeleteEntry.CreateOrUpdateEntry( entry, false); updateEntry.setException(e); updateEntries[order] = updateEntry; } order++; } // update entry count if (getEntriesMonitor() != null) { getEntriesMonitor().updateNumberOfEntriesToUpdate(entries.size()); } Abdera abdera = request.getServiceContext().getAbdera(); // ---------------- process updates ------------------ if (!entriesToUpdate.isEmpty()) { java.util.Collection<BatchEntryResult> results = executeTransactionally( new TransactionalTask<java.util.Collection<BatchEntryResult>>() { public Collection<BatchEntryResult> execute() { java.util.Collection<BatchEntryResult> results = modifyEntries(request, entriesToUpdate); for (BatchEntryResult result : results) { boolean categoriesUpdated = false; if (result.getMetaData() != null) { categoriesUpdated = postProcessEntryContents( entryXmlMap.get(result.getMetaData()), result.getMetaData()); } if (!result.isModified() && !categoriesUpdated) { // Same contents and categories if (getEntriesMonitor() != null) { getEntriesMonitor().updateNumberOfEntriesNotUpdatedDueToSameContent(1); } continue; } // if contents is the same but the categories have changed, // go back and update the entry so that it'll have a new revision and timestamp. if (!result.isModified()) { EntryMetaDataStatus mdStatus = reModifyEntry(null, result.getEntryTarget()); // update the result to indicate Entry has been modified. result.setMetaData(mdStatus.getEntryMetaData()); result.setModified(true); } if (result.getException() == null) { String entryXml = entryXmlMap.get(result.getEntryTarget()); getContentStorage().putContent(entryXml, result.getMetaData()); } if (getEntriesMonitor() != null) { getEntriesMonitor().updateNumberOfEntriesActuallyUpdated(1); } } return results; } }); for (BatchEntryResult result : results) { EntryMetaData metaData = result.getMetaData(); if (metaData == null) { EntryTarget target = result.getEntryTarget().cloneWithNewRevision(URIHandler.REVISION_OVERRIDE); try { metaData = getEntry(target); } catch (AtomServerException e) { metaData = null; } } Entry entry = metaData == null ? newEntryWithCommonContentOnly(abdera, result.getEntryTarget()) : newEntry(abdera, metaData, EntryType.full); entry.addSimpleExtension(AtomServerConstants.ENTRY_UPDATED, (result.isModified()) ? "true" : "false"); if (metaData != null && metaData.getContentHashCode() != null) { entry.addSimpleExtension(AtomServerConstants.CONTENT_HASH, metaData.getContentHashCode()); } UpdateCreateOrDeleteEntry.CreateOrUpdateEntry updateEntry = new UpdateCreateOrDeleteEntry.CreateOrUpdateEntry( entry, metaData != null && metaData.isNewlyCreated()); if (result.getException() != null) { updateEntry.setException(result.getException()); } Integer listOrder = orderMap.get(result.getEntryTarget()); if (listOrder == null) { // This should never happen.... 
String msg = "Could not map (" + result.getEntryTarget() + ") in Batch Order Map"; log.error(msg); throw new AtomServerException(msg); } updateEntries[listOrder] = updateEntry; } } // ---------------- process deletes ------------------ if (!entriesToDelete.isEmpty()) { java.util.Collection<BatchEntryResult> results = executeTransactionally( new TransactionalTask<Collection<BatchEntryResult>>() { public Collection<BatchEntryResult> execute() { java.util.Collection<BatchEntryResult> results = deleteEntries(request, entriesToDelete); for (BatchEntryResult result : results) { if (result.getException() == null) { EntryMetaData entryMetaDataClone = (EntryMetaData) (result.getMetaData() .clone()); int currentRevision = result.getMetaData().getRevision(); entryMetaDataClone.setRevision((currentRevision - 1)); String deletedEntryXml = createDeletedEntryXML(entryMetaDataClone); getContentStorage().deleteContent(deletedEntryXml, result.getMetaData()); } } return results; } }); for (BatchEntryResult result : results) { // TODO: WRONG! EntryMetaData metaData = result.getMetaData(); UpdateCreateOrDeleteEntry.DeleteEntry deleteEntry = null; if (metaData == null) { Factory factory = AtomServer.getFactory(abdera); Entry entry = factory.newEntry(); String workspace = result.getEntryTarget().getWorkspace(); String collection = result.getEntryTarget().getCollection(); String entryId = result.getEntryTarget().getEntryId(); Locale locale = result.getEntryTarget().getLocale(); String fileURI = getURIHandler().constructURIString(workspace, collection, entryId, locale); setEntryId(factory, entry, fileURI); setEntryTitle(factory, entry, isLocalized() ? (" Entry: " + collection + " " + entryId + "." + locale) : (" Entry: " + collection + " " + entryId)); addAuthorToEntry(factory, entry, "AtomServer APP Service"); addLinkToEntry(factory, entry, fileURI, "self"); String editURL = fileURI + "/" + (result.getEntryTarget().getRevision() + 1); addLinkToEntry(factory, entry, editURL, "edit"); deleteEntry = new UpdateCreateOrDeleteEntry.DeleteEntry(entry); } else { deleteEntry = new UpdateCreateOrDeleteEntry.DeleteEntry( newEntry(abdera, metaData, EntryType.full)); } if (result.getException() != null) { deleteEntry.setException(result.getException()); } Integer listOrder = orderMap.get(result.getEntryTarget()); if (listOrder == null) { // This should never happen.... String msg = "Could not map (" + result.getEntryTarget() + ") in Batch Order Map"; log.error(msg); throw new AtomServerException(msg); } updateEntries[listOrder] = deleteEntry; } } // Clear the maps to help out the Garbage Collector entryXmlMap.clear(); entriesToUpdate.clear(); entriesToDelete.clear(); orderMap.clear(); relaxedEntryTargetSet.clear(); return Arrays.asList(updateEntries); }
From source file:net.countercraft.movecraft.async.translation.TranslationTask.java
@Override public void excecute() { MovecraftLocation[] blocksList = data.getBlockList(); final int[] fallThroughBlocks = new int[] { 0, 8, 9, 10, 11, 31, 37, 38, 39, 40, 50, 51, 55, 59, 63, 65, 68, 69, 70, 72, 75, 76, 77, 78, 83, 85, 93, 94, 111, 141, 142, 143, 171 }; // blockedByWater=false means an ocean-going vessel boolean waterCraft = !getCraft().getType().blockedByWater(); boolean hoverCraft = getCraft().getType().getCanHover(); boolean airCraft = getCraft().getType().blockedByWater(); int hoverLimit = getCraft().getType().getHoverLimit(); Player craftPilot = CraftManager.getInstance().getPlayerFromCraft(getCraft()); int[][][] hb = getCraft().getHitBox(); if (hb == null) return; // start by finding the crafts borders int minY = 65535; int maxY = -65535; for (int[][] i1 : hb) { for (int[] i2 : i1) { if (i2 != null) { if (i2[0] < minY) { minY = i2[0]; } if (i2[1] > maxY) { maxY = i2[1]; } } } } int maxX = getCraft().getMinX() + hb.length; int maxZ = getCraft().getMinZ() + hb[0].length; // safe because if the first x array doesn't have a z array, then it wouldn't be the first x array int minX = getCraft().getMinX(); int minZ = getCraft().getMinZ(); // treat sinking crafts specially if (getCraft().getSinking()) { waterCraft = true; hoverCraft = false; } if (getCraft().getDisabled() && (!getCraft().getSinking())) { fail(String.format(I18nSupport.getInternationalisedString("Craft is disabled!"))); } // check the maxheightaboveground limitation, move 1 down if that limit is exceeded if (getCraft().getType().getMaxHeightAboveGround() > 0 && data.getDy() >= 0) { int x = getCraft().getMaxX() + getCraft().getMinX(); x = x >> 1; int y = getCraft().getMaxY(); int z = getCraft().getMaxZ() + getCraft().getMinZ(); z = z >> 1; int cy = getCraft().getMinY(); boolean done = false; while (!done) { cy = cy - 1; if (getCraft().getW().getBlockTypeIdAt(x, cy, z) != 0) done = true; if (cy <= 1) done = true; } if (y - cy > getCraft().getType().getMaxHeightAboveGround()) { data.setDy(-1); } } // Find the waterline from the surrounding terrain or from the static level in the craft type int waterLine = 0; if (waterCraft) { if (getCraft().getType().getStaticWaterLevel() != 0) { if (waterLine <= maxY + 1) { waterLine = getCraft().getType().getStaticWaterLevel(); } } else { // figure out the water level by examining blocks next to the outer boundaries of the craft for (int posY = maxY + 1; (posY >= minY - 1) && (waterLine == 0); posY--) { int numWater = 0; int numAir = 0; int posX; int posZ; posZ = minZ - 1; for (posX = minX - 1; (posX <= maxX + 1) && (waterLine == 0); posX++) { int typeID = getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId(); if (typeID == 9) numWater++; if (typeID == 0) numAir++; } posZ = maxZ + 1; for (posX = minX - 1; (posX <= maxX + 1) && (waterLine == 0); posX++) { int typeID = getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId(); if (typeID == 9) numWater++; if (typeID == 0) numAir++; } posX = minX - 1; for (posZ = minZ; (posZ <= maxZ) && (waterLine == 0); posZ++) { int typeID = getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId(); if (typeID == 9) numWater++; if (typeID == 0) numAir++; } posX = maxX + 1; for (posZ = minZ; (posZ <= maxZ) && (waterLine == 0); posZ++) { int typeID = getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId(); if (typeID == 9) numWater++; if (typeID == 0) numAir++; } if (numWater > numAir) { waterLine = posY; } } } // now add all the air blocks found within the craft's hitbox immediately above the waterline and below 
to the craft blocks so they will be translated HashSet<MovecraftLocation> newHSBlockList = new HashSet<MovecraftLocation>(Arrays.asList(blocksList)); int posY = waterLine + 1; for (int posX = minX; posX < maxX; posX++) { for (int posZ = minZ; posZ < maxZ; posZ++) { if (hb[posX - minX] != null) { if (hb[posX - minX][posZ - minZ] != null) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 0 && posY > hb[posX - minX][posZ - minZ][0] && posY < hb[posX - minX][posZ - minZ][1]) { MovecraftLocation l = new MovecraftLocation(posX, posY, posZ); newHSBlockList.add(l); } } } } } // dont check the hitbox for the underwater portion. Otherwise open-hulled ships would flood. for (posY = waterLine; posY >= minY; posY--) { for (int posX = minX; posX < maxX; posX++) { for (int posZ = minZ; posZ < maxZ; posZ++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 0) { MovecraftLocation l = new MovecraftLocation(posX, posY, posZ); newHSBlockList.add(l); } } } } blocksList = newHSBlockList.toArray(new MovecraftLocation[newHSBlockList.size()]); } // check for fuel, burn some from a furnace if needed. Blocks of coal are supported, in addition to coal and charcoal double fuelBurnRate = getCraft().getType().getFuelBurnRate(); // going down doesn't require fuel if (data.getDy() == -1 && data.getDx() == 0 && data.getDz() == 0) fuelBurnRate = 0.0; if (fuelBurnRate != 0.0 && getCraft().getSinking() == false) { if (getCraft().getBurningFuel() < fuelBurnRate) { Block fuelHolder = null; for (MovecraftLocation bTest : blocksList) { Block b = getCraft().getW().getBlockAt(bTest.getX(), bTest.getY(), bTest.getZ()); if (b.getTypeId() == 61) { InventoryHolder inventoryHolder = (InventoryHolder) b.getState(); if (inventoryHolder.getInventory().contains(263) || inventoryHolder.getInventory().contains(173)) { fuelHolder = b; } } } if (fuelHolder == null) { fail(String.format( I18nSupport.getInternationalisedString("Translation - Failed Craft out of fuel"))); } else { InventoryHolder inventoryHolder = (InventoryHolder) fuelHolder.getState(); if (inventoryHolder.getInventory().contains(263)) { ItemStack iStack = inventoryHolder.getInventory() .getItem(inventoryHolder.getInventory().first(263)); int amount = iStack.getAmount(); if (amount == 1) { inventoryHolder.getInventory().remove(iStack); } else { iStack.setAmount(amount - 1); } getCraft().setBurningFuel(getCraft().getBurningFuel() + 7.0); } else { ItemStack iStack = inventoryHolder.getInventory() .getItem(inventoryHolder.getInventory().first(173)); int amount = iStack.getAmount(); if (amount == 1) { inventoryHolder.getInventory().remove(iStack); } else { iStack.setAmount(amount - 1); } getCraft().setBurningFuel(getCraft().getBurningFuel() + 79.0); } } } else { getCraft().setBurningFuel(getCraft().getBurningFuel() - fuelBurnRate); } } List<MovecraftLocation> tempBlockList = new ArrayList<MovecraftLocation>(); HashSet<MovecraftLocation> existingBlockSet = new HashSet<MovecraftLocation>(Arrays.asList(blocksList)); HashSet<EntityUpdateCommand> entityUpdateSet = new HashSet<EntityUpdateCommand>(); Set<MapUpdateCommand> updateSet = new HashSet<MapUpdateCommand>(); data.setCollisionExplosion(false); Set<MapUpdateCommand> explosionSet = new HashSet<MapUpdateCommand>(); List<Material> harvestBlocks = getCraft().getType().getHarvestBlocks(); List<MovecraftLocation> harvestedBlocks = new ArrayList<MovecraftLocation>(); List<MovecraftLocation> destroyedBlocks = new ArrayList<MovecraftLocation>(); List<Material> harvesterBladeBlocks = 
getCraft().getType().getHarvesterBladeBlocks(); int hoverOver = data.getDy(); int craftMinY = 0; int craftMaxY = 0; boolean clearNewData = false; boolean hoverUseGravity = getCraft().getType().getUseGravity(); boolean checkHover = (data.getDx() != 0 || data.getDz() != 0);// we want to check only horizontal moves boolean canHoverOverWater = getCraft().getType().getCanHoverOverWater(); boolean townyEnabled = Movecraft.getInstance().getTownyPlugin() != null; boolean explosionBlockedByTowny = false; boolean moveBlockedByTowny = false; boolean validateTownyExplosion = false; String townName = ""; Set<TownBlock> townBlockSet = new HashSet<TownBlock>(); TownyWorld townyWorld = null; TownyWorldHeightLimits townyWorldHeightLimits = null; if (townyEnabled && Settings.TownyBlockMoveOnSwitchPerm) { townyWorld = TownyUtils.getTownyWorld(getCraft().getW()); if (townyWorld != null) { townyEnabled = townyWorld.isUsingTowny(); if (townyEnabled) { townyWorldHeightLimits = TownyUtils.getWorldLimits(getCraft().getW()); if (getCraft().getType().getCollisionExplosion() != 0.0F) { validateTownyExplosion = true; } } } } else { townyEnabled = false; } for (int i = 0; i < blocksList.length; i++) { MovecraftLocation oldLoc = blocksList[i]; MovecraftLocation newLoc = oldLoc.translate(data.getDx(), data.getDy(), data.getDz()); if (newLoc.getY() > data.getMaxHeight() && newLoc.getY() > oldLoc.getY()) { fail(String.format( I18nSupport.getInternationalisedString("Translation - Failed Craft hit height limit"))); break; } else if (newLoc.getY() < data.getMinHeight() && newLoc.getY() < oldLoc.getY() && getCraft().getSinking() == false) { fail(String.format(I18nSupport .getInternationalisedString("Translation - Failed Craft hit minimum height limit"))); break; } boolean blockObstructed = false; boolean harvestBlock = false; boolean bladeOK = true; Material testMaterial; Location plugLoc = new Location(getCraft().getW(), newLoc.getX(), newLoc.getY(), newLoc.getZ()); if (craftPilot != null) { // See if they are permitted to build in the area, if WorldGuard integration is turned on if (Movecraft.getInstance().getWorldGuardPlugin() != null && Settings.WorldGuardBlockMoveOnBuildPerm) { if (Movecraft.getInstance().getWorldGuardPlugin().canBuild(craftPilot, plugLoc) == false) { fail(String.format(I18nSupport.getInternationalisedString( "Translation - Failed Player is not permitted to build in this WorldGuard region") + " @ %d,%d,%d", oldLoc.getX(), oldLoc.getY(), oldLoc.getZ())); break; } } } Player p; if (craftPilot == null) { p = getCraft().getNotificationPlayer(); } else { p = craftPilot; } if (p != null) { if (Movecraft.getInstance().getWorldGuardPlugin() != null && Movecraft.getInstance().getWGCustomFlagsPlugin() != null && Settings.WGCustomFlagsUsePilotFlag) { LocalPlayer lp = Movecraft.getInstance().getWorldGuardPlugin().wrapPlayer(p); WGCustomFlagsUtils WGCFU = new WGCustomFlagsUtils(); if (!WGCFU.validateFlag(plugLoc, Movecraft.FLAG_MOVE, lp)) { fail(String .format(I18nSupport.getInternationalisedString("WGCustomFlags - Translation Failed") + " @ %d,%d,%d", oldLoc.getX(), oldLoc.getY(), oldLoc.getZ())); break; } } if (townyEnabled) { TownBlock townBlock = TownyUtils.getTownBlock(plugLoc); if (townBlock != null && !townBlockSet.contains(townBlock)) { if (validateTownyExplosion) { if (!explosionBlockedByTowny) { if (!TownyUtils.validateExplosion(townBlock)) { explosionBlockedByTowny = true; } } } if (TownyUtils.validateCraftMoveEvent(p, plugLoc, townyWorld)) { townBlockSet.add(townBlock); } else { int y = 
plugLoc.getBlockY(); boolean oChange = false; if (craftMinY > y) { craftMinY = y; oChange = true; } if (craftMaxY < y) { craftMaxY = y; oChange = true; } if (oChange) { boolean failed = false; Town town = TownyUtils.getTown(townBlock); if (town != null) { Location locSpawn = TownyUtils.getTownSpawn(townBlock); if (locSpawn != null) { if (!townyWorldHeightLimits.validate(y, locSpawn.getBlockY())) { failed = true; } } else { failed = true; } if (failed) { if (Movecraft.getInstance().getWorldGuardPlugin() != null && Movecraft.getInstance().getWGCustomFlagsPlugin() != null && Settings.WGCustomFlagsUsePilotFlag) { LocalPlayer lp = Movecraft.getInstance().getWorldGuardPlugin() .wrapPlayer(p); ApplicableRegionSet regions = Movecraft.getInstance() .getWorldGuardPlugin().getRegionManager(plugLoc.getWorld()) .getApplicableRegions(plugLoc); if (regions.size() != 0) { WGCustomFlagsUtils WGCFU = new WGCustomFlagsUtils(); if (WGCFU.validateFlag(plugLoc, Movecraft.FLAG_MOVE, lp)) { failed = false; } } } } if (failed) { townName = town.getName(); moveBlockedByTowny = true; } } } } } } } //check for chests around testMaterial = getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getType(); if (testMaterial.equals(Material.CHEST) || testMaterial.equals(Material.TRAPPED_CHEST)) { if (!checkChests(testMaterial, newLoc, existingBlockSet)) { //prevent chests collision fail(String.format( I18nSupport.getInternationalisedString("Translation - Failed Craft is obstructed") + " @ %d,%d,%d,%s", newLoc.getX(), newLoc.getY(), newLoc.getZ(), getCraft().getW() .getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()).getType().toString())); break; } } if (getCraft().getSinking()) { int testID = getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()).getTypeId(); blockObstructed = !(Arrays.binarySearch(fallThroughBlocks, testID) >= 0) && !existingBlockSet.contains(newLoc); } else if (!waterCraft) { // New block is not air or a piston head and is not part of the existing ship testMaterial = getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()).getType(); blockObstructed = (!testMaterial.equals(Material.AIR)) && !existingBlockSet.contains(newLoc); } else { // New block is not air or water or a piston head and is not part of the existing ship testMaterial = getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()).getType(); blockObstructed = (!testMaterial.equals(Material.AIR) && !testMaterial.equals(Material.STATIONARY_WATER) && !testMaterial.equals(Material.WATER)) && !existingBlockSet.contains(newLoc); } boolean ignoreBlock = false; // air never obstructs anything (changed 4/18/2017 to prevent drilling machines) if (getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getType() .equals(Material.AIR) && blockObstructed) { ignoreBlock = true; // blockObstructed=false; } testMaterial = getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()).getType(); if (blockObstructed) { if (hoverCraft || harvestBlocks.size() > 0) { // New block is not harvested block if (harvestBlocks.contains(testMaterial) && !existingBlockSet.contains(newLoc)) { Material tmpType = getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()) .getType(); if (harvesterBladeBlocks.size() > 0) { if (!harvesterBladeBlocks.contains(tmpType)) { bladeOK = false; } } if (bladeOK) { blockObstructed = false; harvestBlock = true; tryPutToDestroyBox(testMaterial, newLoc, harvestedBlocks, destroyedBlocks); harvestedBlocks.add(newLoc); } } } 
} if (blockObstructed || moveBlockedByTowny) { if (hoverCraft && checkHover) { //we check one up ever, if it is hovercraft and one down if it's using gravity if (hoverOver == 0 && newLoc.getY() + 1 <= data.getMaxHeight()) { //first was checked actual level, now check if we can go up hoverOver = 1; data.setDy(1); clearNewData = true; } else if (hoverOver >= 1) { //check other options to go up if (hoverOver < hoverLimit + 1 && newLoc.getY() + 1 <= data.getMaxHeight()) { data.setDy(hoverOver + 1); hoverOver += 1; clearNewData = true; } else { if (hoverUseGravity && newLoc.getY() - hoverOver - 1 >= data.getMinHeight()) { //we are on the maximum of top //if we can't go up so we test bottom side data.setDy(-1); hoverOver = -1; } else { // no way - back to original dY, turn off hovercraft for this move // and get original data again for all explosions data.setDy(0); hoverOver = 0; hoverCraft = false; hoverUseGravity = false; } clearNewData = true; } } else if (hoverOver <= -1) { //we cant go down for 1 block, check more to hoverLimit if (hoverOver > -hoverLimit - 1 && newLoc.getY() - 1 >= data.getMinHeight()) { data.setDy(hoverOver - 1); hoverOver -= 1; clearNewData = true; } else { // no way - back to original dY, turn off hovercraft for this move // and get original data again for all explosions data.setDy(0); hoverOver = 0; hoverUseGravity = false; clearNewData = true; hoverCraft = false; } } else { // no way - reached MaxHeight during looking new way upstairss if (hoverUseGravity && newLoc.getY() - 1 >= data.getMinHeight()) { //we are on the maximum of top //if we can't go up so we test bottom side data.setDy(-1); hoverOver = -1; } else { // - back to original dY, turn off hovercraft for this move // and get original data again for all explosions data.setDy(0); hoverOver = 0; hoverUseGravity = false; hoverCraft = false; } clearNewData = true; } // End hovercraft stuff } else { // handle sinking ship collisions if (getCraft().getSinking()) { if (getCraft().getType().getExplodeOnCrash() != 0.0F && !explosionBlockedByTowny) { int explosionKey = (int) (0 - (getCraft().getType().getExplodeOnCrash() * 100)); if (System.currentTimeMillis() - getCraft().getOrigPilotTime() > 1000) if (!getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()) .getType().equals(Material.AIR)) { explosionSet .add(new MapUpdateCommand(oldLoc, explosionKey, (byte) 0, getCraft())); data.setCollisionExplosion(true); } } else { // use the explosion code to clean up the craft, but not with enough force to do anything int explosionKey = 0 - 1; if (!getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getType() .equals(Material.AIR)) { explosionSet.add(new MapUpdateCommand(oldLoc, explosionKey, (byte) 0, getCraft())); data.setCollisionExplosion(true); } } } else { // Explode if the craft is set to have a CollisionExplosion. 
Also keep moving for spectacular ramming collisions if (getCraft().getType().getCollisionExplosion() == 0.0F) { if (moveBlockedByTowny) { fail(String.format( I18nSupport.getInternationalisedString("Towny - Translation Failed") + " %s @ %d,%d,%d", townName, oldLoc.getX(), oldLoc.getY(), oldLoc.getZ())); } else { fail(String.format( I18nSupport.getInternationalisedString( "Translation - Failed Craft is obstructed") + " @ %d,%d,%d,%s", oldLoc.getX(), oldLoc.getY(), oldLoc.getZ(), getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()) .getType().toString())); if (getCraft().getNotificationPlayer() != null) { Location location = getCraft().getNotificationPlayer().getLocation(); } } break; } else if (explosionBlockedByTowny) { int explosionKey = 0 - 1; if (!getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getType() .equals(Material.AIR)) { explosionSet.add(new MapUpdateCommand(oldLoc, explosionKey, (byte) 0, getCraft())); data.setCollisionExplosion(true); } } else if (System.currentTimeMillis() - getCraft().getOrigPilotTime() > 1000) { int explosionKey; float explosionForce = getCraft().getType().getCollisionExplosion(); if (getCraft().getType().getFocusedExplosion() == true) { explosionForce = explosionForce * getCraft().getBlockList().length; } if (oldLoc.getY() < waterLine) { // underwater explosions require more force to do anything explosionForce += 25; } explosionKey = (int) (0 - (explosionForce * 100)); if (!getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getType() .equals(Material.AIR)) { explosionSet.add(new MapUpdateCommand(oldLoc, explosionKey, (byte) 0, getCraft())); data.setCollisionExplosion(true); } if (getCraft().getType().getFocusedExplosion() == true) { // don't handle any further collisions if it is set to focusedexplosion break; } } } } } else { //block not obstructed int oldID = getCraft().getW().getBlockTypeIdAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()); byte oldData = getCraft().getW().getBlockAt(oldLoc.getX(), oldLoc.getY(), oldLoc.getZ()).getData(); int currentID = getCraft().getW().getBlockTypeIdAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()); byte currentData = getCraft().getW().getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()) .getData(); // remove water from sinking crafts if (getCraft().getSinking()) { if ((oldID == 8 || oldID == 9) && oldLoc.getY() > waterLine) oldID = 0; } if (!ignoreBlock) { updateSet.add(new MapUpdateCommand(oldLoc, currentID, currentData, newLoc, oldID, oldData, getCraft())); tempBlockList.add(newLoc); } if (i == blocksList.length - 1) { if ((hoverCraft && hoverUseGravity) || (hoverUseGravity && newLoc.getY() > data.getMaxHeight() && hoverOver == 0)) { //hovecraft using gravity or something else using gravity and flying over its limit int iFreeSpace = 0; //canHoverOverWater adds 1 to dY for better check water under craft // best way should be expand selected region to each first blocks under craft if (hoverOver == 0) { //we go directly forward so we check if we can go down for (int ii = -1; ii > -hoverLimit - 2 - (canHoverOverWater ? 0 : 1); ii--) { if (!isFreeSpace(data.getDx(), hoverOver + ii, data.getDz(), blocksList, existingBlockSet, waterCraft, hoverCraft, harvestBlocks, canHoverOverWater, checkHover)) { break; } iFreeSpace++; } if (data.failed()) { break; } if (iFreeSpace > hoverLimit - (canHoverOverWater ? 
0 : 1)) { data.setDy(-1); hoverOver = -1; clearNewData = true; } } else if (hoverOver == 1 && !airCraft) { //prevent fly heigher than hoverLimit for (int ii = -1; ii > -hoverLimit - 2; ii--) { if (!isFreeSpace(data.getDx(), hoverOver + ii, data.getDz(), blocksList, existingBlockSet, waterCraft, hoverCraft, harvestBlocks, canHoverOverWater, checkHover)) { break; } iFreeSpace++; } if (data.failed()) { break; } if (iFreeSpace > hoverLimit) { if (bladeOK) { fail(String.format(I18nSupport.getInternationalisedString( "Translation - Failed Craft hit height limit"))); } else { fail(String.format( I18nSupport.getInternationalisedString( "Translation - Failed Craft is obstructed") + " @ %d,%d,%d,%s", oldLoc.getX(), oldLoc.getY(), oldLoc.getZ(), getCraft().getW() .getBlockAt(newLoc.getX(), newLoc.getY(), newLoc.getZ()) .getType().toString())); } break; } } else if (hoverOver > 1) { //prevent jump thru block for (int ii = 1; ii < hoverOver - 1; ii++) { if (!isFreeSpace(0, ii, 0, blocksList, existingBlockSet, waterCraft, hoverCraft, harvestBlocks, canHoverOverWater, checkHover)) { break; } iFreeSpace++; } if (data.failed()) { break; } if (iFreeSpace + 2 < hoverOver) { data.setDy(-1); hoverOver = -1; clearNewData = true; } } else if (hoverOver < -1) { //prevent jump thru block for (int ii = -1; ii > hoverOver + 1; ii--) { if (!isFreeSpace(0, ii, 0, blocksList, existingBlockSet, waterCraft, hoverCraft, harvestBlocks, canHoverOverWater, checkHover)) { break; } iFreeSpace++; } if (data.failed()) { break; } if (iFreeSpace + 2 < -hoverOver) { data.setDy(0); hoverOver = 0; hoverCraft = false; clearNewData = true; } } if (!canHoverOverWater) { if (hoverOver >= 1) { //others hoverOver values we have checked jet for (int ii = hoverOver - 1; ii > hoverOver - hoverLimit - 2; ii--) { if (!isFreeSpace(0, ii, 0, blocksList, existingBlockSet, waterCraft, hoverCraft, harvestBlocks, canHoverOverWater, checkHover)) { break; } iFreeSpace++; } if (data.failed()) { break; } } } } } } //END OF: if (blockObstructed) if (clearNewData) { i = -1; tempBlockList.clear(); updateSet.clear(); harvestedBlocks.clear(); data.setCollisionExplosion(false); explosionSet.clear(); clearNewData = false; townBlockSet.clear(); craftMinY = 0; craftMaxY = 0; } } //END OF: for ( int i = 0; i < blocksList.length; i++ ) { // now move the scheduled block changes along with the ship HashMap<MapUpdateCommand, Long> newScheduledBlockChanges = new HashMap<MapUpdateCommand, Long>(); HashMap<MapUpdateCommand, Long> oldScheduledBlockChanges = getCraft().getScheduledBlockChanges(); if (oldScheduledBlockChanges != null) { for (MapUpdateCommand muc : oldScheduledBlockChanges.keySet()) { MovecraftLocation newLoc = muc.getNewBlockLocation().translate(data.getDx(), data.getDy(), data.getDz()); // Long newTime=oldScheduledBlockChanges.get(muc); Long newTime = System.currentTimeMillis() + 5000; MapUpdateCommand newMuc = new MapUpdateCommand(newLoc, muc.getTypeID(), muc.getDataID(), getCraft()); newScheduledBlockChanges.put(newMuc, newTime); } data.setScheduledBlockChanges(newScheduledBlockChanges); } if (data.collisionExplosion()) { // mark the craft to check for sinking, remove the exploding blocks from the blocklist, and submit the explosions for map update for (MapUpdateCommand m : explosionSet) { if (existingBlockSet.contains(m.getNewBlockLocation())) { existingBlockSet.remove(m.getNewBlockLocation()); if (Settings.FadeWrecksAfter > 0) { int typeID = getCraft().getW().getBlockAt(m.getNewBlockLocation().getX(), m.getNewBlockLocation().getY(), 
m.getNewBlockLocation().getZ()).getTypeId(); if (typeID != 0 && typeID != 9) { Movecraft.getInstance().blockFadeTimeMap.put(m.getNewBlockLocation(), System.currentTimeMillis()); Movecraft.getInstance().blockFadeTypeMap.put(m.getNewBlockLocation(), typeID); if (m.getNewBlockLocation().getY() <= waterLine) { Movecraft.getInstance().blockFadeWaterMap.put(m.getNewBlockLocation(), true); } else { Movecraft.getInstance().blockFadeWaterMap.put(m.getNewBlockLocation(), false); } Movecraft.getInstance().blockFadeWorldMap.put(m.getNewBlockLocation(), getCraft().getW()); } } } // if the craft is sinking, remove all solid blocks above the one that hit the ground from the craft for smoothing sinking if (getCraft().getSinking() == true && (getCraft().getType().getExplodeOnCrash() == 0.0 || explosionBlockedByTowny)) { int posy = m.getNewBlockLocation().getY() + 1; int testID = getCraft().getW() .getBlockAt(m.getNewBlockLocation().getX(), posy, m.getNewBlockLocation().getZ()) .getTypeId(); while (posy <= maxY && !(Arrays.binarySearch(fallThroughBlocks, testID) >= 0)) { MovecraftLocation testLoc = new MovecraftLocation(m.getNewBlockLocation().getX(), posy, m.getNewBlockLocation().getZ()); if (existingBlockSet.contains(testLoc)) { existingBlockSet.remove(testLoc); if (Settings.FadeWrecksAfter > 0) { int typeID = getCraft().getW() .getBlockAt(testLoc.getX(), testLoc.getY(), testLoc.getZ()).getTypeId(); if (typeID != 0 && typeID != 9) { Movecraft.getInstance().blockFadeTimeMap.put(testLoc, System.currentTimeMillis()); Movecraft.getInstance().blockFadeTypeMap.put(testLoc, typeID); if (testLoc.getY() <= waterLine) { Movecraft.getInstance().blockFadeWaterMap.put(testLoc, true); } else { Movecraft.getInstance().blockFadeWaterMap.put(testLoc, false); } Movecraft.getInstance().blockFadeWorldMap.put(testLoc, getCraft().getW()); } } } posy = posy + 1; testID = getCraft().getW() .getBlockAt(m.getNewBlockLocation().getX(), posy, m.getNewBlockLocation().getZ()) .getTypeId(); } } } MovecraftLocation[] newBlockList = (MovecraftLocation[]) existingBlockSet .toArray(new MovecraftLocation[0]); data.setBlockList(newBlockList); data.setUpdates(explosionSet.toArray(new MapUpdateCommand[1])); fail(String.format(I18nSupport.getInternationalisedString("Translation - Failed Craft is obstructed"))); if (getCraft().getSinking() == false) { // FROG changed from ==true, think that was a typo if (getCraft().getType().getSinkPercent() != 0.0) { getCraft().setLastBlockCheck(0); } getCraft().setLastCruisUpdate(System.currentTimeMillis() - 30000); } } if (!data.failed()) { MovecraftLocation[] newBlockList = (MovecraftLocation[]) tempBlockList .toArray(new MovecraftLocation[0]); data.setBlockList(newBlockList); //prevents torpedo and rocket pilots :) if (getCraft().getType().getMoveEntities() && getCraft().getSinking() == false) { // Move entities within the craft List<Entity> eList = null; int numTries = 0; while ((eList == null) && (numTries < 100)) { try { eList = getCraft().getW().getEntities(); } catch (java.util.ConcurrentModificationException e) { numTries++; } } Iterator<Entity> i = eList.iterator(); while (i.hasNext()) { Entity pTest = i.next(); // if ( MathUtils.playerIsWithinBoundingPolygon( getCraft().getHitBox(), getCraft().getMinX(), getCraft().getMinZ(), MathUtils.bukkit2MovecraftLoc( pTest.getLocation() ) ) ) { if (MathUtils.locIsNearCraftFast(getCraft(), MathUtils.bukkit2MovecraftLoc(pTest.getLocation()))) { if (pTest.getType() == org.bukkit.entity.EntityType.PLAYER) { Player player = (Player) pTest; 
getCraft().getMovedPlayers().put(player, System.currentTimeMillis()); Location tempLoc = pTest.getLocation(); // Direct control no longer locks the player in place // if(getCraft().getPilotLocked()==true && pTest==CraftManager.getInstance().getPlayerFromCraft(getCraft())) { // tempLoc.setX(getCraft().getPilotLockedX()); // tempLoc.setY(getCraft().getPilotLockedY()); // tempLoc.setZ(getCraft().getPilotLockedZ()); // } tempLoc = tempLoc.add(data.getDx(), data.getDy(), data.getDz()); Location newPLoc = new Location(getCraft().getW(), tempLoc.getX(), tempLoc.getY(), tempLoc.getZ()); newPLoc.setPitch(pTest.getLocation().getPitch()); newPLoc.setYaw(pTest.getLocation().getYaw()); EntityUpdateCommand eUp = new EntityUpdateCommand(pTest.getLocation().clone(), newPLoc, pTest); entityUpdateSet.add(eUp); // if(getCraft().getPilotLocked()==true && pTest==CraftManager.getInstance().getPlayerFromCraft(getCraft())) { // getCraft().setPilotLockedX(tempLoc.getX()); // getCraft().setPilotLockedY(tempLoc.getY()); // getCraft().setPilotLockedZ(tempLoc.getZ()); // } } if (pTest.getType() == org.bukkit.entity.EntityType.PRIMED_TNT) { Entity ent = (Entity) pTest; Location tempLoc = pTest.getLocation(); tempLoc = tempLoc.add(data.getDx(), data.getDy(), data.getDz()); EntityUpdateCommand eUp = new EntityUpdateCommand(pTest.getLocation().clone(), tempLoc, pTest); entityUpdateSet.add(eUp); } } } } else { //add releaseTask without playermove to manager if (getCraft().getType().getCruiseOnPilot() == false && getCraft().getSinking() == false) // not necessary to release cruiseonpilot crafts, because they will already be released CraftManager.getInstance().addReleaseTask(getCraft()); } // remove water near sinking crafts if (getCraft().getSinking()) { int posX; int posY = maxY; int posZ; if (posY > waterLine) { for (posX = minX - 1; posX <= maxX + 1; posX++) { for (posZ = minZ - 1; posZ <= maxZ + 1; posZ++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 9 || getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 8) { MovecraftLocation loc = new MovecraftLocation(posX, posY, posZ); updateSet.add(new MapUpdateCommand(loc, 0, (byte) 0, getCraft())); } } } } for (posY = maxY + 1; (posY >= minY - 1) && (posY > waterLine); posY--) { posZ = minZ - 1; for (posX = minX - 1; posX <= maxX + 1; posX++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 9 || getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 8) { MovecraftLocation loc = new MovecraftLocation(posX, posY, posZ); updateSet.add(new MapUpdateCommand(loc, 0, (byte) 0, getCraft())); } } posZ = maxZ + 1; for (posX = minX - 1; posX <= maxX + 1; posX++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 9 || getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 8) { MovecraftLocation loc = new MovecraftLocation(posX, posY, posZ); updateSet.add(new MapUpdateCommand(loc, 0, (byte) 0, getCraft())); } } posX = minX - 1; for (posZ = minZ - 1; posZ <= maxZ + 1; posZ++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 9 || getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 8) { MovecraftLocation loc = new MovecraftLocation(posX, posY, posZ); updateSet.add(new MapUpdateCommand(loc, 0, (byte) 0, getCraft())); } } posX = maxX + 1; for (posZ = minZ - 1; posZ <= maxZ + 1; posZ++) { if (getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 9 || getCraft().getW().getBlockAt(posX, posY, posZ).getTypeId() == 8) { MovecraftLocation loc = new MovecraftLocation(posX, posY, 
posZ); updateSet.add(new MapUpdateCommand(loc, 0, (byte) 0, getCraft())); } } } } //Set blocks that are no longer craft to air // /********************************************************************************************************** // * I had problems with ListUtils (I tryied commons-collections 3.2.1. and 4.0 without success) // * so I replaced Lists with Sets // * // * Caused by: java.lang.NoClassDefFoundError: org/apache/commons/collections/ListUtils // * at net.countercraft.movecraft.async.translation.TranslationTask.excecute(TranslationTask.java:716) // * mwkaicz 24-02-2015 // ***********************************************************************************************************/ // Set<MovecraftLocation> setA = new HashSet(Arrays.asList(blocksList)); // Set<MovecraftLocation> setB = new HashSet(Arrays.asList(newBlockList)); // setA.removeAll(setB); // MovecraftLocation[] arrA = new MovecraftLocation[0]; // arrA = setA.toArray(arrA); // List<MovecraftLocation> airLocation = Arrays.asList(arrA); List<MovecraftLocation> airLocation = ListUtils.subtract(Arrays.asList(blocksList), Arrays.asList(newBlockList)); for (MovecraftLocation l1 : airLocation) { // for watercraft, fill blocks below the waterline with water if (!waterCraft) { if (getCraft().getSinking()) { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft(), getCraft().getType().getSmokeOnSink())); } else { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft())); } } else { if (l1.getY() <= waterLine) { // if there is air below the ship at the current position, don't fill in with water MovecraftLocation testAir = new MovecraftLocation(l1.getX(), l1.getY() - 1, l1.getZ()); while (existingBlockSet.contains(testAir)) { testAir.setY(testAir.getY() - 1); } if (getCraft().getW().getBlockAt(testAir.getX(), testAir.getY(), testAir.getZ()) .getTypeId() == 0) { if (getCraft().getSinking()) { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft(), getCraft().getType().getSmokeOnSink())); } else { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft())); } } else { updateSet.add(new MapUpdateCommand(l1, 9, (byte) 0, getCraft())); } } else { if (getCraft().getSinking()) { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft(), getCraft().getType().getSmokeOnSink())); } else { updateSet.add(new MapUpdateCommand(l1, 0, (byte) 0, getCraft())); } } } } //add destroyed parts of growed for (MovecraftLocation destroyedLocation : destroyedBlocks) { updateSet.add(new MapUpdateCommand(destroyedLocation, 0, (byte) 0, getCraft())); } MapUpdateCommand[] updateArray = updateSet.toArray(new MapUpdateCommand[1]); // MapUpdateManager.getInstance().sortUpdates(updateArray); data.setUpdates(updateArray); data.setEntityUpdates(entityUpdateSet.toArray(new EntityUpdateCommand[1])); if (data.getDy() != 0) { data.setHitbox(BoundingBoxUtils.translateBoundingBoxVertically(data.getHitbox(), data.getDy())); } data.setMinX(data.getMinX() + data.getDx()); data.setMinZ(data.getMinZ() + data.getDz()); } captureYield(blocksList, harvestedBlocks); }
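Editor's note: the example above keeps per-move scratch collections (tempBlockList, updateSet, harvestedBlocks, explosionSet, townBlockSet) and, when the hover logic decides to retry the translation with a different vertical offset, calls clear() on them and rewinds the loop index rather than allocating fresh collections. Below is a minimal, self-contained sketch of that clear-and-retry pattern; the class name, method, and height rule are illustrative assumptions, not Movecraft API.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Minimal sketch (hypothetical, not Movecraft code) of the clear-and-retry pattern:
// when a later element forces a change of plan (here, a different vertical offset),
// the partially built result collections are cleared and the scan restarts from
// index 0 instead of new collections being allocated.
public class ClearAndRetryExample {

    public static Set<Integer> translate(int[] heights, int maxHeight) {
        Set<Integer> moved = new HashSet<Integer>();         // scratch result set
        List<Integer> harvested = new ArrayList<Integer>();  // hypothetical side list
        int dy = 1;                                          // first try moving up

        for (int i = 0; i < heights.length; i++) {
            int newHeight = heights[i] + dy;
            if (newHeight > maxHeight && dy > 0) {
                // Change of plan: move down instead, drop partial results and retry.
                dy = -1;
                moved.clear();
                harvested.clear();
                i = -1;   // the loop increment brings this back to index 0
                continue;
            }
            moved.add(newHeight);
            if (newHeight % 2 == 0) {
                harvested.add(newHeight);
            }
        }
        return moved;
    }

    public static void main(String[] args) {
        System.out.println(translate(new int[] { 3, 5, 9 }, 8));
    }
}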