List of usage examples for java.util.Set.clear()
void clear();
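Set.clear() removes all of the elements from the set; the set itself remains usable and can be refilled afterwards. Before the longer examples below, a minimal self-contained sketch of that basic behavior (class and variable names are illustrative):

import java.util.HashSet;
import java.util.Set;

public class SetClearBasics {
    public static void main(String[] args) {
        Set<String> tags = new HashSet<>();
        tags.add("alpha");
        tags.add("beta");
        System.out.println(tags.size());     // 2

        // clear() empties the set in place; no new instance is created.
        tags.clear();
        System.out.println(tags.isEmpty());  // true
        System.out.println(tags.size());     // 0

        // The same instance can be reused immediately.
        tags.add("gamma");
        System.out.println(tags.size());     // 1
    }
}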
From source file:it.baywaylabs.jumpersumo.utility.Finder.java
/** * Extract a priority queue with all commands found on message string. * * @param textMsg String of message.//from ww w . j av a 2s .co m * @return Priority Queue with all commands found. */ public PriorityQueue processingMessage(String textMsg) { String msg = textMsg.toLowerCase(); Log.d(TAG, "Stringa da processare: " + msg); msg = msg.replaceAll("\\s+", "\\$"); Log.d(TAG, "Stringa da processare: " + msg); msg = msg.replaceAll("\\p{Punct}", ""); if (!"$".equals(msg.substring(msg.length()))) msg += "$"; // Make Suffix Tree from 'msg' string SuffixTree tree; tree = new MySuffixTree(msg); List<Command> commandsList = new ArrayList<Command>(); Intelligence intelligence = new Intelligence(); int[] position = null; Set<Integer> check = new HashSet<Integer>(); List<Integer> a = new ArrayList<Integer>(); List<Integer> b = new ArrayList<Integer>(); Log.d(TAG, "Stringa da processare: " + msg); // FORWARD case: for (String com : intelligence.getMoveOn()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "FORWARD"); commandsList.add(c); } } // BACK case: for (String com : intelligence.getMoveBack()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = 
toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "BACK"); commandsList.add(c); } } // LEFT case: for (String com : intelligence.getTurnLeft()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "LEFT"); commandsList.add(c); } } // RIGHT case: for (String com : intelligence.getTurnRight()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "RIGHT"); commandsList.add(c); } } // PHOTO case: for (String com : intelligence.getTakePhoto()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, 
singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "PHOTO"); commandsList.add(c); } } // EXECUTE case: for (String com : intelligence.getExecuteCsv()) { // Controllo i comandi formati da pi parole if (com.contains(" ")) { HashMap<String, int[]> map = new HashMap<String, int[]>(); String[] singleCommand = com.split(" "); for (int i = 0; i < singleCommand.length; i++) { int[] trovato = tree.findAll(singleCommand[i]); if (trovato.length > 0) { map.put(singleCommand[i], trovato); String arrayTrovat = ""; for (int k = 0; k < trovato.length; k++) arrayTrovat += trovato[k] + ", "; Log.d(TAG, singleCommand[i] + " -- " + arrayTrovat); } } Integer[] consec = checkConsecutive(singleCommand, map); if (consec != null && consec.length > 0) { for (Integer i : consec) { check.add(i); a.add(i); b.add(com.length()); } } } else { int[] temp = tree.findAll(com); boolean daAggiungere = true; for (Integer i : tree.findAll(com)) { for (int j = 0; j < a.size(); j++) { Log.e(TAG, "Controllo i: " + i + ", con a: " + a.get(j) + " e b: " + b.get(j)); if (i >= a.get(j) && i + com.length() <= a.get(j) + b.get(j)) { daAggiungere = false; break; } } if (daAggiungere) { check.add(i); a.add(i); b.add(com.length()); } } } for (int p = 0; p < a.size(); p++) { Log.e(TAG, "A: " + a.get(p) + ", B: " + b.get(p)); } } position = toPrimitive(check); check.clear(); a.clear(); b.clear(); if (position.length > 0) { for (int i = 0; i < position.length; i++) { Command c = new Command(position[i], "EXECUTE"); commandsList.add(c); } } PriorityQueue pq = new PriorityQueue(commandsList); Log.d(TAG, "ListaCommands: " + commandsList.size()); Log.d(TAG, String.valueOf(pq.size())); Log.d(TAG, "Coda Vuota: " + pq.isEmpty()); return pq; }
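The Finder example above reuses the same scratch collections (check, a, b) for every command category, converting check to an array and then calling clear() on all three before the next pass (FORWARD, BACK, LEFT, RIGHT, PHOTO, EXECUTE). A condensed, hypothetical sketch of that reuse pattern follows; the matching logic and category data are stand-ins, not the original suffix-tree code:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ScratchSetReuse {
    public static void main(String[] args) {
        String msg = "go$forward$then$back$";
        String[][] categories = {
                { "FORWARD", "forward" },
                { "BACK", "back" }
        };

        Set<Integer> check = new HashSet<>();       // match positions for the current category
        List<String> commands = new ArrayList<>();

        for (String[] category : categories) {
            String label = category[0];
            for (int i = 1; i < category.length; i++) {
                int pos = msg.indexOf(category[i]);
                if (pos >= 0) {
                    check.add(pos);                 // the Set de-duplicates repeated matches
                }
            }
            for (int pos : check) {
                commands.add(label + "@" + pos);
            }
            // Reset the scratch set before the next category, mirroring
            // check.clear(); a.clear(); b.clear(); in the example above.
            check.clear();
        }
        System.out.println(commands);               // e.g. [FORWARD@3, BACK@16]
    }
}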
From source file:org.apache.ambari.server.controller.AmbariManagementControllerImplTest.java
@Test public void testMaintenanceAndDeleteStates() throws Exception { Map<String, String> mapRequestProps = new HashMap<String, String>(); Injector injector = Guice.createInjector(new AbstractModule() { @Override// w ww . java 2s . co m protected void configure() { Properties properties = new Properties(); properties.setProperty(Configuration.SERVER_PERSISTENCE_TYPE_KEY, "in-memory"); properties.setProperty(Configuration.METADETA_DIR_PATH, "src/main/resources/stacks"); properties.setProperty(Configuration.SERVER_VERSION_FILE, "target/version"); properties.setProperty(Configuration.OS_VERSION_KEY, "centos5"); try { install(new ControllerModule(properties)); } catch (Exception e) { throw new RuntimeException(e); } } }); injector.getInstance(GuiceJpaInitializer.class); try { AmbariManagementController amc = injector.getInstance(AmbariManagementController.class); Clusters clusters = injector.getInstance(Clusters.class); Gson gson = new Gson(); clusters.addHost("host1"); clusters.addHost("host2"); clusters.addHost("host3"); Host host = clusters.getHost("host1"); host.setOsType("centos5"); host.persist(); host = clusters.getHost("host2"); host.setOsType("centos5"); host.persist(); host = clusters.getHost("host3"); host.setOsType("centos5"); host.persist(); ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null); amc.createCluster(clusterRequest); Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null)); amc.createServices(serviceRequests); Type confType = new TypeToken<Map<String, String>>() { }.getType(); ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1", gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1", gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "global", "version1", gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)); amc.createConfiguration(configurationRequest); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", gson.<Map<String, String>>fromJson( "{\"core-site\": \"version1\", \"hdfs-site\": \"version1\", \"global\" : \"version1\" }", confType), null)); amc.updateServices(serviceRequests, mapRequestProps, true, false); Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>(); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null)); serviceComponentRequests .add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null)); amc.createComponents(serviceComponentRequests); Set<HostRequest> hostRequests = new HashSet<HostRequest>(); hostRequests.add(new HostRequest("host1", "c1", null)); hostRequests.add(new HostRequest("host2", "c1", null)); hostRequests.add(new HostRequest("host3", "c1", null)); amc.createHosts(hostRequests); Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>(); componentHostRequests.add(new 
ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null)); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null)); amc.createHostComponents(componentHostRequests); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED")); amc.updateServices(serviceRequests, mapRequestProps, true, false); Cluster cluster = clusters.getCluster("c1"); Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE") .getServiceComponentHosts(); assertEquals(1, namenodes.size()); ServiceComponentHost componentHost = namenodes.get("host1"); Map<String, ServiceComponentHost> hostComponents = cluster.getService("HDFS") .getServiceComponent("DATANODE").getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } hostComponents = cluster.getService("HDFS").getServiceComponent("SECONDARY_NAMENODE") .getServiceComponentHosts(); for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) { ServiceComponentHost cHost = entry.getValue(); cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis())); } componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE")); amc.updateHostComponents(componentHostRequests, mapRequestProps, true); assertEquals(State.MAINTENANCE, componentHost.getState()); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "INSTALLED")); amc.updateHostComponents(componentHostRequests, mapRequestProps, true); assertEquals(State.INSTALLED, componentHost.getState()); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, "MAINTENANCE")); amc.updateHostComponents(componentHostRequests, mapRequestProps, true); assertEquals(State.MAINTENANCE, componentHost.getState()); componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", 
null, "NAMENODE", "host2", null, null)); amc.createHostComponents(componentHostRequests); componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null, "INSTALLED")); amc.updateHostComponents(componentHostRequests, mapRequestProps, true); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(2, namenodes.size()); componentHost = namenodes.get("host2"); componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0")); componentHost .handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis())); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "STARTED")); RequestStatusResponse response = amc.updateServices(serviceRequests, mapRequestProps, true, false); for (ShortTaskStatus shortTaskStatus : response.getTasks()) { assertFalse("host1".equals(shortTaskStatus.getHostName()) && "NAMENODE".equals(shortTaskStatus.getRole())); } componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null)); amc.deleteHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(1, namenodes.size()); // testing the behavior for runSmokeTest flag // piggybacking on this test to avoid setting up the mock cluster testRunSmokeTestFlag(mapRequestProps, amc, serviceRequests); // should be able to add the host component back componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null)); amc.createHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(2, namenodes.size()); // make unknown ServiceComponentHost sch = null; for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) { if (tmp.getServiceComponentName().equals("DATANODE")) { tmp.setState(State.UNKNOWN); sch = tmp; } } assertNotNull(sch); // make maintenance componentHostRequests.clear(); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, "MAINTENANCE")); amc.updateHostComponents(componentHostRequests, mapRequestProps, false); assertEquals(State.MAINTENANCE, sch.getState()); // confirm delete componentHostRequests.clear(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null)); amc.deleteHostComponents(componentHostRequests); sch = null; for (ServiceComponentHost tmp : cluster.getServiceComponentHosts("host2")) { if (tmp.getServiceComponentName().equals("DATANODE")) { sch = tmp; } } assertNull(sch); /* *Test remove service */ serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, "INSTALLED")); amc.updateServices(serviceRequests, mapRequestProps, true, false); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", null, null, null)); assertEquals(1, amc.getServices(serviceRequests).size()); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null)); amc.deleteServices(serviceRequests); serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", null, null, null)); assertEquals(0, 
amc.getServices(serviceRequests).size()); /* *Test add service again */ serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", null, null)); amc.createServices(serviceRequests); assertEquals(1, amc.getServices(serviceRequests).size()); //Create new configs configurationRequest = new ConfigurationRequest("c1", "core-site", "version2", gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version2", gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)); amc.createConfiguration(configurationRequest); configurationRequest = new ConfigurationRequest("c1", "global", "version2", gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)); amc.createConfiguration(configurationRequest); //Add configs to service serviceRequests.clear(); serviceRequests.add(new ServiceRequest("c1", "HDFS", gson.<Map<String, String>>fromJson( "{\"core-site\": \"version2\", \"hdfs-site\": \"version2\", \"global\" : \"version2\" }", confType), null)); amc.updateServices(serviceRequests, mapRequestProps, true, false); //Crate service components serviceComponentRequests = new HashSet<ServiceComponentRequest>(); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null)); serviceComponentRequests .add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null, null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null, null)); serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null)); amc.createComponents(serviceComponentRequests); //Create ServiceComponentHosts componentHostRequests = new HashSet<ServiceComponentHostRequest>(); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null, null)); componentHostRequests .add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null, null)); componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null, null)); amc.createHostComponents(componentHostRequests); namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts(); assertEquals(1, namenodes.size()); Map<String, ServiceComponentHost> datanodes = cluster.getService("HDFS").getServiceComponent("DATANODE") .getServiceComponentHosts(); assertEquals(3, datanodes.size()); Map<String, ServiceComponentHost> namenodes2 = cluster.getService("HDFS") .getServiceComponent("SECONDARY_NAMENODE").getServiceComponentHosts(); assertEquals(1, namenodes2.size()); } finally { injector.getInstance(PersistService.class).stop(); } }
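The Ambari test above keeps reusing a single request set: it fills serviceRequests or componentHostRequests, hands the set to the controller, then calls clear() and refills it for the next operation. A small generic sketch of that call pattern, with a hypothetical submit() method standing in for the controller API:

import java.util.HashSet;
import java.util.Set;

public class ReusableRequestSet {
    // Hypothetical stand-in for a controller method that consumes a batch of requests.
    static void submit(Set<String> requests) {
        System.out.println("submitting " + requests.size() + " request(s): " + requests);
    }

    public static void main(String[] args) {
        Set<String> requests = new HashSet<>();

        requests.add("create service HDFS");
        submit(requests);

        // Reuse the same set for the next call: empty it, then refill.
        requests.clear();
        requests.add("install service HDFS");
        submit(requests);

        requests.clear();
        requests.add("start service HDFS");
        submit(requests);
    }
}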
From source file:com.ebay.cloud.cms.query.service.QueryPaginationByIdTest.java
private void iteratorWithSort(final List<String> sortOnFields, long TOTAL_COUNT, String query,
        QueryContext queryContext, Set<String> allIds, int step) throws Exception {
    IQueryResult result;
    int fetchCount = 0;
    int queryIterationCount = 0;
    Set<String> fetchedIds = new HashSet<String>();

    queryContext.getCursor().setLimits(null);
    queryContext.getCursor().setSingleCursorValue(null);
    queryContext.getCursor().removeSortOn();
    queryContext.getCursor().setLimits(new int[] { step });
    for (String sortOnField : sortOnFields) {
        queryContext.getCursor().addSortOn(sortOnField);
    }

    result = queryService.query(query, queryContext);
    queryIterationCount++;
    List<String> repeatedIds = new ArrayList<String>();
    do {
        for (IEntity e : result.getEntities()) {
            if (fetchedIds.contains(e.getId())) {
                repeatedIds.add(e.getId());
            } else {
                fetchedIds.add(e.getId());
            }
            fetchCount++;
        }
        if (result.hasMoreResults()) {
            Assert.assertNotNull(result.getNextCursor());
            Assert.assertNotNull(result.getNextCursor().getSortOn());
            Assert.assertFalse(result.getNextCursor().isJoinCursor());
            Assert.assertNotNull(result.getNextCursor().getSingleCursorValue());
            for (String sortField : sortOnFields) {
                Assert.assertTrue(result.getNextCursor().getSortOn().contains(sortField));
            }
            result.getNextCursor().setLimits(new int[] { step });
            queryContext.setCursor(result.getNextCursor());
            result = queryService.query(query, queryContext);
            queryIterationCount++;
        } else {
            break;
        }
    } while (true);
    System.out.println(" iterate count : " + queryIterationCount);

    Set<String> missedIds = CollectionUtils.diffSet(allIds, fetchedIds);
    if (TOTAL_COUNT != fetchCount) {
        StringBuilder sb = new StringBuilder();
        sb.append(" Repeated entity ids: ").append(new ObjectMapper().writeValueAsString(repeatedIds));
        sb.append(" Missed entity ids: ").append(new ObjectMapper().writeValueAsString(missedIds));
        Assert.fail(sb.toString());
    }
    Assert.assertEquals(" Missed entity ids: " + new ObjectMapper().writeValueAsString(missedIds), 0,
            missedIds.size());
    Assert.assertEquals(TOTAL_COUNT, fetchCount);
    Assert.assertEquals(0, repeatedIds.size());
    Assert.assertEquals(0, missedIds.size());
    Assert.assertEquals(TOTAL_COUNT, fetchedIds.size());
    // Assert.assertEquals(2, queryIterationCount);
    fetchedIds.clear();
    fetchCount = 0;
    queryIterationCount = 0;
}
From source file:com.opengamma.integration.marketdata.WatchListRecorder.java
public void run() { final Collection<ConfigItem<ViewDefinition>> viewDefinitions = _viewProcessor.getConfigSource() .getAll(ViewDefinition.class, VersionCorrection.LATEST); final Set<ExternalId> emitted = Sets.newHashSet(); final Set<ExternalId> emittedRecently = Sets.newHashSet(); final Instant now = OffsetDateTime.ofInstant(Instant.now(), ZoneOffset.UTC).with(LocalTime.NOON) .toInstant();/*from w w w. j av a 2s . c o m*/ s_logger.info("{} view(s) defined in demo view processor", viewDefinitions.size()); _writer.println("# Automatically generated"); final ViewClient client = _viewProcessor.createViewClient(UserPrincipal.getLocalUser()); final List<CompiledViewDefinition> compilations = new LinkedList<CompiledViewDefinition>(); client.setResultListener(new AbstractViewResultListener() { @Override public UserPrincipal getUser() { return UserPrincipal.getLocalUser(); } @Override public void viewDefinitionCompiled(final CompiledViewDefinition compiledViewDefinition, final boolean hasMarketDataPermissions) { compilations.add(compiledViewDefinition); } @Override public void viewDefinitionCompilationFailed(final Instant valuationTime, final Exception exception) { s_logger.error("Error while compiling view definition " + viewDefinitions + " for instant " + valuationTime, exception); } }); for (final ConfigItem<ViewDefinition> viewDefinition : viewDefinitions) { if (viewDefinition.getName().startsWith("10K")) { // Don't do the huge ones! s_logger.warn("Skipping {}", viewDefinition); _writer.println(); _writer.print("# Skipping "); _writer.println(viewDefinition); continue; } s_logger.debug("Compiling view {}", viewDefinition); _writer.println(); _writer.println("# " + viewDefinition); client.attachToViewProcess(viewDefinition.getUniqueId(), generateExecutionOptions(now)); try { client.waitForCompletion(); } catch (final InterruptedException e) { s_logger.warn("Interrupted while waiting for '{}' to complete" + viewDefinition); } client.detachFromViewProcess(); if (compilations.size() == 0) { _writer.println("# ERROR - Failed to compile " + viewDefinition); } else { _writer.println("# " + compilations.size() + " different compilations of " + viewDefinition + " for the next " + VALIDITY_PERIOD_DAYS + " days"); } for (int i = 0; i < compilations.size(); i++) { final CompiledViewDefinition compilation = compilations.get(i); final Set<ValueSpecification> liveData = compilation.getMarketDataRequirements(); s_logger.info("{} live data requirements for view {} for compilation {}", new Object[] { liveData.size(), viewDefinition, compilation.toString() }); _writer.println("# " + (i + 1) + " of " + compilations.size() + " - " + compilation); for (final ValueSpecification specification : liveData) { s_logger.debug("Specification {}", specification); emitSpecification(specification, emitted, emittedRecently); } _writer.flush(); emittedRecently.clear(); } compilations.clear(); } client.shutdown(); addInterestRates(emitted); }
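In the WatchListRecorder example, emitted persists across all view definitions while emittedRecently is cleared after each compilation, so duplicates are suppressed per compilation without discarding the overall record of what was written. A small sketch of that two-set idea, assuming plain strings in place of ExternalId/ValueSpecification and simplified emit logic:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class TwoLevelDedup {
    public static void main(String[] args) {
        List<List<String>> batches = List.of(
                List.of("EUR/USD", "USD/JPY", "EUR/USD"),
                List.of("EUR/USD", "GBP/USD"));

        Set<String> emittedEver = new HashSet<>();      // never cleared
        Set<String> emittedThisBatch = new HashSet<>(); // cleared after each batch

        for (List<String> batch : batches) {
            for (String id : batch) {
                if (emittedThisBatch.add(id)) {         // add() returns false for duplicates
                    System.out.println("emit " + id
                            + (emittedEver.add(id) ? " (first time ever)" : " (seen in an earlier batch)"));
                }
            }
            // Forget the per-batch view, keep the global one.
            emittedThisBatch.clear();
        }
    }
}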
From source file:ddf.catalog.impl.CatalogFrameworkImpl.java
/** * Executes a query using the specified {@link QueryRequest} and {@link FederationStrategy}. * Based on the isEnterprise and sourceIds list in the query request, the federated query may * include the local provider and {@link ConnectedSource}s. * * @param queryRequest the {@link QueryRequest} * @param strategy the {@link FederationStrategy} * @return the {@link QueryResponse}// w w w . j a v a 2 s .c o m * @throws FederationException */ private QueryResponse doQuery(QueryRequest queryRequest, FederationStrategy strategy) throws FederationException { Set<ProcessingDetails> exceptions = new HashSet<>(); Set<String> sourceIds = getCombinedIdSet(queryRequest); LOGGER.debug("source ids: {}", sourceIds); List<Source> sourcesToQuery = new ArrayList<>(); boolean addConnectedSources = false; boolean addCatalogProvider = false; boolean sourceFound; if (queryRequest.isEnterprise()) { // Check if it's an enterprise query addConnectedSources = true; addCatalogProvider = hasCatalogProvider(); if (sourceIds != null && !sourceIds.isEmpty()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Enterprise Query also included specific sites which will now be ignored"); } sourceIds.clear(); } // add all the federated sources for (FederatedSource source : frameworkProperties.getFederatedSources().values()) { if (sourceIsAvailable(source) && canAccessSource(source, queryRequest)) { sourcesToQuery.add(source); } else { exceptions.add(createUnavailableProcessingDetails(source)); } } } else if (sourceIds != null && !sourceIds.isEmpty()) { // it's a targeted federated query if (includesLocalSources(sourceIds)) { LOGGER.debug("Local source is included in sourceIds"); addConnectedSources = connectedSourcesExist(); addCatalogProvider = hasCatalogProvider(); sourceIds.remove(getId()); sourceIds.remove(null); sourceIds.remove(""); } // See if we still have sources to look up by name if (sourceIds.size() > 0) { for (String id : sourceIds) { LOGGER.debug("Looking up source ID = {}", id); sourceFound = false; if (frameworkProperties.getFederatedSources().containsKey(id)) { sourceFound = true; if (frameworkProperties.getFederatedSources().get(id).isAvailable() && canAccessSource( frameworkProperties.getFederatedSources().get(id), queryRequest)) { sourcesToQuery.add(frameworkProperties.getFederatedSources().get(id)); } else { exceptions.add(createUnavailableProcessingDetails( frameworkProperties.getFederatedSources().get(id))); } } if (!sourceFound) { exceptions.add(new ProcessingDetailsImpl(id, new Exception("Source id is not found"))); } } } } else { // default to local sources addConnectedSources = connectedSourcesExist(); addCatalogProvider = hasCatalogProvider(); } if (addConnectedSources) { // add Connected Sources for (ConnectedSource source : frameworkProperties.getConnectedSources()) { if (sourceIsAvailable(source)) { sourcesToQuery.add(source); } else { // do nothing -- we don't care if a connected source is // unavailable. if (LOGGER.isWarnEnabled()) { LOGGER.warn("Connected Source {} is unavailable and will not be queried.", source.getId()); } } } } if (addCatalogProvider) { if (sourceIsAvailable(catalog)) { sourcesToQuery.add(catalog); } else { exceptions.add(createUnavailableProcessingDetails(catalog)); } } if (sourcesToQuery.isEmpty()) { // We have nothing to query at all. 
// TODO change to SourceUnavailableException throw new FederationException( "SiteNames could not be resolved due to invalid site names, none of the sites were available, or the current subject doesn't have permission to access the sites."); } LOGGER.debug("Calling strategy.federate()"); QueryResponse response = strategy.federate(sourcesToQuery, queryRequest); frameworkProperties.getQueryResponsePostProcessor().processResponse(response); return addProcessingDetails(exceptions, response); }
From source file:org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.java
/** Replicate a set of blocks * * @param blocksToReplicate blocks to be replicated, for each priority * @return the number of blocks scheduled for replication *//*from ww w . j ava 2 s. c om*/ @VisibleForTesting int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) { int requiredReplication, numEffectiveReplicas; List<DatanodeDescriptor> containingNodes; DatanodeDescriptor srcNode; BlockCollection bc = null; int additionalReplRequired; int scheduledWork = 0; List<ReplicationWork> work = new LinkedList<ReplicationWork>(); namesystem.writeLock(); try { synchronized (neededReplications) { for (int priority = 0; priority < blocksToReplicate.size(); priority++) { for (Block block : blocksToReplicate.get(priority)) { // block should belong to a file bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append if (bc == null || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); continue; } requiredReplication = bc.getBlockReplication(); // get a source data-node containingNodes = new ArrayList<DatanodeDescriptor>(); List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<DatanodeStorageInfo>(); NumberReplicas numReplicas = new NumberReplicas(); srcNode = chooseSourceDatanode(block, containingNodes, liveReplicaNodes, numReplicas, priority); if (srcNode == null) { // block can not be replicated from any node LOG.debug("Block " + block + " cannot be repl from any node"); continue; } // liveReplicaNodes can include READ_ONLY_SHARED replicas which are // not included in the numReplicas.liveReplicas() count assert liveReplicaNodes.size() >= numReplicas.liveReplicas(); // do not schedule more if enough replicas is already pending numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if (numEffectiveReplicas >= requiredReplication) { if ((pendingReplications.getNumReplicas(block) > 0) || (blockHasEnoughRacks(block))) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); blockLog.info("BLOCK* Removing " + block + " from neededReplications as it has enough replicas"); continue; } } if (numReplicas.liveReplicas() < requiredReplication) { additionalReplRequired = requiredReplication - numEffectiveReplicas; } else { additionalReplRequired = 1; // Needed on a new rack } work.add(new ReplicationWork(block, bc, srcNode, containingNodes, liveReplicaNodes, additionalReplRequired, priority)); } } } } finally { namesystem.writeUnlock(); } final Set<Node> excludedNodes = new HashSet<Node>(); for (ReplicationWork rw : work) { // Exclude all of the containing nodes from being targets. // This list includes decommissioning or corrupt nodes. excludedNodes.clear(); for (DatanodeDescriptor dn : rw.containingNodes) { excludedNodes.add(dn); } // choose replication targets: NOT HOLDING THE GLOBAL LOCK // It is costly to extract the filename for which chooseTargets is called, // so for now we pass in the block collection itself. 
rw.chooseTargets(blockplacement, storagePolicySuite, excludedNodes); } namesystem.writeLock(); try { for (ReplicationWork rw : work) { final DatanodeStorageInfo[] targets = rw.targets; if (targets == null || targets.length == 0) { rw.targets = null; continue; } synchronized (neededReplications) { Block block = rw.block; int priority = rw.priority; // Recheck since global lock was released // block should belong to a file bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append if (bc == null || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; neededReplications.decrementReplicationIndex(priority); continue; } requiredReplication = bc.getBlockReplication(); // do not schedule more if enough replicas is already pending NumberReplicas numReplicas = countNodes(block); numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if (numEffectiveReplicas >= requiredReplication) { if ((pendingReplications.getNumReplicas(block) > 0) || (blockHasEnoughRacks(block))) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); rw.targets = null; blockLog.info("BLOCK* Removing " + block + " from neededReplications as it has enough replicas"); continue; } } if ((numReplicas.liveReplicas() >= requiredReplication) && (!blockHasEnoughRacks(block))) { if (rw.srcNode.getNetworkLocation() .equals(targets[0].getDatanodeDescriptor().getNetworkLocation())) { //No use continuing, unless a new rack in this case continue; } } // Add block to the to be replicated list rw.srcNode.addBlockToBeReplicated(block, targets); scheduledWork++; DatanodeStorageInfo.incrementBlocksScheduled(targets); // Move the block-replication into a "pending" state. // The reason we use 'pending' is so we can retry // replications that fail after an appropriate amount of time. pendingReplications.increment(block, DatanodeStorageInfo.toDatanodeDescriptors(targets)); if (blockLog.isDebugEnabled()) { blockLog.debug("BLOCK* block " + block + " is moved from neededReplications to pendingReplications"); } // remove from neededReplications if (numEffectiveReplicas + targets.length >= requiredReplication) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); } } } } finally { namesystem.writeUnlock(); } if (blockLog.isInfoEnabled()) { // log which blocks have been scheduled for replication for (ReplicationWork rw : work) { DatanodeStorageInfo[] targets = rw.targets; if (targets != null && targets.length != 0) { StringBuilder targetList = new StringBuilder("datanode(s)"); for (int k = 0; k < targets.length; k++) { targetList.append(' '); targetList.append(targets[k].getDatanodeDescriptor()); } blockLog.info("BLOCK* ask " + rw.srcNode + " to replicate " + rw.block + " to " + targetList); } } } if (blockLog.isDebugEnabled()) { blockLog.debug("BLOCK* neededReplications = " + neededReplications.size() + " pendingReplications = " + pendingReplications.size()); } return scheduledWork; }
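The replication loop above allocates excludedNodes once and calls clear() at the top of each ReplicationWork iteration instead of building a new HashSet per block. A minimal sketch of that scratch-set idiom with placeholder data (the block and datanode names are made up):

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class PerIterationScratchSet {
    public static void main(String[] args) {
        // block -> nodes that already hold a replica (placeholder data)
        Map<String, List<String>> work = Map.of(
                "blk_1", List.of("dn1", "dn2"),
                "blk_2", List.of("dn2", "dn3"));

        // One scratch set, reused for every work item instead of a new HashSet per block.
        Set<String> excluded = new HashSet<>();
        for (Map.Entry<String, List<String>> e : work.entrySet()) {
            excluded.clear();                    // drop state left over from the previous block
            excluded.addAll(e.getValue());       // exclude nodes that already hold this block
            System.out.println(e.getKey() + " -> excluded " + excluded);
        }
    }
}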
From source file:com.google.gwt.emultest.java.util.TreeMapTest.java
public void testNavigableKeySet_viewRemove() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();

    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);

    Set<K> keySet = map.navigableKeySet();
    assertEquals(2, keySet.size());

    map.remove(keys[1]);
    assertEquals(1, keySet.size());

    map.put(keys[1], values[1]);
    keySet.remove(keys[0]);
    assertEquals(1, map.size());
    assertEquals(1, keySet.size());
    assertEquals(keys[1], keySet.iterator().next());

    keySet.clear();
    _assertEmpty(map);
}
From source file:com.google.gwt.emultest.java.util.TreeMapTest.java
public void testDescendingKeySet_viewRemove() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();

    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);

    Set<K> keySet = map.descendingKeySet();
    assertEquals(2, keySet.size());

    map.remove(keys[1]);
    assertEquals(1, keySet.size());

    map.put(keys[1], values[1]);
    keySet.remove(keys[0]);
    assertEquals(1, map.size());
    assertEquals(1, keySet.size());
    assertEquals(keys[1], keySet.iterator().next());

    keySet.clear();
    assertEquals(0, map.size());
    assertEquals(0, keySet.size());
}
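Both TreeMap tests rely on navigableKeySet() and descendingKeySet() returning live views: clear() on the view removes every entry from the backing map, which is what _assertEmpty(map) and the size assertions verify. A minimal standalone sketch of that behavior:

import java.util.Set;
import java.util.TreeMap;

public class KeySetViewClear {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);

        Set<String> view = map.descendingKeySet();  // live view over the map's keys
        System.out.println(view);                   // [b, a]

        // Clearing the view clears the backing map as well.
        view.clear();
        System.out.println(map.isEmpty());          // true
        System.out.println(view.isEmpty());         // true
    }
}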
From source file:dk.netarkivet.harvester.indexserver.CrawlLogIndexCache.java
/** Combine a number of crawl.log files into one Lucene index. This index * is placed as gzip files under the directory returned by getCacheFile(). * * @param rawfiles The map from job ID into crawl.log contents. No * null values are allowed in this map.//from w w w .j a v a 2 s .co m */ protected void combine(Map<Long, File> rawfiles) { indexingJobCount++; long datasetSize = rawfiles.values().size(); log.info("Starting combine task #" + indexingJobCount + ". This combines a dataset with " + datasetSize + " crawl logs (thread = " + Thread.currentThread().getName() + ")"); File resultDir = getCacheFile(rawfiles.keySet()); Set<File> tmpfiles = new HashSet<File>(); String indexLocation = resultDir.getAbsolutePath() + ".luceneDir"; ThreadPoolExecutor executor = null; try { DigestIndexer indexer = createStandardIndexer(indexLocation); final boolean verboseIndexing = false; DigestOptions indexingOptions = new DigestOptions(this.useBlacklist, verboseIndexing, this.mimeFilter); long count = 0; Set<IndexingState> outstandingJobs = new HashSet<IndexingState>(); final int maxThreads = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAXTHREADS); executor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>()); executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy()); for (Map.Entry<Long, File> entry : rawfiles.entrySet()) { Long jobId = entry.getKey(); File crawlLog = entry.getValue(); // Generate UUID to ensure a unique filedir for the index. File tmpFile = new File(FileUtils.getTempDir(), UUID.randomUUID().toString()); tmpfiles.add(tmpFile); String localindexLocation = tmpFile.getAbsolutePath(); Long cached = cdxcache.cache(jobId); if (cached == null) { log.warn("Skipping the ingest of logs for job " + entry.getKey() + ". Unable to retrieve cdx-file for job."); continue; } File cachedCDXFile = cdxcache.getCacheFile(cached); // Dispatch this indexing task to a separate thread that // handles the sorting of the logfiles and the generation // of a lucene index for this crawllog and cdxfile. count++; String taskID = count + " out of " + datasetSize; log.debug("Making subthread for indexing job " + jobId + " - task " + taskID); Callable<Boolean> task = new DigestIndexerWorker(localindexLocation, jobId, crawlLog, cachedCDXFile, indexingOptions, taskID); Future<Boolean> result = executor.submit(task); outstandingJobs.add(new IndexingState(jobId, localindexLocation, result)); } // wait for all the outstanding subtasks to complete. Set<Directory> subindices = new HashSet<Directory>(); // Deadline for the combine-task long combineTimeout = Settings.getLong(HarvesterSettings.INDEXSERVER_INDEXING_TIMEOUT); long timeOutTime = System.currentTimeMillis() + combineTimeout; // The indexwriter for the totalindex. IndexWriter totalIndex = indexer.getIndex(); int subindicesInTotalIndex = 0; // Max number of segments in totalindex. int maxSegments = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAX_SEGMENTS); final int ACCUMULATED_SUBINDICES_BEFORE_MERGING = 200; while (outstandingJobs.size() > 0) { log.info("Outstanding jobs in combine task #" + indexingJobCount + " is now " + outstandingJobs.size()); Iterator<IndexingState> iterator = outstandingJobs.iterator(); if (timeOutTime < System.currentTimeMillis()) { log.warn("Max indexing time exceeded for one index (" + TimeUtils.readableTimeInterval(combineTimeout) + "). 
Indexing stops here, although" + " missing subindices for " + outstandingJobs.size() + " jobs"); break; } while (iterator.hasNext() && subindices.size() < ACCUMULATED_SUBINDICES_BEFORE_MERGING) { Future<Boolean> nextResult; IndexingState next = iterator.next(); if (next.getResultObject().isDone()) { nextResult = next.getResultObject(); try { // check, if the indexing failed if (nextResult.get()) { subindices.add(new SimpleFSDirectory(new File(next.getIndex()))); } else { log.warn("Indexing of job " + next.getJobIdentifier() + " failed."); } } catch (InterruptedException e) { log.warn("Unable to get Result back from " + "indexing thread", e); } catch (ExecutionException e) { log.warn("Unable to get Result back from " + "indexing thread", e); } //remove the done object from the set iterator.remove(); } } if (subindices.size() >= ACCUMULATED_SUBINDICES_BEFORE_MERGING) { log.info("Adding " + subindices.size() + " subindices to main index. Forcing index to contain max " + maxSegments + " files (related to combine task # " + indexingJobCount + ")"); totalIndex.addIndexes(subindices.toArray(new Directory[0])); totalIndex.forceMerge(maxSegments); totalIndex.commit(); for (Directory luceneDir : subindices) { luceneDir.close(); } subindicesInTotalIndex += subindices.size(); log.info("Completed adding " + subindices.size() + " subindices to main index, now containing " + subindicesInTotalIndex + " subindices" + "(related to combine task # " + indexingJobCount + ")"); subindices.clear(); } else { sleepAwhile(); } } log.info("Adding the final " + subindices.size() + " subindices to main index. Forcing index to contain max " + maxSegments + " files " + "(related to combine task # " + indexingJobCount + ")"); totalIndex.addIndexes(subindices.toArray(new Directory[0])); totalIndex.forceMerge(maxSegments); totalIndex.commit(); for (Directory luceneDir : subindices) { luceneDir.close(); } subindices.clear(); log.info("Adding operation completed (combine task # " + indexingJobCount + ")!"); long docsInIndex = totalIndex.numDocs(); indexer.close(); log.info("Closed index (related to combine task # " + indexingJobCount); // Now the index is made, gzip it up. File totalIndexDir = new File(indexLocation); log.info("Gzip-compressing the individual " + totalIndexDir.list().length + " index files of combine task # " + indexingJobCount); ZipUtils.gzipFiles(totalIndexDir, resultDir); log.info("Completed combine task # " + indexingJobCount + " that combined a dataset with " + datasetSize + " crawl logs (entries in combined index: " + docsInIndex + ") - compressed index has size " + FileUtils.getHumanReadableFileSize(resultDir)); } catch (IOException e) { throw new IOFailure("Error setting up craw.log index framework for " + resultDir.getAbsolutePath(), e); } finally { // close down Threadpool-executor closeDownThreadpoolQuietly(executor); FileUtils.removeRecursively(new File(indexLocation)); for (File temporaryFile : tmpfiles) { FileUtils.removeRecursively(temporaryFile); } } }
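The combine loop above collects finished sub-indices in a set, merges them into the main index once ACCUMULATED_SUBINDICES_BEFORE_MERGING is reached, and then calls subindices.clear() so the next chunk starts empty. A simplified sketch of that accumulate-merge-clear cycle, using strings in place of Lucene Directory objects and a hypothetical merge() helper:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AccumulateAndFlush {
    static final int FLUSH_THRESHOLD = 3;

    // Stand-in for totalIndex.addIndexes(...) followed by forceMerge/commit.
    static void merge(Set<String> chunk) {
        System.out.println("merging " + chunk.size() + " sub-indices: " + chunk);
    }

    public static void main(String[] args) {
        List<String> finished = List.of("idx1", "idx2", "idx3", "idx4", "idx5");

        Set<String> pending = new HashSet<>();
        for (String subIndex : finished) {
            pending.add(subIndex);
            if (pending.size() >= FLUSH_THRESHOLD) {
                merge(pending);
                pending.clear();   // start accumulating the next chunk from scratch
            }
        }
        if (!pending.isEmpty()) {
            merge(pending);        // flush whatever is left at the end
            pending.clear();
        }
    }
}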
From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java
private boolean createReconfigurationRecordsDB(Map<String, String> nameStates, Set<NodeIDType> newActives) { String insertCmd = "insert into " + getRCRecordTable() + " (" + Columns.RC_GROUP_NAME.toString() + ", " + Columns.STRINGIFIED_RECORD.toString() + ", " + Columns.SERVICE_NAME.toString() + " ) values (?,?,?)"; PreparedStatement insertRC = null; Connection conn = null;/*from w ww. j a v a 2s . c o m*/ boolean insertedAll = true; Set<String> batch = new HashSet<String>(); Set<String> committed = new HashSet<String>(); try { if (conn == null) { conn = this.getDefaultConn(); conn.setAutoCommit(false); insertRC = conn.prepareStatement(insertCmd); } assert (nameStates != null && !nameStates.isEmpty()); String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next()); int i = 0; long t1 = System.currentTimeMillis(); for (String name : nameStates.keySet()) { ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, -1, newActives); /* We just directly initialize with WAIT_ACK_STOP:-1 instead of * starting with READY:-1 and pretending to go through the whole * reconfiguration protocol sequence. */ record.setState(name, -1, RCStates.WAIT_ACK_STOP); insertRC.setString(1, rcGroupName); if (RC_RECORD_CLOB_OPTION) insertRC.setClob(2, new StringReader(record.toString())); else insertRC.setString(2, record.toString()); insertRC.setString(3, name); insertRC.addBatch(); batch.add(name); i++; if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == nameStates.size()) { int[] executed = insertRC.executeBatch(); conn.commit(); insertRC.clearBatch(); committed.addAll(batch); batch.clear(); for (int j : executed) insertedAll = insertedAll && (j > 0); if (insertedAll) log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms", new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) }); t1 = System.currentTimeMillis(); } } } catch (SQLException sqle) { log.severe("SQLException while inserting batched RC records using " + insertCmd); sqle.printStackTrace(); } finally { cleanup(insertRC); cleanup(conn); } // rollback if (!insertedAll) { for (String name : nameStates.keySet()) if (committed.contains(name)) this.deleteReconfigurationRecord(name, 0); } return insertedAll; }
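The batched insert above tracks the names in the current JDBC batch in one set and the names already committed in another; after each commit it runs committed.addAll(batch); batch.clear(), so a later rollback only deletes records it knows were committed. A simplified sketch of that bookkeeping with the JDBC calls replaced by print statements:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class BatchCommitTracking {
    static final int MAX_BATCH_SIZE = 2;

    public static void main(String[] args) {
        List<String> names = List.of("n1", "n2", "n3", "n4", "n5");

        Set<String> batch = new HashSet<>();      // names added to the not-yet-committed batch
        Set<String> committed = new HashSet<>();  // names whose inserts have been committed

        int i = 0;
        for (String name : names) {
            batch.add(name);                      // stands in for insertRC.addBatch()
            i++;
            if (i % MAX_BATCH_SIZE == 0 || i == names.size()) {
                // stands in for executeBatch() + conn.commit()
                System.out.println("committing batch: " + batch);
                committed.addAll(batch);
                batch.clear();                    // the next batch starts empty
            }
        }
        System.out.println("committed names: " + committed);
    }
}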