Usage examples for java.util.TreeMap.lastKey()
public K lastKey()
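Before the project examples, a minimal self-contained sketch (not taken from any project below) of the method's contract: lastKey() returns the greatest key currently in the map's sort order and throws NoSuchElementException when the map is empty.

import java.util.NoSuchElementException;
import java.util.TreeMap;

public class LastKeyDemo {
    public static void main(String[] args) {
        TreeMap<Integer, String> map = new TreeMap<>();
        map.put(3, "three");
        map.put(1, "one");
        map.put(7, "seven");

        // The greatest key in the map's sort order.
        System.out.println(map.lastKey()); // prints 7

        // An empty map has no last key, so lastKey() throws.
        try {
            new TreeMap<Integer, String>().lastKey();
        } catch (NoSuchElementException e) {
            System.out.println("empty map: NoSuchElementException");
        }
    }
}

This is why several of the examples below guard with an isEmpty() check before calling lastKey().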
From source file:hydrograph.ui.dataviewer.filter.FilterHelper.java
/**
 * Rearrange groups after delete row.
 *
 * @param groupSelectionMap
 *            the group selection map
 * @param selectionList
 *            the selection list
 * @return true, if successful
 */
public boolean rearrangeGroupsAfterDeleteRow(TreeMap<Integer, List<List<Integer>>> groupSelectionMap,
        List<Integer> selectionList) {
    boolean retValue = false;
    int lastKey = groupSelectionMap.lastKey();
    int count = 0;
    for (int i = lastKey; i >= 0; i--) {
        List<List<Integer>> groups = groupSelectionMap.get(i);
        for (int j = 0; j <= groups.size() - 1; j++) {
            if (selectionList.size() == groups.get(j).size()
                    && ListUtils.isEqualList(selectionList, groups.get(j))) {
                count++;
                if (count >= 2) {
                    retValue = true;
                }
            }
        }
    }
    return retValue;
}
From source file:hydrograph.ui.dataviewer.filter.FilterHelper.java
/**
 * Rearrange groups.
 *
 * @param groupSelectionMap
 *            the group selection map
 * @param selectionList
 *            the selection list
 */
public void rearrangeGroups(TreeMap<Integer, List<List<Integer>>> groupSelectionMap,
        List<Integer> selectionList) {
    List<Integer> tempList = new ArrayList<>();
    int lastKey = groupSelectionMap.lastKey();
    for (int i = lastKey; i >= 0; i--) {
        List<List<Integer>> groups = groupSelectionMap.get(i);
        for (int j = 0; j <= groups.size() - 1; j++) {
            if (selectionList.size() < groups.get(j).size()
                    && ListUtils.intersection(selectionList, groups.get(j)).size() > 0) {
                tempList.addAll(groups.get(j));
                groups.get(j).clear();
                groups.set(j, new ArrayList<Integer>(selectionList));
                selectionList.clear();
                selectionList.addAll(tempList);
            }
            tempList.clear();
        }
    }
}
From source file:org.cloudfoundry.client.lib.CloudFoundryClient.java
public Map<String, String> getCrashLogs(String appName) throws CloudFoundryException {
    String filePath = ""; // TODO - where am I supposed to get this value?
    int index = 0; // TODO - where am I supposed to get this value?
    String urlPath = getFileUrlPath(index, filePath);
    CrashesInfo crashes = getCrashes(appName);
    if (crashes.getCrashes().isEmpty()) {
        return Collections.emptyMap();
    }
    TreeMap<Date, String> crashInstances = new TreeMap<Date, String>();
    for (CrashInfo crash : crashes.getCrashes()) {
        crashInstances.put(crash.getSince(), crash.getInstance());
    }
    String instance = crashInstances.get(crashInstances.lastKey());
    return doGetLogs(urlPath, appName, instance);
}
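A side note on the last two lines: get(lastKey()) walks the tree twice. Since TreeMap implements java.util.NavigableMap (Java 6+), the same lookup can be done in one step; lastEntry() returns null instead of throwing on an empty map, so the emptiness guard above still matters. A sketch of the equivalent fragment:

// Equivalent to crashInstances.get(crashInstances.lastKey()), one lookup
// instead of two; safe here because the map was verified non-empty above.
String instance = crashInstances.lastEntry().getValue();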
From source file:com.l2jfree.sql.L2DatabaseInstaller.java
public static void check() throws SAXException, IOException, ParserConfigurationException {
    final TreeMap<String, String> tables = new TreeMap<String, String>();
    final TreeMap<Double, String> updates = new TreeMap<Double, String>();

    final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    factory.setValidating(false); // FIXME add validation
    factory.setIgnoringComments(true);

    final List<Document> documents = new ArrayList<Document>();

    InputStream is = null;
    try {
        // load default database schema from resources
        is = L2DatabaseInstaller.class.getResourceAsStream("database_schema.xml");
        documents.add(factory.newDocumentBuilder().parse(is));
    } finally {
        IOUtils.closeQuietly(is);
    }

    final File f = new File("./config/database_schema.xml");
    // load optional project specific database tables/updates (fails on already existing)
    if (f.exists())
        documents.add(factory.newDocumentBuilder().parse(f));

    for (Document doc : documents) {
        for (Node n1 : L2XML.listNodesByNodeName(doc, "database")) {
            for (Node n2 : L2XML.listNodesByNodeName(n1, "table")) {
                final String name = L2XML.getAttribute(n2, "name");
                final String definition = L2XML.getAttribute(n2, "definition");

                final String oldDefinition = tables.put(name, definition);
                if (oldDefinition != null)
                    throw new RuntimeException("Found multiple tables with name " + name + "!");
            }
            for (Node n2 : L2XML.listNodesByNodeName(n1, "update")) {
                final Double revision = Double.valueOf(L2XML.getAttribute(n2, "revision"));
                final String query = L2XML.getAttribute(n2, "query");

                final String oldQuery = updates.put(revision, query);
                if (oldQuery != null)
                    throw new RuntimeException("Found multiple updates with revision " + revision + "!");
            }
        }
    }

    createRevisionTable();

    final double databaseRevision = getDatabaseRevision();

    if (databaseRevision == -1) // no table exists
    {
        for (Entry<String, String> table : tables.entrySet()) {
            final String tableName = table.getKey();
            final String tableDefinition = table.getValue();

            installTable(tableName, tableDefinition);
        }

        if (updates.isEmpty())
            insertRevision(0);
        else
            insertRevision(updates.lastKey());
    } else // check for possibly required updates
    {
        for (Entry<String, String> table : tables.entrySet()) {
            final String tableName = table.getKey();
            final String tableDefinition = table.getValue();

            if (L2Database.tableExists(tableName))
                continue;

            System.err.println("Table '" + tableName + "' is missing, so the server attempts to install it.");
            System.err.println("WARNING! It's highly recommended to check the results manually.");

            installTable(tableName, tableDefinition);
        }

        for (Entry<Double, String> update : updates.entrySet()) {
            final double updateRevision = update.getKey();
            final String updateQuery = update.getValue();

            if (updateRevision > databaseRevision) {
                executeUpdate(updateQuery);
                insertRevision(updateRevision);
            }
        }
    }
}
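The revision bookkeeping above in miniature: a TreeMap keyed by revision number, where lastKey() yields the highest known revision to record after a fresh install. The revision values and queries here are hypothetical, for illustration only:

TreeMap<Double, String> updates = new TreeMap<Double, String>();
updates.put(1.0, "ALTER TABLE items ADD COLUMN enchant INT");   // hypothetical
updates.put(2.5, "CREATE INDEX idx_char_name ON characters");   // hypothetical
updates.put(2.1, "ALTER TABLE clans ADD COLUMN motto VARCHAR"); // hypothetical
// TreeMap keeps keys sorted, so the last key is the newest revision,
// regardless of insertion order.
System.out.println(updates.lastKey()); // prints 2.5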
From source file:org.apache.hadoop.hbase.master.LoadBalancer.java
/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server. If the average is an integer number,
 * all servers will be balanced to the average. Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions. Stop once you reach a
 *     server that already has <= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions. Stop once you reach a server that
 *     already has >= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step. It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>. If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server
 *     but ended up with regions that were unassigned from overloaded servers
 *     and that still do not have assignment.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return. Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single region from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *     In addition, any server hosting >= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing. This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can at-most reassign the number of regions away from a particular
 *       server to be how many they report as most loaded.
 *       Should we just keep all assignment in memory? Any objections?
 *       Does this mean we need HeapSize on HMaster? Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterState Map of regionservers and their load/region information to
 *                     a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<HServerInfo, List<HRegionInfo>> clusterState) {
    long startTime = System.currentTimeMillis();

    // Make a map sorted by load and count regions
    TreeMap<HServerInfo, List<HRegionInfo>> serversByLoad =
            new TreeMap<HServerInfo, List<HRegionInfo>>(new HServerInfo.LoadComparator());
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : clusterState.entrySet()) {
        server.getKey().getLoad().setNumberOfRegions(server.getValue().size());
        numRegions += server.getKey().getLoad().getNumberOfRegions();
        serversByLoad.put(server.getKey(), server.getValue());
    }

    // Check if we even need to do any load balancing
    float average = (float) numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if (serversByLoad.lastKey().getLoad().getNumberOfRegions() <= ceiling
            && serversByLoad.firstKey().getLoad().getNumberOfRegions() >= floor) {
        // Skipped because no server outside (min,max) range
        LOG.info("Skipping load balancing. servers=" + numServers + " " + "regions=" + numRegions
                + " average=" + average + " " + "mostloaded="
                + serversByLoad.lastKey().getLoad().getNumberOfRegions() + " leastloaded="
                + serversByLoad.firstKey().getLoad().getNumberOfRegions());
        return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    List<RegionPlan> regionsToMove = new ArrayList<RegionPlan>();
    int regionidx = 0; // track the index in above list for setting destination

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    Map<HServerInfo, BalanceInfo> serverBalanceInfo = new TreeMap<HServerInfo, BalanceInfo>();
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        HServerInfo serverInfo = server.getKey();
        int regionCount = serverInfo.getLoad().getNumberOfRegions();
        if (regionCount <= max) {
            serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = randomize(server.getValue());
        int numToOffload = Math.min(regionCount - max, regions.size());
        int numTaken = 0;
        for (int i = regions.size() - 1; i >= 0; i--) {
            HRegionInfo hri = regions.get(i);
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, serverInfo, null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
        }
        serverBalanceInfo.put(serverInfo, new BalanceInfo(numToOffload, (-1) * numTaken));
    }

    // Walk down least loaded, filling each to the min
    int serversUnderloaded = 0; // number of servers that get new regions
    int neededRegions = 0; // number of regions needed to bring all up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min) {
            break;
        }
        serversUnderloaded++;
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
        serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken));
        // If we still want to take some, increment needed
        if (numTaken < numToTake) {
            neededRegions += (numToTake - numTaken);
        }
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionidx == regionsToMove.size()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
                + regionsToMove.size() + " regions off of " + serversOverloaded
                + " overloaded servers onto " + serversUnderloaded + " less loaded servers");
        return regionsToMove;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey(), null));
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (regionidx != regionsToMove.size()) {
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad().getNumberOfRegions();
            if (regionCount >= max) {
                break;
            }
            regionsToMove.get(regionidx).setDestination(server.getKey());
            regionidx++;
            if (regionidx == regionsToMove.size()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (regionidx != regionsToMove.size() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionidx=" + regionidx + ", regionsToMove=" + regionsToMove.size() + ", numServers="
                + numServers + ", serversOverloaded=" + serversOverloaded + ", serversUnderloaded="
                + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<HServerInfo, List<HRegionInfo>> e : clusterState.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().getServerName());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
            + regionsToMove.size() + " regions off of " + serversOverloaded + " overloaded servers onto "
            + serversUnderloaded + " less loaded servers");
    return regionsToMove;
}
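To make the MIN/MAX and sloppiness arithmetic concrete, here is a small worked example using the same expressions as the method above. The region and server counts are hypothetical, and slop stands in for the balancer's configured sloppiness field:

int numRegions = 10, numServers = 4;
float average = (float) numRegions / numServers;          // 2.5
int min = numRegions / numServers;                        // floor(average)   = 2
int max = numRegions % numServers == 0 ? min : min + 1;   // ceiling(average) = 3
float slop = 0.2f;                                        // hypothetical value
int floor = (int) Math.floor(average * (1 - slop));       // 2
int ceiling = (int) Math.ceil(average * (1 + slop));      // 3
// Balancing is skipped when the most loaded server (serversByLoad.lastKey())
// already holds <= ceiling regions and the least loaded (firstKey()) >= floor.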
From source file:org.xwiki.repository.internal.RepositoryManager.java
public DocumentReference importExtension(String extensionId, ExtensionRepository repository, Type type)
        throws QueryException, XWikiException, ResolveException {
    TreeMap<Version, String> versions = new TreeMap<Version, String>();

    Version lastVersion = getVersions(extensionId, repository, type, versions);

    if (lastVersion == null) {
        throw new ExtensionNotFoundException("Can't find any version for the extension [" + extensionId
                + "] on repository [" + repository + "]");
    } else if (versions.isEmpty()) {
        // If no valid version import the last version
        versions.put(lastVersion, extensionId);
    } else {
        // Select the last valid version
        lastVersion = versions.lastKey();
    }

    Extension extension = repository.resolve(new ExtensionId(extensionId, lastVersion));

    // Get former ids versions
    Collection<ExtensionId> features = extension.getExtensionFeatures();

    for (ExtensionId feature : features) {
        try {
            getVersions(feature.getId(), repository, type, versions);
        } catch (ResolveException e) {
            // Ignore
        }
    }

    XWikiContext xcontext = this.xcontextProvider.get();

    boolean needSave = false;

    XWikiDocument document = getExistingExtensionDocumentById(extensionId);

    if (document == null) {
        // Create document
        document = xcontext.getWiki().getDocument(new DocumentReference(xcontext.getWikiId(),
                Arrays.asList("Extension", extension.getName()), "WebHome"), xcontext);

        for (int i = 1; !document.isNew(); ++i) {
            document = xcontext.getWiki().getDocument(new DocumentReference(xcontext.getWikiId(),
                    Arrays.asList("Extension", extension.getName() + ' ' + i), "WebHome"), xcontext);
        }

        document.readFromTemplate(
                this.currentResolver.resolve(XWikiRepositoryModel.EXTENSION_TEMPLATEREFERENCE), xcontext);

        needSave = true;
    }

    // Update document

    BaseObject extensionObject = document.getXObject(XWikiRepositoryModel.EXTENSION_CLASSREFERENCE);
    if (extensionObject == null) {
        extensionObject = document.newXObject(XWikiRepositoryModel.EXTENSION_CLASSREFERENCE, xcontext);
        needSave = true;
    }

    if (!StringUtils.equals(extensionId,
            getValue(extensionObject, XWikiRepositoryModel.PROP_EXTENSION_ID, (String) null))) {
        extensionObject.set(XWikiRepositoryModel.PROP_EXTENSION_ID, extensionId, xcontext);
        needSave = true;
    }

    // Update extension information
    needSave |= updateExtension(extension, extensionObject, xcontext);

    // Proxy marker
    BaseObject extensionProxyObject = document.getXObject(XWikiRepositoryModel.EXTENSIONPROXY_CLASSREFERENCE);
    if (extensionProxyObject == null) {
        extensionProxyObject = document.newXObject(XWikiRepositoryModel.EXTENSIONPROXY_CLASSREFERENCE,
                xcontext);
        extensionProxyObject.setIntValue(XWikiRepositoryModel.PROP_PROXY_AUTOUPDATE, 1);
        needSave = true;
    }

    needSave |= update(extensionProxyObject, XWikiRepositoryModel.PROP_PROXY_REPOSITORYID,
            repository.getDescriptor().getId());
    needSave |= update(extensionProxyObject, XWikiRepositoryModel.PROP_PROXY_REPOSITORYTYPE,
            repository.getDescriptor().getType());
    needSave |= update(extensionProxyObject, XWikiRepositoryModel.PROP_PROXY_REPOSITORYURI,
            repository.getDescriptor().getURI().toString());

    // Remove versions that no longer exist
    Set<String> validVersions = new HashSet<String>();

    List<BaseObject> versionObjects = document
            .getXObjects(XWikiRepositoryModel.EXTENSIONVERSION_CLASSREFERENCE);
    if (versionObjects != null) {
        for (BaseObject versionObject : versionObjects) {
            if (versionObject != null) {
                String version = getValue(versionObject, XWikiRepositoryModel.PROP_VERSION_VERSION);

                if (StringUtils.isBlank(version) || (isVersionProxyingEnabled(document)
                        && !new DefaultVersion(version).equals(extension.getId().getVersion()))) {
                    // Empty version OR old versions should be proxied
                    document.removeXObject(versionObject);
                    needSave = true;
                } else {
                    if (!versions.containsKey(new DefaultVersion(version))) {
                        // The version does not exist on remote repository
                        if (!isVersionValid(document, versionObject, xcontext)) {
                            // The version is invalid, removing it to not make the whole extension invalid
                            document.removeXObject(versionObject);
                            needSave = true;
                        } else {
                            // The version is valid, let's keep it
                            validVersions.add(version);
                        }
                    } else {
                        // This version exists on remote repository
                        validVersions.add(version);
                    }
                }
            }
        }
    }

    List<BaseObject> dependencyObjects = document
            .getXObjects(XWikiRepositoryModel.EXTENSIONDEPENDENCY_CLASSREFERENCE);
    if (dependencyObjects != null) {
        for (BaseObject dependencyObject : dependencyObjects) {
            if (dependencyObject != null) {
                String version = getValue(dependencyObject,
                        XWikiRepositoryModel.PROP_DEPENDENCY_EXTENSIONVERSION);

                if (!validVersions.contains(version)) {
                    // The version is invalid, removing it to not make the whole extension invalid
                    document.removeXObject(dependencyObject);
                    needSave = true;
                }
            }
        }
    }

    // Update versions
    for (Map.Entry<Version, String> entry : versions.entrySet()) {
        Version version = entry.getKey();
        String id = entry.getValue();

        try {
            Extension versionExtension;
            if (version.equals(extension.getId().getVersion())) {
                versionExtension = extension;
            } else if (isVersionProxyingEnabled(document)) {
                continue;
            } else {
                versionExtension = repository.resolve(new ExtensionId(id, version));
            }

            // Update version related information
            needSave |= updateExtensionVersion(document, versionExtension);
        } catch (Exception e) {
            this.logger.error("Failed to resolve extension with id [" + id + "] and version [" + version
                    + "] on repository [" + repository + "]", e);
        }
    }

    if (needSave) {
        document.setAuthorReference(xcontext.getUserReference());
        if (document.isNew()) {
            document.setContentAuthorReference(xcontext.getUserReference());
            document.setCreatorReference(xcontext.getUserReference());
        }

        xcontext.getWiki().saveDocument(document, "Imported extension [" + extensionId
                + "] from repository [" + repository.getDescriptor() + "]", true, xcontext);
    }

    return document.getDocumentReference();
}
From source file:hydrograph.ui.dataviewer.filter.FilterHelper.java
/**
 * Rearrange group columns.
 *
 * @param groupSelectionMap
 *            the group selection map
 */
public void rearrangeGroupColumns(TreeMap<Integer, List<List<Integer>>> groupSelectionMap) {
    Map<Integer, List<List<Integer>>> tempMap = new TreeMap<Integer, List<List<Integer>>>(groupSelectionMap);
    for (int key : tempMap.keySet()) {
        List<List<Integer>> groups = tempMap.get(key);
        List<Integer> tempList = new ArrayList<>();
        for (List<Integer> grp : groups) {
            tempList.addAll(grp);
        }
        if (tempList.isEmpty()) {
            for (int i = key; i < tempMap.size() - 1; i++) {
                groupSelectionMap.put(i, tempMap.get(i + 1));
            }
            groupSelectionMap.remove(groupSelectionMap.lastKey());
        }
    }
}
From source file:com.brejza.matt.habmodem.Dsp_service.java
@Override
public void HabitatRx(TreeMap<Long, Telemetry_string> data, boolean success, String callsign, long startTime,
        long endTime, AscentRate as, double maxAltitude) {
    mapPayloads.get(callsign.toUpperCase()).setQueryOngoing(0);
    if (success) {
        String call = callsign.toUpperCase();
        System.out.println("DEBUG: Got " + data.size() + " sentences for payload " + callsign);
        logEvent("Habitat Query Got " + data.size() + " Sentences For Payload " + callsign, true);
        if (mapPayloads.containsKey(call)) {
            Payload p = mapPayloads.get(call);
            // if we haven't already got a telem_config, see if one exists in hab_con
            if (p.telemetryConfig == null) {
                if (hab_con.getTelemConfigs().containsKey(call)) {
                    p.telemetryConfig = hab_con.getTelemConfigs().get(call);
                }
            }
            long lt = p.getLastTime();
            p.setLastUpdated(endTime);
            p.putPackets(data);
            p.setIsActivePayload(true);
            if (p.colour == 0)
                p.setNewColour(newColour());
            if (data.size() > 0) {
                if (lt < Long.valueOf(data.lastKey())) {
                    if (as != null) {
                        if (as.valid())
                            p.ascentRate = as;
                    }
                }
            }
        } else {
            Payload p = new Payload(callsign, newColour(), true);
            if (hab_con.getTelemConfigs().containsKey(call)) {
                p.telemetryConfig = hab_con.getTelemConfigs().get(call);
            }
            p.setLastUpdated(endTime);
            p.data = data;
            mapPayloads.put(call, p);
            if (as != null) {
                if (as.valid())
                    mapPayloads.get(call).ascentRate = as;
            }
        }
        mapPayloads.get(call).putMaxAltitude(maxAltitude);
        Intent i = new Intent(HABITAT_NEW_DATA);
        if (data.size() > 0)
            i.putExtra(TELEM_STR, data.get(data.lastKey()).getSentence());
        sendBroadcast(i);
    } else {
        logEvent("Habitat Query Failed - " + callsign, true);
    }
}
From source file:statistic.ca.gui.JCAStatisticPanel.java
private void showDiagramm(String diagrammName) {
    if (diagrammName.equals("Grundinformationen")) {
        String[] columnNames = { "Bezeichnung", "Wert" };
        EvacuationCellularAutomaton tmpCA;
        int nrOfInd = 0;
        double evacSec = 0.0;
        double evacCAStep = 0;
        double notEvac = 0;
        double evac = 0;
        double notEvacNoExit = 0;
        double notEvacNoTime = 0;
        int bestEvacIndex = 0;
        int aveEvacIndex = 0;
        int worseEvacIndex = 0;
        TreeMap<Double, Integer> findMedian = new TreeMap<>();
        for (int i = 0; i < selectedBatchResultEntry.getCa().length; i++) {
            tmpCA = selectedBatchResultEntry.getCa()[i];
            nrOfInd += tmpCA.getInitialIndividualCount();
            evacSec += tmpCA.getSecondsPerStep() * tmpCA.getTimeStep();
            evacCAStep += tmpCA.getTimeStep();
            evac += tmpCA.getInitialIndividualCount() - tmpCA.deadIndividualsCount();
            notEvac += tmpCA.deadIndividualsCount();
            notEvacNoExit += tmpCA.getDeadIndividualCount(DeathCause.ExitUnreachable); // getNrOfExitUnreachableDeadIndividuals();
            notEvacNoTime += tmpCA.getDeadIndividualCount(DeathCause.NotEnoughTime); // getNrOfNotEnoughTimeDeadIndividuals();
            findMedian.put(tmpCA.getTimeStep() * tmpCA.getSecondsPerStep(), i);
        }
        bestEvacIndex = findMedian.firstEntry().getValue();
        for (int j = 0; j < findMedian.size() / 2; j++)
            findMedian.remove(findMedian.firstKey());
        aveEvacIndex = findMedian.get(findMedian.firstKey());
        worseEvacIndex = findMedian.get(findMedian.lastKey());
        Object[][] data = {
                { "Informationen für Modell", selectedBatchResultEntry.getName() },
                { "Evakuierungszeit in Sekunden", evacSec / selectedBatchResultEntry.getCa().length },
                { "Evakuierungszeit in ZA-Schritten", evacCAStep / selectedBatchResultEntry.getCa().length },
                { "Anzahl Individuen", (double) nrOfInd / selectedBatchResultEntry.getCa().length },
                { "evakuiert", evac / selectedBatchResultEntry.getCa().length },
                { "nicht evakuiert", notEvac / selectedBatchResultEntry.getCa().length },
                { "nicht evakuiert weil kein Ausgang erreichbar",
                        notEvacNoExit / selectedBatchResultEntry.getCa().length },
                { "nicht evakuiert weil die Zeit nicht gereicht hat",
                        notEvacNoTime / selectedBatchResultEntry.getCa().length },
                { "beste Evakuierungszeit (Durchlaufindex,Zeit)",
                        ("(" + (bestEvacIndex + 1) + " - "
                                + (selectedBatchResultEntry.getCa()[bestEvacIndex].getTimeStep()
                                        / selectedBatchResultEntry.getCa()[bestEvacIndex].getStepsPerSecond())
                                + ")") },
                { "durchschnit. Evakuierungszeit (Durchlaufindex,Zeit)",
                        ("(" + (aveEvacIndex + 1) + " - "
                                + (selectedBatchResultEntry.getCa()[aveEvacIndex].getTimeStep()
                                        / selectedBatchResultEntry.getCa()[aveEvacIndex].getStepsPerSecond())
                                + ")") },
                { "schlechteste Evakuierungszeit (Durchlaufindex,Zeit)",
                        ("(" + (worseEvacIndex + 1) + " - "
                                + (selectedBatchResultEntry.getCa()[worseEvacIndex].getTimeStep()
                                        / selectedBatchResultEntry.getCa()[worseEvacIndex].getStepsPerSecond())
                                + ")") } };
        basicInformationTable = new JTable(data, columnNames);
        basicInformationScrollPane = new JScrollPane(basicInformationTable);
        diagrams.addTable(diagrammName, basicInformationScrollPane, west);
    }
    if ((noIndividualsInAtLeastOneAssignmentIndex) && !(diagrammName.equals("Grundinformationen"))) {
        chartData = new ChartData("bar", "NO INDIVIDUALS in at least one of the chosen dataset(s)", "",
                new ArrayList<>(), new ArrayList<>());
        evakuierungsdauer = ChartFactory.createBarChart(
                "NO INDIVIDUALS in at least one of the chosen dataset(s)", "", chartData.getYAxisLabel(),
                chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true, false);
        diagrams.addChart("NO INDIVIDUALS in at least one of the chosen dataset(s)", evakuierungsdauer, west);
    } else {
        if (diagrammName.equals("Ausgangsverteilung")) {
            chartData = new ChartData("pie",
                    diagrammName + ":" + selectedBatchResultEntry.getName() + "-"
                            + assignmentGroups.get(assignmentIndexToShow.get(0)).toString(),
                    "Ausgänge", categoryDatasetValues, categoryDatasetAssignments);
            ausgangsverteilung = ChartFactory.createPieChart(
                    diagrammName + ":" + selectedBatchResultEntry.getName() + "-"
                            + assignmentGroups.get(assignmentIndexToShow.get(0)).toString(),
                    ChartData.getPieDataSet(), false, true, false);
            diagrams.addChart(diagrammName, ausgangsverteilung, west);
        }
        if (diagrammName.equals("Ankunftskurve")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            ankunftskurve = ChartFactory.createXYLineChart(diagrammName, chartData.getYAxisLabel(),
                    "Individuen", (XYDataset) datasetCollection, PlotOrientation.VERTICAL, true, true, false);
            diagrams.addChart(diagrammName, ankunftskurve, west);
        }
        if (diagrammName.equals("Evakuierungsdauer")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            evakuierungsdauer = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, evakuierungsdauer, west);
        }
        if (diagrammName.equals("evakuierte Individuen in Prozent")) {
            chartData = new ChartData("pie",
                    diagrammName + ":" + selectedBatchResultEntry.getName() + "-"
                            + assignmentGroups.get(assignmentIndexToShow.get(0)).toString(),
                    "Individuen", categoryDatasetValues, categoryDatasetAssignments);
            evakuierteIndividueninProzent = ChartFactory.createPieChart(
                    diagrammName + ":" + selectedBatchResultEntry.getName() + "-"
                            + assignmentGroups.get(assignmentIndexToShow.get(0)).toString(),
                    ChartData.getPieDataSet(), false, true, false);
            diagrams.addChart(diagrammName, evakuierteIndividueninProzent, west);
        }
        if (diagrammName.equals("maximale Blockadezeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            maxblockadezeit = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, maxblockadezeit, west);
        }
        if (diagrammName.equals("durchschnittliche Blockadezeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            aveblockadezeit = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, aveblockadezeit, west);
        }
        if (diagrammName.equals("minimale Blockadezeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            minblockadezeit = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, minblockadezeit, west);
        }
        if (diagrammName.equals("zurückgelegte Distanz")) {
            chartData = new ChartData("bar", diagrammName, "Meter [m]", categoryDatasetValues,
                    categoryDatasetAssignments);
            zurueckgelegteDistanz = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, zurueckgelegteDistanz, west);
        }
        if (diagrammName.equals("minimale Distanz zum initialen Ausgang")) {
            chartData = new ChartData("bar", diagrammName, "Meter [m]", categoryDatasetValues,
                    categoryDatasetAssignments);
            minimaleDistanzzuminitialenAusgang = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, minimaleDistanzzuminitialenAusgang, west);
        }
        if (diagrammName.equals("minimale Distanz zum nächsten Ausgang")) {
            chartData = new ChartData("bar", diagrammName, "Meter [m]", categoryDatasetValues,
                    categoryDatasetAssignments);
            minimaleDistanzzumnaechstenAusgang = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, minimaleDistanzzumnaechstenAusgang, west);
        }
        if (diagrammName.equals("maximale Zeit bis Safe")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            maxZeitBisSafe = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, maxZeitBisSafe, west);
        }
        if (diagrammName.equals("durchschnittliche Zeit bis Safe")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            aveZeitBisSafe = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, aveZeitBisSafe, west);
        }
        if (diagrammName.equals("minimale Zeit bis Safe")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            minZeitBisSafe = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, minZeitBisSafe, west);
        }
        if (diagrammName.equals("Distanz über Zeit")) {
            chartData = new ChartData("bar", diagrammName, "Meter [m]", categoryDatasetValues,
                    categoryDatasetAssignments);
            distanzueberZeit = ChartFactory.createXYLineChart(diagrammName, "Zeit [s]",
                    chartData.getYAxisLabel(), (XYDataset) datasetCollection, PlotOrientation.VERTICAL, true,
                    true, false);
            diagrams.addChart(diagrammName, distanzueberZeit, west);
        }
        if (diagrammName.equals("maximale Geschwindigkeit über Zeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            maximaleGeschwindigkeitueberZeit = ChartFactory.createXYLineChart(diagrammName,
                    chartData.getYAxisLabel(), "Meter pro Sekunde [m/s]", (XYDataset) datasetCollection,
                    PlotOrientation.VERTICAL, true, true, false);
            diagrams.addChart(diagrammName, maximaleGeschwindigkeitueberZeit, west);
        }
        if (diagrammName.equals("durschnittliche Geschwindigkeit über Zeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            durschnittlicheGeschwindigkeitueberZeit = ChartFactory.createXYLineChart(diagrammName,
                    chartData.getYAxisLabel(), "Meter pro Sekunde [m/s]", (XYDataset) datasetCollection,
                    PlotOrientation.VERTICAL, true, true, false);
            diagrams.addChart(diagrammName, durschnittlicheGeschwindigkeitueberZeit, west);
        }
        if (diagrammName.equals("maximale Geschwindigkeit")) {
            chartData = new ChartData("bar", diagrammName, "Meter pro Sekunde [m/s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            maximaleGeschwindigkeit = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, maximaleGeschwindigkeit, west);
        }
        if (diagrammName.equals("durchschnittliche Geschwindigkeit")) {
            chartData = new ChartData("bar", diagrammName, "Meter pro Sekunde [m/s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            durchschnittlicheGeschwindigkeit = ChartFactory.createBarChart(diagrammName, "Belegungen",
                    chartData.getYAxisLabel(), chartData.getCDataSet(), PlotOrientation.VERTICAL, false, true,
                    false);
            diagrams.addChart(diagrammName, durchschnittlicheGeschwindigkeit, west);
        }
        if (diagrammName.equals("Panik über Zeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            panik = ChartFactory.createXYLineChart(diagrammName, chartData.getYAxisLabel(), "Panik",
                    (XYDataset) datasetCollection, PlotOrientation.VERTICAL, true, true, false);
            diagrams.addChart(diagrammName, panik, west);
        }
        if (diagrammName.equals("Erschöpfung über Zeit")) {
            chartData = new ChartData("bar", diagrammName, "Zeit [s]", categoryDatasetValues,
                    categoryDatasetAssignments);
            erschoepfung = ChartFactory.createXYLineChart(diagrammName, chartData.getYAxisLabel(),
                    "Erschöpfung", (XYDataset) datasetCollection, PlotOrientation.VERTICAL, true, true, false);
            diagrams.addChart(diagrammName, erschoepfung, west);
        }
    } // end else
    categoryDatasetValues = new ArrayList<>();
    categoryDatasetAssignments = new ArrayList<>();
    // dataset = new XYSeries("");
    datasetCollection = new XYSeriesCollection();
    diagrams.validate();
}
From source file:org.cloudfoundry.client.lib.rest.CloudControllerClientImpl.java
@Override
public Map<String, String> getCrashLogs(String appName) {
    String urlPath = getFileUrlPath();
    CrashesInfo crashes = getCrashes(appName);
    if (crashes.getCrashes().isEmpty()) {
        return Collections.emptyMap();
    }
    TreeMap<Date, String> crashInstances = new TreeMap<Date, String>();
    for (CrashInfo crash : crashes.getCrashes()) {
        crashInstances.put(crash.getSince(), crash.getInstance());
    }
    String instance = crashInstances.get(crashInstances.lastKey());
    return doGetLogs(urlPath, appName, instance);
}