List of usage examples for java.util.TreeMap.firstKey()
public K firstKey()
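Before the full-size examples below, a minimal self-contained sketch of the method's contract: firstKey() returns the lowest key in the map's sort order and throws NoSuchElementException on an empty map. Class and data are illustrative.

import java.util.NoSuchElementException;
import java.util.TreeMap;

public class FirstKeyBasics {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        map.put("banana", 2);
        map.put("apple", 1);
        map.put("cherry", 3);

        // Keys are kept in ascending order, so firstKey() is the lowest one.
        System.out.println(map.firstKey()); // prints "apple"

        map.clear();
        try {
            map.firstKey(); // an empty TreeMap has no first key
        } catch (NoSuchElementException e) {
            System.out.println("firstKey() throws on an empty map");
        }
    }
}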
From source file:org.kuali.ole.module.purap.document.web.struts.AccountsPayableActionBase.java
/**
 * Builds and asks questions which require text input by the user for a payment request or a credit memo.
 *
 * @param mapping An ActionMapping
 * @param form An ActionForm
 * @param request The HttpServletRequest
 * @param response The HttpServletResponse
 * @param questionType A String used to distinguish which question is being asked
 * @param notePrefix A String explaining what action was taken, to be prepended to the note containing the reason,
 *        which gets written to the document
 * @param operation A one-word String description of the action to be taken, to be substituted into the message.
 *        (Can be an empty String for some messages.)
 * @param messageKey A (whole) key to the message which will appear on the question screen
 * @param questionsAndCallbacks A TreeMap associating the type of question to be asked and the type of callback
 *        which should happen in that case
 * @param messagePrefix The most general part of a key to a message text to be retrieved from
 *        ConfigurationService. Describes a collection of questions.
 * @param redirect An ActionForward to return to if done with questions
 * @return An ActionForward
 * @throws Exception
 */
protected ActionForward askQuestionWithInput(ActionMapping mapping, ActionForm form, HttpServletRequest request,
        HttpServletResponse response, String questionType, String notePrefix, String operation, String messageKey,
        TreeMap<String, PurQuestionCallback> questionsAndCallbacks, String messagePrefix, ActionForward redirect)
        throws Exception {
    KualiDocumentFormBase kualiDocumentFormBase = (KualiDocumentFormBase) form;
    AccountsPayableDocumentBase apDocument = (AccountsPayableDocumentBase) kualiDocumentFormBase.getDocument();

    String question = request.getParameter(OLEConstants.QUESTION_INST_ATTRIBUTE_NAME);
    String reason = request.getParameter(OLEConstants.QUESTION_REASON_ATTRIBUTE_NAME);
    String noteText = "";

    ConfigurationService kualiConfiguration = SpringContext.getBean(ConfigurationService.class);
    String firstQuestion = questionsAndCallbacks.firstKey();
    PurQuestionCallback callback = null;
    Iterator questions = questionsAndCallbacks.keySet().iterator();
    String mapQuestion = null;
    String key = null;

    // Start in logic for confirming the close.
    if (question == null) {
        key = getQuestionProperty(messageKey, messagePrefix, kualiConfiguration, firstQuestion);
        String message = StringUtils.replace(key, "{0}", operation);

        // Ask question if not already asked.
        return this.performQuestionWithInput(mapping, form, request, response, firstQuestion, message,
                OLEConstants.CONFIRMATION_QUESTION, questionType, "");
    } else {
        // find callback for this question
        while (questions.hasNext()) {
            mapQuestion = (String) questions.next();
            if (StringUtils.equals(mapQuestion, question)) {
                callback = questionsAndCallbacks.get(mapQuestion);
                break;
            }
        }
        key = getQuestionProperty(messageKey, messagePrefix, kualiConfiguration, mapQuestion);

        Object buttonClicked = request.getParameter(OLEConstants.QUESTION_CLICKED_BUTTON);
        if (question.equals(mapQuestion) && buttonClicked.equals(ConfirmationQuestion.NO)) {
            // If 'No' is the button clicked, just reload the doc
            String nextQuestion = null;
            // ask another question if more left
            if (questions.hasNext()) {
                nextQuestion = (String) questions.next();
                key = getQuestionProperty(messageKey, messagePrefix, kualiConfiguration, nextQuestion);

                return this.performQuestionWithInput(mapping, form, request, response, nextQuestion, key,
                        OLEConstants.CONFIRMATION_QUESTION, questionType, "");
            } else {
                return mapping.findForward(OLEConstants.MAPPING_BASIC);
            }
        }

        // Have to check length on value entered.
        String introNoteMessage = notePrefix + OLEConstants.BLANK_SPACE;

        // Build out full message.
        noteText = introNoteMessage + reason;
        int noteTextLength = noteText.length();

        // Get note text max length from DD.
        int noteTextMaxLength = SpringContext.getBean(DataDictionaryService.class)
                .getAttributeMaxLength(Note.class, OLEConstants.NOTE_TEXT_PROPERTY_NAME).intValue();
        if (StringUtils.isBlank(reason) || (noteTextLength > noteTextMaxLength)) {
            // Figure out exact number of characters that the user can enter.
            int reasonLimit = noteTextMaxLength - noteTextLength;
            if (reason == null) {
                // Prevent a NPE by setting the reason to a blank string.
                reason = "";
            }

            return this.performQuestionWithInputAgainBecauseOfErrors(mapping, form, request, response,
                    mapQuestion, key, OLEConstants.CONFIRMATION_QUESTION, questionType, "", reason,
                    PurapKeyConstants.ERROR_PAYMENT_REQUEST_REASON_REQUIRED,
                    OLEConstants.QUESTION_REASON_ATTRIBUTE_NAME, new Integer(reasonLimit).toString());
        }
    }

    // make callback
    if (ObjectUtils.isNotNull(callback)) {
        AccountsPayableDocument refreshedApDocument = callback.doPostQuestion(apDocument, noteText);
        kualiDocumentFormBase.setDocument(refreshedApDocument);
    }

    String nextQuestion = null;
    // ask another question if more left
    if (questions.hasNext()) {
        nextQuestion = (String) questions.next();
        key = getQuestionProperty(messageKey, messagePrefix, kualiConfiguration, nextQuestion);

        return this.performQuestionWithInput(mapping, form, request, response, nextQuestion, key,
                OLEConstants.CONFIRMATION_QUESTION, questionType, "");
    }

    return redirect;
}
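The method above seeds the dialog with questionsAndCallbacks.firstKey() and then walks the remaining keys in ascending order with a keySet() iterator. A stripped-down sketch of that pattern, with made-up question keys and Runnable standing in for PurQuestionCallback:

import java.util.Iterator;
import java.util.TreeMap;

public class QuestionSequence {
    public static void main(String[] args) {
        TreeMap<String, Runnable> questions = new TreeMap<>();
        questions.put("1.confirm", () -> System.out.println("confirm callback"));
        questions.put("2.reason", () -> System.out.println("reason callback"));
        questions.put("3.finalize", () -> System.out.println("finalize callback"));

        // firstKey() gives the first question to ask; the keySet iterator
        // then yields the rest in the same ascending order.
        String current = questions.firstKey();
        System.out.println("asking: " + current);
        questions.get(current).run(); // run its callback

        Iterator<String> it = questions.keySet().iterator();
        while (it.hasNext()) {
            String next = it.next();
            if (!next.equals(current)) {
                System.out.println("next up: " + next);
            }
        }
    }
}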
From source file:edu.utexas.cs.tactex.tariffoptimization.TariffOptimizerBinaryOneShot.java
/**
 * Evaluate suggestedSpecs.get(index), and record the result to utilToIndex
 * and result
 *
 * @param index
 * @param utilToIndex
 * @param result
 * @param suggestedSpecs
 * @param consideredTariffActions
 * @param tariffSubscriptions
 * @param competingTariffs
 * @param costCurvesPredictor
 * @param currentTimeslot
 * @param me
 * @param customer2ShiftedEnergy
 * @param customer2RelevantTariffCharges
 * @param customer2NonShiftedEnergy
 */
private void evaluateAndRecord(int index, TreeMap<Double, Integer> utilToIndex,
        TreeMap<Double, TariffSpecification> result, List<TariffSpecification> suggestedSpecs,
        ArrayList<TariffSpecification> consideredTariffActions,
        HashMap<TariffSpecification, HashMap<CustomerInfo, Integer>> tariffSubscriptions,
        List<TariffSpecification> competingTariffs, CostCurvesPredictor costCurvesPredictor,
        int currentTimeslot, Broker me,
        HashMap<CustomerInfo, HashMap<TariffSpecification, ShiftedEnergyData>> customer2ShiftedEnergy,
        HashMap<CustomerInfo, ArrayRealVector> customer2NonShiftedEnergy,
        HashMap<CustomerInfo, HashMap<TariffSpecification, Double>> customer2RelevantTariffCharges) {
    TreeMap<Double, TariffSpecification> sortedTariffs;
    consideredTariffActions.clear();
    consideredTariffActions.add(suggestedSpecs.get(index));
    //log.info("computing utilities");
    sortedTariffs = utilityEstimator.estimateUtilities(consideredTariffActions, tariffSubscriptions,
            competingTariffs, customer2RelevantTariffCharges, customer2ShiftedEnergy,
            customer2NonShiftedEnergy, marketPredictionManager, costCurvesPredictor, currentTimeslot, me);
    utilToIndex.put(sortedTariffs.lastEntry().getKey(), index);
    // maintain top 3
    if (utilToIndex.size() > 3) {
        utilToIndex.remove(utilToIndex.firstKey());
    }
    result.putAll(sortedTariffs);
}
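The "maintain top 3" step works because firstKey() is the smallest utility in the map: once a fourth entry arrives, the smallest is evicted. A minimal sketch with invented utility values:

import java.util.TreeMap;

public class TopThree {
    public static void main(String[] args) {
        TreeMap<Double, Integer> utilToIndex = new TreeMap<>();
        double[] utilities = { 5.0, 9.5, 1.2, 7.7, 3.3 };
        for (int index = 0; index < utilities.length; index++) {
            utilToIndex.put(utilities[index], index);
            // keep only the 3 largest utilities: firstKey() is the smallest
            if (utilToIndex.size() > 3) {
                utilToIndex.remove(utilToIndex.firstKey());
            }
        }
        System.out.println(utilToIndex); // {5.0=0, 7.7=3, 9.5=1}
    }
}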
From source file:us.levk.math.linear.EucledianDistanceClusterer.java
public Cluster eucledian(final RealMatrix original) throws IOException {
    try (HugeRealMatrix distances = new HugeRealMatrix(original.getRowDimension(), original.getRowDimension())) {
        final Map<Integer, Cluster> genehash = new HashMap<Integer, Cluster>() {
            private static final long serialVersionUID = 1L;

            {
                for (int index = original.getRowDimension(); --index >= 0; put(index, new Cluster(index)))
                    ;
            }
        };
        TreeMap<Double, int[]> sorted = new TreeMap<>();

        log.debug("Populating distance matrix");
        for (int i = 0; i < original.getRowDimension(); i++) {
            for (int j = i + 1; j < original.getRowDimension(); j++) {
                // Euclidean distance calculation.
                double total = 0;
                for (int k = 0; k < original.getColumnDimension(); k++) {
                    double left = original.getEntry(i, k);
                    double right = original.getEntry(j, k);
                    if (!isNaN(left) && !isNaN(right) && !isInfinite(left) && !isInfinite(right))
                        total += Math.pow(left - right, 2);
                }
                double distance = Math.pow(total, 0.5);

                distances.setEntry(i, j, distance);
                distances.setEntry(j, i, distance);

                int[] genePair = { i, j };
                // Enter the distance calculated and the genes measured into a
                // treemap. Will be automatically sorted.
                sorted.put(distance, genePair);
            }
        }
        log.debug("Initialized distances matrix " + distances);

        while (true) {
            // Get the first key of the TreeMap. Will be the shortest distance de facto.
            final double minkey = (Double) sorted.firstKey();
            int[] minValues = (int[]) sorted.firstEntry().getValue();
            final int value1 = minValues[0], value2 = minValues[1];
            // find
            Cluster cluster = new Cluster(genehash.get(value1), genehash.get(value2)) {
                {
                    log.debug("Generating cluster from " + value1 + " and " + value2 + " in " + genehash);
                    contains().addAll(genehash.get(value1).contains());
                    contains().addAll(genehash.get(value2).contains());
                    d(minkey);
                    log.debug("Generated cluster " + this);
                }
            };

            genehash.put(cluster.id(), cluster);
            genehash.remove(value1);
            genehash.remove(value2);

            if (genehash.size() <= 1)
                break;

            // Iterate over all the current clusters to remeasure distance with the
            // previously clustered group.
            for (Cluster c : genehash.values()) {
                // Skip measuring the new cluster with itself.
                if (c == cluster)
                    continue;

                double distance = 0;
                int n = 0;

                // Get genes from each cluster. Distance is measured from each element
                // to every element.
                for (int current : c.contains())
                    for (int created : cluster.contains()) {
                        distance += distances.getEntry(current, created);
                        n++;
                    }
                distance = distance / n;

                int[] valuePair = { c.id(), cluster.id() };
                sorted.put(distance, valuePair);
            }

            // Get the shortest distance.
            // Check to make sure shortest distance does not include a gene pair that
            // has already had its elements clustered.
            boolean minimized = false;
            while (!minimized) {
                double mk = sorted.firstKey();
                minValues = sorted.firstEntry().getValue();
                // If the gene pair is not present in the current gene set, remove this
                // distance.
                if (!genehash.containsKey(minValues[0]) || !genehash.containsKey(minValues[1]))
                    sorted.remove(mk);
                else
                    minimized = true;
            }
        }

        return genehash.entrySet().iterator().next().getValue();
    }
}
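Here the TreeMap acts as a priority queue keyed by distance, with firstKey()/firstEntry() as the min-extraction step. A compact sketch of that idiom (it shares the example's caveat that equal distance keys overwrite one another):

import java.util.TreeMap;

public class NearestPair {
    public static void main(String[] args) {
        // distance -> pair of row indices; sorted ascending by distance
        TreeMap<Double, int[]> sorted = new TreeMap<>();
        sorted.put(2.5, new int[] { 0, 3 });
        sorted.put(0.7, new int[] { 1, 2 });
        sorted.put(4.1, new int[] { 0, 1 });

        // firstKey()/firstEntry() yield the minimum distance and its pair
        double minKey = sorted.firstKey();
        int[] pair = sorted.firstEntry().getValue();
        System.out.println("closest pair " + pair[0] + "," + pair[1] + " at " + minKey);
    }
}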
From source file:org.wso2.carbon.governance.registry.extensions.executors.ServiceVersionExecutor.java
private String reformatPath(String path, String currentExpression, String targetExpression,
        String newResourceVersion) throws RegistryException {
    TreeMap<Integer, String> indexMap = new TreeMap<Integer, String>();
    String returnPath = targetExpression;
    String prefix;

    if (currentExpression.equals(targetExpression)) {
        return path;
    }

    indexMap.put(currentExpression.indexOf(RESOURCE_NAME), RESOURCE_NAME);
    indexMap.put(currentExpression.indexOf(RESOURCE_PATH), RESOURCE_PATH);
    indexMap.put(currentExpression.indexOf(RESOURCE_VERSION), RESOURCE_VERSION);

    String tempExpression = currentExpression;
    while (indexMap.lastKey() < tempExpression.lastIndexOf(RegistryConstants.PATH_SEPARATOR)) {
        tempExpression = tempExpression.substring(0,
                tempExpression.lastIndexOf(RegistryConstants.PATH_SEPARATOR));
        path = path.substring(0, path.lastIndexOf(RegistryConstants.PATH_SEPARATOR));
    }

    prefix = currentExpression.substring(0, currentExpression.indexOf(indexMap.get(indexMap.higherKey(-1))));
    if (!path.startsWith(prefix)) {
        return path;
    }
    path = path.replace(prefix, "");

    while (true) {
        if (indexMap.firstKey() < 0) {
            indexMap.pollFirstEntry();
        } else {
            break;
        }
    }

    while (true) {
        if (indexMap.size() == 0) {
            break;
        }
        Map.Entry lastEntry = indexMap.pollLastEntry();
        if (lastEntry.getValue().equals(RESOURCE_PATH)) {
            String pathValue = path;
            for (int i = 0; i < indexMap.size(); i++) {
                // pathValue = formatPath(pathValue.substring(path.indexOf(RegistryConstants.PATH_SEPARATOR)));
                pathValue = formatPath(
                        pathValue.substring(pathValue.indexOf(RegistryConstants.PATH_SEPARATOR)));
            }
            if (!pathValue.equals("")) {
                returnPath = returnPath.replace(RESOURCE_PATH, formatPath(pathValue));
                path = path.replace(pathValue, "");
            } else {
                returnPath = returnPath.replace("/" + lastEntry.getValue(), "");
            }
            continue;
        }
        if (lastEntry.getValue().equals(RESOURCE_VERSION)) {
            returnPath = returnPath.replace(RESOURCE_VERSION, newResourceVersion);
            if (path.contains("/")) {
                path = path.substring(0, path.lastIndexOf(RegistryConstants.PATH_SEPARATOR));
            } else {
                path = "";
            }
            continue;
        }
        String tempPath;
        if (path.contains("/")) {
            tempPath = path.substring(path.lastIndexOf(RegistryConstants.PATH_SEPARATOR) + 1);
        } else {
            tempPath = path;
        }
        if (!tempPath.equals("")) {
            returnPath = returnPath.replace((String) lastEntry.getValue(), formatPath(tempPath));
            if (path.contains("/")) {
                path = path.substring(0, path.lastIndexOf(RegistryConstants.PATH_SEPARATOR));
            } else {
                path = "";
            }
        } else {
            returnPath = returnPath.replace("/" + lastEntry.getValue(), "");
            if (path.contains("/")) {
                path = path.substring(0, path.lastIndexOf(RegistryConstants.PATH_SEPARATOR));
            }
        }
    }

    // Adding the version validation here.
    if (!newResourceVersion.matches("^\\d+[.]\\d+[.]\\d+(-[a-zA-Z0-9]+)?$")) {
        String message = "Invalid version found for " + RegistryUtils.getResourceName(path);
        log.error(message);
        throw new RegistryException(message);
    }

    if (returnPath.contains(RESOURCE_VERSION)) {
        return returnPath.replace(RESOURCE_VERSION, newResourceVersion);
    }
    return returnPath;
}
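The first while loop above polls entries while firstKey() is negative, discarding placeholders whose token was absent from the expression (String.indexOf returns -1 for those). A minimal sketch of that pruning idiom with hypothetical tokens:

import java.util.TreeMap;

public class DropMissingTokens {
    public static void main(String[] args) {
        String expression = "/base/{name}/{version}";
        TreeMap<Integer, String> indexMap = new TreeMap<>();
        indexMap.put(expression.indexOf("{name}"), "{name}");
        indexMap.put(expression.indexOf("{version}"), "{version}");
        indexMap.put(expression.indexOf("{path}"), "{path}"); // absent -> key -1

        // firstKey() is the smallest index; poll while it marks a missing token
        while (!indexMap.isEmpty() && indexMap.firstKey() < 0) {
            indexMap.pollFirstEntry();
        }
        System.out.println(indexMap); // {6={name}, 13={version}}
    }
}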
From source file:org.apache.hadoop.hbase.master.LoadBalancer.java
/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server. If the average is an integer number,
 * all servers will be balanced to the average. Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions. Stop once you reach a
 *     server that already has <= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions. Stop once you reach a server that
 *     already has >= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step. It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>. If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server but
 *     ended up with regions that were unassigned from overloaded servers but
 *     that still do not have assignment.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return. Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single region from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting >= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing. This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can at-most reassign the number of regions away from a particular
 *       server to be how many they report as most loaded.
 *       Should we just keep all assignment in memory? Any objections?
 *       Does this mean we need HeapSize on HMaster? Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterState Map of regionservers and their load/region information to
 *                     a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<HServerInfo, List<HRegionInfo>> clusterState) {
    long startTime = System.currentTimeMillis();

    // Make a map sorted by load and count regions
    TreeMap<HServerInfo, List<HRegionInfo>> serversByLoad = new TreeMap<HServerInfo, List<HRegionInfo>>(
            new HServerInfo.LoadComparator());
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : clusterState.entrySet()) {
        server.getKey().getLoad().setNumberOfRegions(server.getValue().size());
        numRegions += server.getKey().getLoad().getNumberOfRegions();
        serversByLoad.put(server.getKey(), server.getValue());
    }

    // Check if we even need to do any load balancing
    float average = (float) numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if (serversByLoad.lastKey().getLoad().getNumberOfRegions() <= ceiling
            && serversByLoad.firstKey().getLoad().getNumberOfRegions() >= floor) {
        // Skipped because no server outside (min,max) range
        LOG.info("Skipping load balancing. servers=" + numServers + " " + "regions=" + numRegions
                + " average=" + average + " " + "mostloaded="
                + serversByLoad.lastKey().getLoad().getNumberOfRegions() + " leastloaded="
                + serversByLoad.firstKey().getLoad().getNumberOfRegions());
        return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    List<RegionPlan> regionsToMove = new ArrayList<RegionPlan>();
    int regionidx = 0; // track the index in above list for setting destination

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    Map<HServerInfo, BalanceInfo> serverBalanceInfo = new TreeMap<HServerInfo, BalanceInfo>();
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        HServerInfo serverInfo = server.getKey();
        int regionCount = serverInfo.getLoad().getNumberOfRegions();
        if (regionCount <= max) {
            serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = randomize(server.getValue());
        int numToOffload = Math.min(regionCount - max, regions.size());
        int numTaken = 0;
        for (int i = regions.size() - 1; i >= 0; i--) {
            HRegionInfo hri = regions.get(i);
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, serverInfo, null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
        }
        serverBalanceInfo.put(serverInfo, new BalanceInfo(numToOffload, (-1) * numTaken));
    }

    // Walk down least loaded, filling each to the min
    int serversUnderloaded = 0; // number of servers that get new regions
    int neededRegions = 0; // number of regions needed to bring all up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min) {
            break;
        }
        serversUnderloaded++;
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
        serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken));
        // If we still want to take some, increment needed
        if (numTaken < numToTake) {
            neededRegions += (numToTake - numTaken);
        }
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionidx == regionsToMove.size()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
                + regionsToMove.size() + " regions off of " + serversOverloaded
                + " overloaded servers onto " + serversUnderloaded + " less loaded servers");
        return regionsToMove;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey(), null));
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (regionidx != regionsToMove.size()) {
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad().getNumberOfRegions();
            if (regionCount >= max) {
                break;
            }
            regionsToMove.get(regionidx).setDestination(server.getKey());
            regionidx++;
            if (regionidx == regionsToMove.size()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (regionidx != regionsToMove.size() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionidx=" + regionidx + ", regionsToMove=" + regionsToMove.size() + ", numServers="
                + numServers + ", serversOverloaded=" + serversOverloaded + ", serversUnderloaded="
                + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<HServerInfo, List<HRegionInfo>> e : clusterState.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().getServerName());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
            + regionsToMove.size() + " regions off of " + serversOverloaded
            + " overloaded servers onto " + serversUnderloaded + " less loaded servers");
    return regionsToMove;
}
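Because the map is ordered by a load comparator, firstKey() is the least-loaded server and lastKey() the most-loaded, which is exactly what the sloppiness check reads. A toy sketch of that least/most-loaded lookup; server names and counts are invented:

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class LoadExtremes {
    public static void main(String[] args) {
        Map<String, Integer> regionCount = new HashMap<>();
        regionCount.put("rs1", 12);
        regionCount.put("rs2", 3);
        regionCount.put("rs3", 7);

        // sort server names by their load; break ties by name so keys stay distinct
        Comparator<String> byLoad = Comparator.comparing((String s) -> regionCount.get(s))
                .thenComparing(Comparator.naturalOrder());
        TreeMap<String, Integer> serversByLoad = new TreeMap<>(byLoad);
        serversByLoad.putAll(regionCount);

        // firstKey() = least loaded server, lastKey() = most loaded
        System.out.println("least loaded: " + serversByLoad.firstKey()); // rs2
        System.out.println("most loaded: " + serversByLoad.lastKey());   // rs1
    }
}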
From source file:org.eclipse.dataset.Stats.java
public static double[] outlierValuesMap(final Dataset a, int nl, int nh) {
    final TreeMap<Double, Integer> lMap = new TreeMap<Double, Integer>();
    final TreeMap<Double, Integer> hMap = new TreeMap<Double, Integer>();

    int ml = 0;
    int mh = 0;
    IndexIterator it = a.getIterator();
    while (it.hasNext()) {
        Double x = a.getElementDoubleAbs(it.index);
        Integer i;
        if (ml == nl) {
            Double k = lMap.lastKey();
            if (x < k) {
                i = lMap.get(k) - 1;
                if (i == 0) {
                    lMap.remove(k);
                } else {
                    lMap.put(k, i);
                }
                i = lMap.get(x);
                if (i == null) {
                    lMap.put(x, 1);
                } else {
                    lMap.put(x, i + 1);
                }
            }
        } else {
            i = lMap.get(x);
            if (i == null) {
                lMap.put(x, 1);
            } else {
                lMap.put(x, i + 1);
            }
            ml++;
        }

        if (mh == nh) {
            Double k = hMap.firstKey();
            if (x > k) {
                i = hMap.get(k) - 1;
                if (i == 0) {
                    hMap.remove(k);
                } else {
                    hMap.put(k, i);
                }
                i = hMap.get(x);
                if (i == null) {
                    hMap.put(x, 1);
                } else {
                    hMap.put(x, i + 1);
                }
            }
        } else {
            i = hMap.get(x);
            if (i == null) {
                hMap.put(x, 1);
            } else {
                hMap.put(x, i + 1);
            }
            mh++;
        }
    }

    // Attempt to make values distinct
    double lx = lMap.lastKey();
    double hx = hMap.firstKey();
    if (lx >= hx) {
        Double h = hMap.higherKey(lx);
        if (h != null) {
            hx = h;
            mh--;
        } else {
            Double l = lMap.lowerKey(hx);
            if (l != null) {
                lx = l;
                ml--;
            }
        }
    }
    return new double[] { lMap.lastKey(), hMap.firstKey(), ml, mh };
}
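Both maps above are bounded multisets (value -> occurrence count) capped at nl and nh entries; for the high map, firstKey() is the smallest of the nh largest values seen so far, so any larger incoming value evicts it. A minimal sketch of that keep-the-n-largest logic, using Map.merge for the count bookkeeping instead of the example's explicit get/put:

import java.util.TreeMap;

public class LargestN {
    public static void main(String[] args) {
        int nh = 3; // how many of the largest values to track
        double[] data = { 4.0, 9.0, 1.0, 7.0, 7.0, 2.0, 8.0 };

        TreeMap<Double, Integer> hMap = new TreeMap<>(); // value -> count
        int mh = 0; // elements currently tracked
        for (double x : data) {
            if (mh == nh) {
                double k = hMap.firstKey(); // smallest tracked value, always present
                if (x > k) {
                    // evict one occurrence of the smallest (null removes the entry), admit x
                    hMap.merge(k, -1, (count, dec) -> count + dec == 0 ? null : count + dec);
                    hMap.merge(x, 1, Integer::sum);
                }
            } else {
                hMap.merge(x, 1, Integer::sum);
                mh++;
            }
        }
        System.out.println(hMap); // {7.0=1, 8.0=1, 9.0=1}
    }
}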
From source file:org.apache.hadoop.mapred.HFSPScheduler.java
/**
 * Preempt missingSlots number of slots from bigger jobs
 *
 * @param jip
 *          Job that claims slots
 * @param allJobs
 *          all the size based jobs in the cluster
 * @param localJobs
 *          size based jobs that can be immediately suspended
 * @param missingSlots
 *          number of slots to claim
 * @param numToSkip
 *          number of slots on non-local
 *
 * @return number of tasks preempted in the cluster for jip. The first elem of
 *         the tuple is the number of tasks preempted for new tasks, the
 *         second is the number of tasks preempted for tasks to be resumed
 */
private ClaimedSlots claimSlots(HelperForType helper, final Phase phase, final JobInProgress jip,
        int missingNewSlots, int missingResumableSlots, int numToSkip,
        TreeMap<JobDurationInfo, JobInProgress> allJobs, TreeMap<JobDurationInfo, TaskStatuses> localJobs) {

    assert phase == Phase.SIZE_BASED || missingResumableSlots == 0;

    final TaskType type = helper.taskType;
    JobDurationInfo jdi = this.getDuration(jip.getJobID(), type);

    /* #size based tasks that occupies train slots in the cluster (suspendable) */
    int numTasksToPreempt = 0;
    if (phase == Phase.TRAIN) {
        /** num of size based tasks that can be suspended for training */
        int numOverflowSizeBasedTasks = helper.maxSizeBasedSlots > helper.runningSizeBasedTasks ? 0
                : helper.runningSizeBasedTasks - helper.maxSizeBasedSlots;

        /* num of size based tasks to preempt for the training of jip */
        numTasksToPreempt = Math.min(missingNewSlots, numOverflowSizeBasedTasks);
        if (LOG.isDebugEnabled()) {
            LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type + "):"
                    + " numOverflowSizeBasedTasks: " + numOverflowSizeBasedTasks
                    + " numTasksToPreempt: " + numTasksToPreempt
                    + " missingNewSlots: " + missingNewSlots
                    + " numTrainTasksForJob: " + helper.numTrainTasksForJob
                    + " canAssignTrain: " + helper.canAssignTrain()
                    + " numToSkip: " + numToSkip);
        }
    } else {
        numTasksToPreempt = missingNewSlots;
        if (LOG.isDebugEnabled()) {
            LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type + "):"
                    + " missingNewSlots: " + missingNewSlots
                    + " missingResumableSlots: " + missingResumableSlots
                    + " numTrainTasksForJob: " + helper.numTrainTasksForJob
                    + " canAssignTrain: " + helper.canAssignTrain()
                    + " numToSkip: " + numToSkip);
        }
    }
    final int startingNumTasksToPreemptForNew = numTasksToPreempt;
    final int startingResumableSlots = missingResumableSlots;

    // try to free pendingTasks number of slots among running on this TT
    Iterator<Entry<JobDurationInfo, JobInProgress>> sizeBasedJobsDescIter = allJobs.descendingMap()
            .entrySet().iterator();
    Iterator<Entry<JobDurationInfo, TaskStatuses>> sizeBasedJobsDescIterOnTT = localJobs.entrySet().iterator();

    Entry<JobDurationInfo, TaskStatuses> biggerOnTT = sizeBasedJobsDescIterOnTT.hasNext()
            ? sizeBasedJobsDescIterOnTT.next() : null;
    while (this.preemptionStrategy.isPreemptionActive()
            && (numTasksToPreempt > 0 || missingResumableSlots > 0)) {
        if (!sizeBasedJobsDescIter.hasNext()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type + "):"
                        + " should preempt " + numTasksToPreempt + " for new tasks and "
                        + missingResumableSlots + " for resumable "
                        + "tasks but no sizeBasedJob is running");
            }
            break;
        }

        Entry<JobDurationInfo, JobInProgress> nextSBJ = sizeBasedJobsDescIter.next();
        JobInProgress jipToPreempt = nextSBJ.getValue();

        /* don't try to suspend if jip is bigger than any other jip */
        if (jdi != null) {
            if (jipToPreempt.getJobID().equals(jip.getJobID())) {
                return new ClaimedSlots(startingNumTasksToPreemptForNew - numTasksToPreempt,
                        startingResumableSlots - missingResumableSlots);
            }
            if (JOB_DURATION_COMPARATOR.compare(nextSBJ.getKey(), jdi) <= 0) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type + "):"
                            + " should preempt " + numTasksToPreempt + ", but bigger job avail is "
                            + jip.getJobID() + ".len: " + jdi.getPhaseDuration() + " > "
                            + nextSBJ.getValue().getJobID() + ".len: "
                            + nextSBJ.getKey().getPhaseDuration());
                }
                return new ClaimedSlots(startingNumTasksToPreemptForNew - numTasksToPreempt,
                        startingResumableSlots - missingResumableSlots);
            }
        }

        if (jipToPreempt.getJobID().equals(jip.getJobID())) {
            continue;
        }

        /*
         * don't try to claim slots from a job in training
         *
         * FIXME: ideally a job can claim slots from a training job until this job
         * has enough tasks for training
         */
        if (!this.isTrained(jipToPreempt, type)) {
            LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type + "):" + " ignoring "
                    + jipToPreempt.getJobID() + " because in training");
            continue;
        }

        int numSuspendedOnThisTT = 0;

        /* if jipToPreempt has tasks on this TT, then suspend them */
        if (biggerOnTT != null // && type == TaskType.REDUCE
                && biggerOnTT.getKey().getJobID().equals(nextSBJ.getKey().getJobID())) {

            TreeMap<TaskAttemptID, TaskStatus> preemptableTAIDS = biggerOnTT.getValue().taskStatuses;
            int numPreemptions = Math.min(preemptableTAIDS.size(),
                    missingResumableSlots + numTasksToPreempt);
            for (int i = 0; i < numPreemptions; i++) {
                TaskAttemptID pTAID = preemptableTAIDS.firstKey();
                TaskStatus pTS = preemptableTAIDS.remove(pTAID);
                JobInProgress pJIP = this.taskTrackerManager.getJob(pTAID.getJobID());
                TaskInProgress pTIP = pJIP.getTaskInProgress(pTAID.getTaskID());

                if (type == TaskType.REDUCE) {
                    // if (this.eagerPreemption == PreemptionType.KILL
                    //     && pTIP.killTask(pTAID, false)) {
                    //   if (missingResumableSlots > 0)
                    //     missingResumableSlots -= 1;
                    //   else
                    //     numTasksToPreempt -= 1;
                    //   numSuspendedOnThisTT += 1;
                    //   if (jdi == null) {
                    //     taskHelper.kill(pTAID, jip.getJobID(), phase);
                    //   } else {
                    //     taskHelper.kill(pTAID, jip.getJobID(), phase, nextSBJ.getKey(), jdi);
                    //   }
                    // } else if (this.preemptionStrategy.isPreemptionActive()
                    //     && this.canBeSuspended(pTS) && pTIP.suspendTaskAttempt(pTAID)) {
                    if (this.preemptionStrategy.isPreemptionActive()
                            && this.preemptionStrategy.canBePreempted(pTS)
                            && this.preemptionStrategy.preempt(pTIP, pTS)) {
                        if (missingResumableSlots > 0)
                            missingResumableSlots -= 1;
                        else
                            numTasksToPreempt -= 1;
                        numSuspendedOnThisTT += 1;
                        if (jdi == null) {
                            taskHelper.suspend(pTAID, jip.getJobID(), phase);
                        } else {
                            taskHelper.suspend(pTAID, jip.getJobID(), phase, nextSBJ.getKey(), jdi);
                        }
                    } else {
                        LOG.debug(phase.toString() + "(" + jip.getJobID() + ":" + type
                                + "): cannot suspend " + pTAID + " for " + jip);
                    }
                }
            }

            if (preemptableTAIDS.size() - numPreemptions <= 0) {
                biggerOnTT = sizeBasedJobsDescIterOnTT.hasNext() ? sizeBasedJobsDescIterOnTT.next() : null;
            }
        }

        /* #tasks that can be preempted */
        int numPreemptibleRunTasks = this.getNumRunningTasks(jipToPreempt, type) - numSuspendedOnThisTT;

        /*
         * Two cases: numToSkip is bigger than preemptible tasks or it is not: -
         * is bigger: then we skip this preemptible jip - it is not: then
         * numToSkip is set to 0 and we do the real wait preemption
         */
        if (numPreemptibleRunTasks <= numToSkip) {
            numToSkip -= numPreemptibleRunTasks;
        } else {
            /* #tasks that can be preempted by jip */
            int numPreemptibleByJIPRunTasks = numPreemptibleRunTasks - numToSkip;
            numToSkip = 0;

            /* #tasks that will be preempted by jip on other TTs */
            int numRunTasksEventuallyPreemptedByJIP = Math.min(numTasksToPreempt,
                    numPreemptibleByJIPRunTasks);
            numTasksToPreempt -= numRunTasksEventuallyPreemptedByJIP;
        }
    }

    return new ClaimedSlots(startingNumTasksToPreemptForNew - numTasksToPreempt,
            startingResumableSlots - missingNewSlots);
}
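Inside the preemption loop, preemptableTAIDS.firstKey() plus remove() drains task attempts in ascending TaskAttemptID order. A tiny sketch of that drain idiom with placeholder attempt IDs and statuses:

import java.util.TreeMap;

public class DrainInOrder {
    public static void main(String[] args) {
        TreeMap<String, String> taskStatuses = new TreeMap<>();
        taskStatuses.put("attempt_003", "RUNNING");
        taskStatuses.put("attempt_001", "RUNNING");
        taskStatuses.put("attempt_002", "RUNNING");

        int numPreemptions = 2;
        for (int i = 0; i < numPreemptions; i++) {
            // take and remove the lowest remaining key each iteration
            String id = taskStatuses.firstKey();
            String status = taskStatuses.remove(id);
            System.out.println("suspending " + id + " (" + status + ")");
        }
        System.out.println("left: " + taskStatuses.keySet()); // [attempt_003]
    }
}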
From source file:org.opennms.features.vaadin.pmatrix.calculator.PmatrixDpdCalculatorRepository.java
/**
 * Causes the dataPointMap to be persisted to a file
 * @return true if dataPointMap persisted correctly, false if not
 */
public boolean persist() {
    File currentArchiveFile = null;
    File tmpCurrentArchiveFile = null;
    Resource tmpResource = null;

    if (!persistHistoricData) {
        LOG.debug("not persisting data as persistHistoricData=false");
        return false;
    }

    if (archiveFileName == null || archiveFileDirectoryLocation == null) {
        LOG.error("cannot save historical data to file as incorrect file location:"
                + " archiveFileDirectoryLocation=" + archiveFileDirectoryLocation
                + " archiveFileName=" + archiveFileName);
        return false;
    }

    // set the date on which this file was persisted
    datePersisted = new Date();

    // used to get file name suffix
    SimpleDateFormat dateFormatter = new SimpleDateFormat(dateFormatString);

    // persist data to temporary file <archiveFileName.tmp>
    String tmpArchiveFileName = archiveFileName + ".tmp";
    String tmpArchiveFileLocation = archiveFileDirectoryLocation + File.separator + tmpArchiveFileName;
    LOG.debug("historical data will be written to temporary file :" + tmpArchiveFileLocation);

    try {
        tmpResource = resourceLoader.getResource(tmpArchiveFileLocation);
        tmpCurrentArchiveFile = new File(tmpResource.getURL().getFile());
    } catch (IOException e) {
        LOG.error("cannot save historical data to file at archiveFileLocation='" + tmpArchiveFileLocation
                + "' due to error:", e);
        return false;
    }
    LOG.debug("persisting historical data to temporary file location:"
            + tmpCurrentArchiveFile.getAbsolutePath());

    // marshal the data
    PrintWriter writer = null;
    boolean marshalledCorrectly = false;
    try {
        // create directory if doesn't exist
        File directory = new File(tmpCurrentArchiveFile.getParentFile().getAbsolutePath());
        directory.mkdirs();

        // create file if doesn't exist
        writer = new PrintWriter(tmpCurrentArchiveFile, "UTF-8");
        writer.close();

        // see http://stackoverflow.com/questions/1043109/why-cant-jaxb-find-my-jaxb-index-when-running-inside-apache-felix
        // need to provide bundles class loader
        ClassLoader cl = org.opennms.features.vaadin.pmatrix.model.DataPointDefinition.class.getClassLoader();
        JAXBContext jaxbContext = JAXBContext.newInstance(
                "org.opennms.features.vaadin.pmatrix.model:org.opennms.features.vaadin.pmatrix.calculator", cl);
        //JAXBContext jaxbContext = JAXBContext.newInstance("org.opennms.features.vaadin.pmatrix.model:org.opennms.features.vaadin.pmatrix.calculator");

        Marshaller jaxbMarshaller = jaxbContext.createMarshaller();
        jaxbMarshaller.setProperty(Marshaller.JAXB_ENCODING, "UTF-8");

        // TODO CHANGE output pretty printed
        jaxbMarshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);

        // marshal this Data Repository
        jaxbMarshaller.marshal(this, tmpCurrentArchiveFile);

        marshalledCorrectly = true;
    } catch (JAXBException e) {
        LOG.error("problem marshalling file: ", e);
    } catch (Exception e) {
        LOG.error("problem marshalling file: ", e);
    } finally {
        if (writer != null)
            writer.close();
    }
    if (marshalledCorrectly == false)
        return false;

    // marshaling succeeded so rename tmp file
    String archiveFileLocation = archiveFileDirectoryLocation + File.separator + archiveFileName;
    LOG.info("historical data will be written to:" + archiveFileLocation);
    Resource resource = resourceLoader.getResource(archiveFileLocation);
    if (resource.exists()) {
        String oldArchiveFileName = archiveFileName + "." + dateFormatter.format(datePersisted);
        String oldArchiveFileLocation = archiveFileDirectoryLocation + File.separator + oldArchiveFileName;
        LOG.info("previous historical file at archiveFileLocation='" + archiveFileLocation
                + "' exists so being renamed to " + oldArchiveFileLocation);
        Resource oldresource = resourceLoader.getResource(oldArchiveFileLocation);
        try {
            currentArchiveFile = new File(resource.getURL().getFile());
            File oldArchiveFile = new File(oldresource.getURL().getFile());
            // rename current archive file to old archive file name
            if (!currentArchiveFile.renameTo(oldArchiveFile)) {
                throw new IOException("cannot rename current archive file:"
                        + currentArchiveFile.getAbsolutePath() + " to "
                        + oldArchiveFile.getAbsolutePath());
            }
            // rename temporary archive file to current archive file name
            if (!tmpCurrentArchiveFile.renameTo(currentArchiveFile)) {
                throw new IOException("cannot rename temporary current archive file:"
                        + tmpCurrentArchiveFile.getAbsolutePath() + " to "
                        + currentArchiveFile.getAbsolutePath());
            }
        } catch (IOException e) {
            LOG.error("Problem archiving old persistance file", e);
        }

        // remove excess files
        try {
            Resource directoryResource = resourceLoader.getResource(archiveFileDirectoryLocation);
            File archiveFolder = new File(directoryResource.getURL().getFile());
            File[] listOfFiles = archiveFolder.listFiles();
            String filename;

            // this will sort earliest to latest date
            TreeMap<Date, File> sortedFiles = new TreeMap<Date, File>();

            for (int i = 0; i < listOfFiles.length; i++) {
                if (listOfFiles[i].isFile()) {
                    filename = listOfFiles[i].getName();
                    if ((!filename.equals(archiveFileName)) && (!filename.equals(tmpArchiveFileName))
                            && (filename.startsWith(archiveFileName))) {
                        String beforeTimeString = archiveFileName + ".";
                        String timeSuffix = filename.substring(beforeTimeString.length());
                        if (!"".equals(timeSuffix)) {
                            Date fileCreatedate = null;
                            try {
                                fileCreatedate = dateFormatter.parse(timeSuffix);
                            } catch (ParseException e) {
                                LOG.debug("cant parse file name suffix to time for filename:" + filename, e);
                            }
                            if (fileCreatedate != null) {
                                sortedFiles.put(fileCreatedate, listOfFiles[i]);
                            }
                        }
                    }
                }
            }

            while (sortedFiles.size() > archiveFileMaxNumber) {
                File removeFile = sortedFiles.remove(sortedFiles.firstKey());
                LOG.debug("deleting archive file:'" + removeFile.getName()
                        + "' so that number of archive files <=" + archiveFileMaxNumber);
                removeFile.delete();
            }
            for (File archivedFile : sortedFiles.values()) {
                LOG.debug("not deleting archive file:'" + archivedFile.getName()
                        + "' so that number of archive files <=" + archiveFileMaxNumber);
            }

            return true;
        } catch (IOException e) {
            LOG.error("Problem removing old persistance files", e);
        }
    } else {
        // if resource doesn't exist just rename the new tmp file to the archive file name
        try {
            currentArchiveFile = new File(resource.getURL().getFile());
            // rename temporary archive file to current archive file name
            if (!tmpCurrentArchiveFile.renameTo(currentArchiveFile)) {
                throw new IOException("cannot rename temporary current archive file:"
                        + tmpCurrentArchiveFile.getAbsolutePath() + " to "
                        + currentArchiveFile.getAbsolutePath());
            }
            return true;
        } catch (IOException e) {
            LOG.error("cannot rename temporary current archive ", e);
        }
    }
    return false;
}
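The archive-rotation step keys files by their parsed creation date, so firstKey() is always the oldest archive and removing it until the map fits the limit deletes oldest-first. A sketch of that rotation with in-memory stand-ins for the File objects:

import java.time.LocalDate;
import java.util.TreeMap;

public class OldestFirstEviction {
    public static void main(String[] args) {
        int archiveFileMaxNumber = 2;
        TreeMap<LocalDate, String> sortedFiles = new TreeMap<>();
        sortedFiles.put(LocalDate.of(2024, 1, 5), "data.xml.2024-01-05");
        sortedFiles.put(LocalDate.of(2024, 3, 1), "data.xml.2024-03-01");
        sortedFiles.put(LocalDate.of(2024, 2, 9), "data.xml.2024-02-09");

        // firstKey() is the earliest date, i.e. the oldest archive file
        while (sortedFiles.size() > archiveFileMaxNumber) {
            String removed = sortedFiles.remove(sortedFiles.firstKey());
            System.out.println("deleting " + removed);
        }
        System.out.println("kept: " + sortedFiles.values());
    }
}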
From source file:org.eclipse.january.dataset.Stats.java
static double[] outlierValuesMap(final Dataset a, int nl, int nh) {
    final TreeMap<Double, Integer> lMap = new TreeMap<Double, Integer>();
    final TreeMap<Double, Integer> hMap = new TreeMap<Double, Integer>();

    int ml = 0;
    int mh = 0;
    IndexIterator it = a.getIterator();
    while (it.hasNext()) {
        Double x = a.getElementDoubleAbs(it.index);
        Integer i;
        if (ml == nl) {
            Double k = lMap.lastKey();
            if (x < k) {
                i = lMap.get(k) - 1;
                if (i == 0) {
                    lMap.remove(k);
                } else {
                    lMap.put(k, i);
                }
                i = lMap.get(x);
                if (i == null) {
                    lMap.put(x, 1);
                } else {
                    lMap.put(x, i + 1);
                }
            }
        } else {
            i = lMap.get(x);
            if (i == null) {
                lMap.put(x, 1);
            } else {
                lMap.put(x, i + 1);
            }
            ml++;
        }

        if (mh == nh) {
            Double k = hMap.firstKey();
            if (x > k) {
                i = hMap.get(k) - 1;
                if (i == 0) {
                    hMap.remove(k);
                } else {
                    hMap.put(k, i);
                }
                i = hMap.get(x);
                if (i == null) {
                    hMap.put(x, 1);
                } else {
                    hMap.put(x, i + 1);
                }
            }
        } else {
            i = hMap.get(x);
            if (i == null) {
                hMap.put(x, 1);
            } else {
                hMap.put(x, i + 1);
            }
            mh++;
        }
    }

    // Attempt to make values distinct
    double lx = lMap.lastKey();
    double hx = hMap.firstKey();
    if (lx >= hx) {
        Double h = hMap.higherKey(lx);
        if (h != null) {
            hx = h;
            mh--;
        } else {
            Double l = lMap.lowerKey(hx);
            if (l != null) {
                lx = l;
                ml--;
            }
        }
    }
    return new double[] { lMap.lastKey(), hMap.firstKey(), ml, mh };
}
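The tail shared by both Stats variants pairs firstKey() with the navigation methods higherKey()/lowerKey() to push the low and high thresholds apart when they collide. A small sketch of what those calls return:

import java.util.TreeMap;

public class NavigationCalls {
    public static void main(String[] args) {
        TreeMap<Double, Integer> hMap = new TreeMap<>();
        hMap.put(3.0, 1);
        hMap.put(5.0, 2);
        hMap.put(8.0, 1);

        double lx = 5.0; // pretend the low threshold collided with the high map
        System.out.println(hMap.firstKey());     // 3.0: smallest key
        System.out.println(hMap.higherKey(lx));  // 8.0: least key strictly above lx
        System.out.println(hMap.lowerKey(lx));   // 3.0: greatest key strictly below lx
        System.out.println(hMap.higherKey(8.0)); // null: nothing above the last key
    }
}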
From source file:org.esa.nest.gpf.SliceAssemblyOp.java
private Product[] determineSliceProducts() throws Exception {
    if (sourceProducts.length < 2) {
        throw new Exception("Slice assembly requires at least two consecutive slice products");
    }

    final TreeMap<Integer, Product> productSet = new TreeMap<>();
    for (Product srcProduct : sourceProducts) {
        final MetadataElement origMetaRoot = AbstractMetadata.getOriginalProductMetadata(srcProduct);
        final MetadataElement generalProductInformation = getGeneralProductInformation(origMetaRoot);
        if (!isSliceProduct(generalProductInformation)) {
            throw new Exception(srcProduct.getName() + " is not a slice product");
        }

        final int totalSlices = generalProductInformation.getAttributeInt("totalSlices");
        final int sliceNumber = generalProductInformation.getAttributeInt("sliceNumber");
        //System.out.println("SliceAssemblyOp.determineSliceProducts: totalSlices = " + totalSlices + "; slice product name = " + srcProduct.getName() + "; prod type = " + srcProduct.getProductType() + "; sliceNumber = " + sliceNumber);

        productSet.put(sliceNumber, srcProduct);
    }

    // check if consecutive
    Integer prev = productSet.firstKey();
    // Note that "The set's iterator returns the keys in ascending order".
    for (Integer i : productSet.keySet()) {
        if (!i.equals(prev)) {
            if (!prev.equals(i - 1)) {
                throw new Exception("Products are not consecutive slices");
            }
            prev = i;
        }
    }

    // Note that "If productSet makes any guarantees as to what order its elements
    // are returned by its iterator, toArray() must return the elements in
    // the same order".
    return productSet.values().toArray(new Product[productSet.size()]);
}
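Since a TreeMap's keySet() iterates in ascending order, starting from firstKey() and comparing each key with its predecessor is enough to detect a gap in the slice numbers. A distilled sketch with an intentional gap:

import java.util.TreeMap;

public class ConsecutiveKeys {
    public static void main(String[] args) {
        TreeMap<Integer, String> productSet = new TreeMap<>();
        productSet.put(2, "slice2");
        productSet.put(1, "slice1");
        productSet.put(4, "slice4"); // gap: slice 3 is missing

        Integer prev = productSet.firstKey();
        for (Integer i : productSet.keySet()) { // ascending order guaranteed
            if (!i.equals(prev)) {
                if (!prev.equals(i - 1)) {
                    System.out.println("gap between slice " + prev + " and slice " + i);
                    return;
                }
                prev = i;
            }
        }
        System.out.println("slices are consecutive");
    }
}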