List of usage examples for java.util.TreeMap.containsKey
public boolean containsKey(Object key)
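Returns true if this map contains a mapping for the specified key. Before the longer excerpts below, a minimal self-contained sketch of the contract (class and variable names here are illustrative, not taken from any of the source files):

import java.util.TreeMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> scores = new TreeMap<>();
        scores.put("alice", 3);

        System.out.println(scores.containsKey("alice")); // true
        System.out.println(scores.containsKey("bob"));   // false

        // Unlike HashMap, TreeMap locates keys by comparison (O(log n)),
        // so a null key throws NullPointerException under natural ordering.
    }
}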
From source file:com.alibaba.wasp.master.handler.TableEventHandler.java
public boolean reOpenAllEntityGroups(List<EntityGroupInfo> entityGroups) throws IOException {
    boolean done = false;
    LOG.info("Bucketing entityGroups by entityGroup server...");
    TreeMap<ServerName, List<EntityGroupInfo>> serverToEntityGroups = Maps.newTreeMap();
    NavigableMap<EntityGroupInfo, ServerName> egiHserverMapping = FMetaScanner
            .allTableEntityGroups(server.getConfiguration(), tableName, false);
    List<EntityGroupInfo> reEntityGroups = new ArrayList<EntityGroupInfo>();
    for (EntityGroupInfo egi : entityGroups) {
        ServerName egLocation = egiHserverMapping.get(egi);
        // Skip the offlined split parent EntityGroup
        if (null == egLocation) {
            LOG.info("Skip " + egi);
            continue;
        }
        if (!serverToEntityGroups.containsKey(egLocation)) {
            LinkedList<EntityGroupInfo> egiList = Lists.newLinkedList();
            serverToEntityGroups.put(egLocation, egiList);
        }
        reEntityGroups.add(egi);
        serverToEntityGroups.get(egLocation).add(egi);
    }
    LOG.info("Reopening " + reEntityGroups.size() + " entityGroups on " + serverToEntityGroups.size()
            + " fservers.");
    this.fMasterServices.getAssignmentManager().setEntityGroupsToReopen(reEntityGroups);
    BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToEntityGroups,
            this.fMasterServices.getAssignmentManager());
    while (true) {
        try {
            if (bulkReopen.bulkReOpen()) {
                done = true;
                break;
            } else {
                LOG.warn("Timeout before reopening all entityGroups");
            }
        } catch (InterruptedException e) {
            LOG.warn("Reopen was interrupted");
            // Preserve the interrupt.
            Thread.currentThread().interrupt();
            break;
        }
    }
    return done;
}
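The containsKey-then-put sequence above is the classic group-by-key idiom. On Java 8 and later the same bucketing can be written in one call with Map.computeIfAbsent; a sketch reusing the example's names (not tested against the Wasp codebase):

// Hypothetical rewrite of the bucketing loop body with computeIfAbsent (Java 8+).
serverToEntityGroups.computeIfAbsent(egLocation, k -> Lists.newLinkedList()).add(egi);
reEntityGroups.add(egi);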
From source file:net.tsquery.DataEndpoint.java
@SuppressWarnings("unchecked")
private JSONObject PlotToDygraphJSON(Plot plot, long tsFrom, long tsTo, int topN) {
    final JSONObject plotObject = new JSONObject();
    final JSONArray nameArray = new JSONArray();
    final JSONArray dataArray = new JSONArray();
    final int dpCount = plot.getDataPointsSize();
    final TreeMap<Long, double[]> tsMap = new TreeMap<>();
    final double[] weight = new double[dpCount];
    int dpIndex = 0;
    for (DataPoints dataPoints : plot.getDataPoints()) {
        for (DataPoint point : dataPoints) {
            long timestamp = point.timestamp();
            if (timestamp < tsFrom || timestamp > tsTo)
                continue;
            long tsMSec = timestamp * 1000;
            if (!tsMap.containsKey(tsMSec)) {
                double[] values = new double[dpCount];
                values[dpIndex] = getValue(point);
                tsMap.put(tsMSec, values);
                weight[dpIndex] += ((values[dpIndex]) / 1000000.0);
            } else {
                //noinspection MismatchedReadAndWriteOfArray
                double[] values = tsMap.get(tsMSec);
                values[dpIndex] = getValue(point);
                weight[dpIndex] += ((values[dpIndex]) / 1000000.0);
            }
        }
        dpIndex++;
    }
    HashMap<Integer, Boolean> includeMap = null;
    // are we performing a topN lookup?
    if (topN > 0) {
        includeMap = new HashMap<>(topN);
        TreeMap<Double, Integer> weightMap = new TreeMap<>(Collections.reverseOrder());
        for (int i = 0; i < dpCount; i++) {
            while (weightMap.containsKey(weight[i]))
                weight[i] -= 0.00000001;
            weightMap.put(weight[i], i);
        }
        int series = 0;
        for (Map.Entry<Double, Integer> entry : weightMap.entrySet()) {
            includeMap.put(entry.getValue(), true);
            ++series;
            if (series >= topN)
                break;
        }
    }
    for (Map.Entry<Long, double[]> entry : tsMap.entrySet()) {
        JSONArray entryArray = new JSONArray();
        entryArray.add(entry.getKey());
        final double[] points = entry.getValue();
        for (dpIndex = 0; dpIndex < dpCount; dpIndex++) {
            if ((topN <= 0) || (topN > 0 && includeMap.containsKey(dpIndex))) {
                entryArray.add(points[dpIndex]);
            }
        }
        dataArray.add(entryArray);
    }
    // First column is always the Date
    nameArray.add("Date");
    int index = -1;
    for (DataPoints dataPoints : plot.getDataPoints()) {
        index++;
        // if we are in a topN query and the current index is not included, skip this iteration
        if (topN > 0 && !includeMap.containsKey(index))
            continue;
        StringBuilder nameBuilder = new StringBuilder();
        nameBuilder.append(dataPoints.metricName()).append(":");
        Map<String, String> tags = dataPoints.getTags();
        for (String s : tags.keySet()) {
            nameBuilder.append(String.format(" %s=%s", s, tags.get(s)));
        }
        nameArray.add(nameBuilder.toString());
    }
    plotObject.put("labels", nameArray);
    plotObject.put("values", dataArray);
    return plotObject;
}
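Note the while (weightMap.containsKey(weight[i])) loop: a TreeMap holds one value per key, so duplicate weights are nudged apart by a tiny epsilon to keep every series in the ranking. A sketch of an alternative that avoids perturbing the data by sorting indices instead (names mirror the example; assumes java.util.Arrays, HashSet, and Set are imported):

// Rank series indices by weight descending, then keep the first topN.
Integer[] order = new Integer[dpCount];
for (int i = 0; i < dpCount; i++) {
    order[i] = i;
}
Arrays.sort(order, (a, b) -> Double.compare(weight[b], weight[a]));

Set<Integer> include = new HashSet<>();
for (int i = 0; i < Math.min(topN, dpCount); i++) {
    include.add(order[i]);
}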
From source file:de.tudarmstadt.ukp.uby.integration.alignment.xml.transform.sensealignments.FnWnSenseAlignmentXml.java
/**
 * Collect UBY SenseIds for the aligned senses based on synsetId and lemma
 * for WordNet and based on lexical unit id for FrameNet
 *
 * @throws IOException
 */
@Override
public void toAlignmentXml(XmlMeta metadata) throws IOException {
    System.err.println("to Alignment Xml");
    TreeMap<String, Source> sourceMap = new TreeMap<>();
    List<String[]> data = readAlignmentFile();
    int counter = 0; // input sense pairs
    int found = 0; // output sense pairs
    // iterate over alignment entries
    for (String[] d : data) {
        counter++;
        // show progress:
        if ((counter % 1000) == 0) {
            logger.info("# processed alignments: " + counter);
        }
        // use FrameNet sense externalReference (lexical unit Id)
        String fnSenseId = d[0];
        // SOURCE
        Source source = null;
        if (sourceMap.containsKey(fnSenseId)) {
            source = sourceMap.get(fnSenseId);
        } else {
            source = new Source();
        }
        source.ref = fnSenseId;
        List<Target> targets = new LinkedList<Target>();
        // get WordNet sense by Synset Offset and Lemma
        List<Sense> wnSenses = uby.getSensesByWNSynsetId(d[1]);
        // List<Sense> wnSenses = uby.wordNetSenses(partOfSpeech, offset);
        for (Sense wnSense : wnSenses) {
            Target target = new Target();
            target.ref = wnSense.getId();
            Decision decision = new Decision();
            decision.confidence = SenseAlignmentGenericXml.DEFAULTCONFSCORE;
            decision.value = true;
            // decision.src = metadata.decisiontypes.get(0).name;
            target.decision = decision;
            targets.add(target);
            found++;
        }
        if (targets.size() > 0) {
            source.targets = targets;
            sourceMap.put(source.ref, source);
        }
    }
    writer.writeMetaData(metadata);
    Alignments alignments = new Alignments();
    alignments.source = new LinkedList<>();
    alignments.source.addAll(sourceMap.values());
    writer.writeAlignments(alignments);
    writer.close();
    System.err.println("Alignments in: " + counter + ", out: " + found);
    logger.info("Alignments in: " + counter + ", alignments out: " + found);
}
From source file:com.sfs.DataFilter.java
/**
 * Parses the text data.
 *
 * @param text the text
 *
 * @return the tree map< integer, tree map< integer, string>>
 */
public static TreeMap<Integer, TreeMap<Integer, String>> parseTextData(final String text) {
    TreeMap<Integer, TreeMap<Integer, String>> parsedData = new TreeMap<Integer, TreeMap<Integer, String>>();
    // This counter holds the maximum number of columns provided
    int maxNumberOfTokens = 0;
    if (text != null) {
        StringTokenizer tokenizer = new StringTokenizer(text, "\n");
        int lineCounter = 1;
        while (tokenizer.hasMoreTokens()) {
            String line = tokenizer.nextToken();
            TreeMap<Integer, String> parsedLine = new TreeMap<Integer, String>();
            final StringTokenizer tabTokenizer = new StringTokenizer(line, "\t");
            if (tabTokenizer.countTokens() > 1) {
                parsedLine = tokenizerToMap(tabTokenizer);
            } else {
                final StringTokenizer commaTokenizer = new StringTokenizer(line, ",");
                parsedLine = tokenizerToMap(commaTokenizer);
            }
            if (parsedLine.size() > maxNumberOfTokens) {
                maxNumberOfTokens = parsedLine.size();
            }
            parsedData.put(lineCounter, parsedLine);
            lineCounter++;
        }
    }
    // Now cycle through all the parsed data.
    // Ensure that each row has the same (max) number of tokens.
    for (int rowIndex : parsedData.keySet()) {
        TreeMap<Integer, String> parsedLine = parsedData.get(rowIndex);
        // This map holds the final values
        TreeMap<Integer, String> columnTokens = new TreeMap<Integer, String>();
        for (int i = 0; i < maxNumberOfTokens; i++) {
            int columnIndex = i + 1;
            if (parsedLine.containsKey(columnIndex)) {
                String value = parsedLine.get(columnIndex);
                columnTokens.put(columnIndex, value);
            } else {
                columnTokens.put(columnIndex, "");
            }
        }
        parsedData.put(rowIndex, columnTokens);
    }
    return parsedData;
}
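The padding loop above is a common containsKey pattern: supply a default for absent keys. Since Java 8, Map.getOrDefault collapses the branch; a sketch with the example's names:

// Equivalent padding without the explicit containsKey branch (Java 8+).
for (int i = 0; i < maxNumberOfTokens; i++) {
    int columnIndex = i + 1;
    columnTokens.put(columnIndex, parsedLine.getOrDefault(columnIndex, ""));
}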
From source file:ca.sqlpower.architect.ddl.TypeMap.java
/**
 * Only create new database and native type entries when createNew is true.
 * This prevents bogus map entries from being created when doing lookups.
 *
 * Notice that a SQLColumn is not passed in, so the rules list is not
 * cut down to size.
 *
 * @see getRules for the method which determines which rules are in effect
 *
 * @param database
 * @param nativeType
 * @param createNew
 * @return
 */
protected List getRulesForNativeType(String database, String nativeType, boolean createNew) {
    TreeMap nativeTypes = null;
    String tDatabase = translateDatabaseName(database);
    if (!databases.containsKey(tDatabase)) {
        if (createNew) {
            nativeTypes = new TreeMap();
            databases.put(tDatabase, nativeTypes);
        } else {
            return EMPTY_LIST; // empty list
        }
    } else {
        nativeTypes = (TreeMap) databases.get(tDatabase);
    }
    ArrayList mappingRules = null;
    if (!nativeTypes.containsKey(nativeType)) {
        if (createNew) {
            mappingRules = new ArrayList();
            nativeTypes.put(nativeType, mappingRules);
        } else {
            return EMPTY_LIST; // empty list
        }
    } else {
        mappingRules = (ArrayList) nativeTypes.get(nativeType);
    }
    return mappingRules;
}
From source file:practica4.Bet365.java
@Override
public Partido[] parser(String url) throws FileNotFoundException, MalformedURLException, IOException {
    ArrayList<Partido> resultado = new ArrayList<>();
    HashMap<String, Partido> partidos = new HashMap<>();
    TreeMap<String, String> claves = new TreeMap<>();
    File file = new File("cache365");
    if (!file.exists()) {
        FileUtils.copyURLToFile(new URL(url), file);
    }
    String contents = FileUtils.readFileToString(file);
    //String[] pageParts = contents.split("\\<h3\\>");
    String[] pageParts = contents.split("\\|");
    for (String line : pageParts) {
        String[] lineParts = line.split(";");
        claves.clear();
        for (String part : lineParts) {
            if (part.length() < 3)
                continue;
            String key = part.substring(0, 2);
            String value = part.substring(3);
            claves.put(key, value);
        }
        if (claves.containsKey("FI")) {
            // we have a match
            String idPartido = claves.get("FI");
            if (claves.containsKey("NA")) {
                // general information (name, date)
                Partido partido = new Partido();
                String nombresStr = claves.get("NA");
                //byte[] bytes = nombresStr.getBytes("LATIN1");
                //nombresStr = new String(bytes, "UTF-8");
                String[] nombres = nombresStr.split(" v ");
                String fecha = claves.get("BC");
                partido.equipo1 = nombres[0].trim();
                partido.equipo2 = nombres[1].trim();
                partido.fecha = Calendar.getInstance();
                partido.casa = "Bet365";
                int year = Integer.parseInt(fecha.substring(0, 4));
                int month = Integer.parseInt(fecha.substring(4, 6)) - 1;
                int day = Integer.parseInt(fecha.substring(6, 8));
                int hour = Integer.parseInt(fecha.substring(8, 10));
                int minutes = Integer.parseInt(fecha.substring(10, 12));
                partido.fecha.set(year, month, day, hour, minutes);
                partido.costeVictoria1 = -1;
                partido.costeVictoria2 = -1;
                partido.costeEmpate = -1;
                partidos.put(idPartido, partido);
            } else {
                String[] fraccion = claves.get("OD").split("/");
                Partido partido = partidos.get(idPartido);
                double valor = 1.0 + Double.parseDouble(fraccion[0]) / Double.parseDouble(fraccion[1]);
                valor = Math.round(valor * 100.0) / 100.0;
                if (partido.costeVictoria1 == -1) {
                    partido.costeVictoria1 = valor;
                } else if (partido.costeEmpate == -1) {
                    partido.costeEmpate = valor;
                } else if (partido.costeVictoria2 == -1) {
                    partido.costeVictoria2 = valor;
                    resultado.add(partido);
                    System.out.printf("%s %f %f %f %s\n", partido.equipo1, partido.costeVictoria1,
                            partido.costeEmpate, partido.costeVictoria2, partido.equipo2);
                }
            }
        }
    }
    // Size the result array from resultado, not partidos, to avoid trailing nulls.
    return resultado.toArray(new Partido[resultado.size()]);
}
From source file:org.jasig.ssp.web.api.reports.PersonHistoryReportController.java
public static List<StudentHistoryTO> sort(final Set<EarlyAlertTO> earlyAlerts,
        final Map<String, List<TaskTO>> taskMap, final List<JournalEntryTO> journalEntries) {
    // TreeMap assures modified date order is preserved (sorted by modified date descending)
    final TreeMap<Date, StudentHistoryTO> studentHistoryMap = new TreeMap<Date, StudentHistoryTO>(
            new Comparator<Date>() {
                public int compare(Date o1, Date o2) {
                    return sortDateDescending(o1, o2);
                }
            });
    // Sort early alerts by modified date descending
    final List<EarlyAlertTO> earlyAlertsSorted = new ArrayList<EarlyAlertTO>(earlyAlerts);
    Collections.sort(earlyAlertsSorted, new Comparator<EarlyAlertTO>() {
        @Override
        public int compare(final EarlyAlertTO o1, final EarlyAlertTO o2) {
            return sortDateDescending(o1.getModifiedDate(), o2.getModifiedDate());
        }
    });
    // Sort journal entries by modified date descending
    final List<JournalEntryTO> journalEntriesSorted = journalEntries;
    Collections.sort(journalEntriesSorted, new Comparator<JournalEntryTO>() {
        @Override
        public int compare(final JournalEntryTO o1, final JournalEntryTO o2) {
            return sortDateDescending(o1.getModifiedDate(), o2.getModifiedDate());
        }
    });
    // First, iterate over each EarlyAlertTO, looking for matching dates in the PersonHistoryTO
    final Iterator<EarlyAlertTO> alertIter = earlyAlertsSorted.iterator();
    while (alertIter.hasNext()) {
        final EarlyAlertTO thisEarlyAlertTO = alertIter.next();
        final Date snewDate = DateTimeUtils.midnightOn(thisEarlyAlertTO.getModifiedDate());
        if (studentHistoryMap.containsKey(snewDate)) {
            final StudentHistoryTO studentHistoryTO = studentHistoryMap.get(snewDate);
            studentHistoryTO.addEarlyAlertTO(thisEarlyAlertTO);
        } else {
            final StudentHistoryTO thisStudentHistoryTO = new StudentHistoryTO(
                    getDateFormatter().format(snewDate));
            thisStudentHistoryTO.addEarlyAlertTO(thisEarlyAlertTO);
            studentHistoryMap.put(snewDate, thisStudentHistoryTO);
        }
    }
    // Second, iterate over each JournalEntryTO, looking for matching dates in the PersonHistoryTO
    final Iterator<JournalEntryTO> journalEntryIter = journalEntriesSorted.iterator();
    while (journalEntryIter.hasNext()) {
        final JournalEntryTO thisJournalEntryTO = journalEntryIter.next();
        final Date snewDate = DateTimeUtils.midnightOn(thisJournalEntryTO.getModifiedDate());
        if (studentHistoryMap.containsKey(snewDate)) {
            final StudentHistoryTO studentHistoryTO = studentHistoryMap.get(snewDate);
            studentHistoryTO.addJournalEntryTO(thisJournalEntryTO);
        } else {
            final StudentHistoryTO thisStudentHistoryTO = new StudentHistoryTO(
                    getDateFormatter().format(snewDate));
            thisStudentHistoryTO.addJournalEntryTO(thisJournalEntryTO);
            studentHistoryMap.put(snewDate, thisStudentHistoryTO);
        }
    }
    // Per the API, the tasks are already broken down into a map, sorted by group.
    // We want to maintain this grouping, but sort these based on date.
    // Third, iterate over each TaskTO in each group, looking for matching dates in the PersonHistoryTO
    for (final Map.Entry<String, List<TaskTO>> entry : taskMap.entrySet()) {
        final String groupName = entry.getKey();
        final List<TaskTO> tasksSorted = entry.getValue();
        // Sort tasks by modified date descending
        Collections.sort(tasksSorted, new Comparator<TaskTO>() {
            @Override
            public int compare(final TaskTO o1, final TaskTO o2) {
                return sortDateDescending(o1.getModifiedDate(), o2.getModifiedDate());
            }
        });
        final Iterator<TaskTO> taskIter = tasksSorted.iterator();
        while (taskIter.hasNext()) {
            final TaskTO thisTask = taskIter.next();
            final Date snewDate = DateTimeUtils.midnightOn(thisTask.getModifiedDate());
            if (studentHistoryMap.containsKey(snewDate)) {
                final StudentHistoryTO studentHistoryTO = studentHistoryMap.get(snewDate);
                studentHistoryTO.addTask(groupName, thisTask);
            } else {
                final StudentHistoryTO thisStudentHistoryTO = new StudentHistoryTO(
                        getDateFormatter().format(snewDate));
                thisStudentHistoryTO.addTask(groupName, thisTask);
                studentHistoryMap.put(snewDate, thisStudentHistoryTO);
            }
        }
    }
    // At this point, we should have a StudentHistoryTO map keyed by date
    final Collection<StudentHistoryTO> studentHistoryTOs = studentHistoryMap.values();
    final List<StudentHistoryTO> retVal = new ArrayList<StudentHistoryTO>();
    final Iterator<StudentHistoryTO> studentHistoryTOIter = studentHistoryTOs.iterator();
    while (studentHistoryTOIter.hasNext()) {
        final StudentHistoryTO currentStudentHistoryTO = studentHistoryTOIter.next();
        currentStudentHistoryTO.createTaskList();
        retVal.add(currentStudentHistoryTO);
    }
    return retVal;
}
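If sortDateDescending is simply a reversed natural ordering of Date (an assumption; the helper's body is not shown here), the anonymous comparator can be replaced on Java 8+ with Comparator.reverseOrder(); a sketch:

// Assumes sortDateDescending(a, b) is equivalent to b.compareTo(a) for non-null dates.
final TreeMap<Date, StudentHistoryTO> studentHistoryMap =
        new TreeMap<>(Comparator.<Date>reverseOrder());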
From source file:org.apache.hadoop.hbase.master.handler.TableEventHandler.java
public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
    boolean done = false;
    LOG.info("Bucketing regions by region server...");
    HTable table = new HTable(masterServices.getConfiguration(), tableName);
    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
    NavigableMap<HRegionInfo, ServerName> hriHserverMapping;
    try {
        hriHserverMapping = table.getRegionLocations();
    } finally {
        table.close();
    }
    List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
    for (HRegionInfo hri : regions) {
        ServerName rsLocation = hriHserverMapping.get(hri);
        // Skip the offlined split parent region
        // See HBASE-4578 for more information.
        if (null == rsLocation) {
            LOG.info("Skip " + hri);
            continue;
        }
        if (!serverToRegions.containsKey(rsLocation)) {
            LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
            serverToRegions.put(rsLocation, hriList);
        }
        reRegions.add(hri);
        serverToRegions.get(rsLocation).add(hri);
    }
    LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size() + " region servers.");
    this.masterServices.getAssignmentManager().setRegionsToReopen(reRegions);
    BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToRegions,
            this.masterServices.getAssignmentManager());
    while (true) {
        try {
            if (bulkReopen.bulkReOpen()) {
                done = true;
                break;
            } else {
                LOG.warn("Timeout before reopening all regions");
            }
        } catch (InterruptedException e) {
            LOG.warn("Reopen was interrupted");
            // Preserve the interrupt.
            Thread.currentThread().interrupt();
            break;
        }
    }
    return done;
}
From source file:edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniquesReducer.java
private double[] computeCorrelationTechniques(ArrayList<TreeMap<Integer, Float>>[] timeSeries, int index1,
        int index2, boolean temporalPermutation) {
    double[] values = { 0.0, 0.0, 0.0 };

    TreeMap<Integer, Float> map1 = timeSeries[index1].get(dataset1Key);
    TreeMap<Integer, Float> map2 = timeSeries[index2].get(dataset2Key);

    ArrayList<Double> array1 = new ArrayList<Double>();
    ArrayList<Double> array2 = new ArrayList<Double>();
    for (int temp : map1.keySet()) {
        if (map2.containsKey(temp)) {
            array1.add((double) map1.get(temp));
            array2.add((double) map2.get(temp));
        }
    }

    double[] completeTempArray1 = new double[map1.keySet().size()];
    int index = 0;
    for (int temp : map1.keySet()) {
        completeTempArray1[index] = map1.get(temp);
        index++;
    }
    double[] completeTempArray2 = new double[map2.keySet().size()];
    index = 0;
    for (int temp : map2.keySet()) {
        completeTempArray2[index] = map2.get(temp);
        index++;
    }
    map1.clear();
    map2.clear();

    if (array1.size() < 2)
        return null;

    // Pearson's Correlation
    double[] tempDoubleArray1 = new double[array1.size()];
    double[] tempDoubleArray2 = new double[array2.size()];
    int indexD1 = (temporalPermutation) ? new Random().nextInt(array1.size()) : 0;
    int indexD2 = (temporalPermutation) ? new Random().nextInt(array2.size()) : 0;
    for (int i = 0; i < array1.size(); i++) {
        int j = (indexD1 + i) % array1.size();
        int k = (indexD2 + i) % array2.size();
        tempDoubleArray1[i] = array1.get(j);
        tempDoubleArray2[i] = array2.get(k);
    }
    array1 = null;
    array2 = null;

    PearsonsCorrelation pearsonsCorr = new PearsonsCorrelation();
    values[0] = pearsonsCorr.correlation(tempDoubleArray1, tempDoubleArray2);

    // Mutual Information
    try {
        values[1] = getMIScore(tempDoubleArray1, tempDoubleArray2);
    } catch (Exception e) {
        e.printStackTrace();
        /*String data1 = "";
        for (double d : tempDoubleArray1)
            data1 += d + ", ";
        String data2 = "";
        for (double d : tempDoubleArray2)
            data2 += d + ", ";
        System.out.println(data1);
        System.out.println(data2);*/
        System.exit(-1);
    }
    tempDoubleArray1 = null;
    tempDoubleArray2 = null;

    // DTW
    double[] completeTempDoubleArray1 = new double[completeTempArray1.length];
    double[] completeTempDoubleArray2 = new double[completeTempArray2.length];
    if (temporalPermutation) {
        indexD1 = new Random().nextInt(completeTempArray1.length);
        for (int i = 0; i < completeTempArray1.length; i++) {
            int j = (indexD1 + i) % completeTempArray1.length;
            completeTempDoubleArray1[i] = completeTempArray1[j];
        }
        indexD2 = new Random().nextInt(completeTempArray2.length);
        for (int i = 0; i < completeTempArray2.length; i++) {
            int j = (indexD2 + i) % completeTempArray2.length;
            completeTempDoubleArray2[i] = completeTempArray2[j];
        }
    } else {
        System.arraycopy(completeTempArray1, 0, completeTempDoubleArray1, 0, completeTempArray1.length);
        System.arraycopy(completeTempArray2, 0, completeTempDoubleArray2, 0, completeTempArray2.length);
    }
    completeTempArray1 = null;
    completeTempArray2 = null;

    completeTempDoubleArray1 = normalize(completeTempDoubleArray1);
    completeTempDoubleArray2 = normalize(completeTempDoubleArray2);
    values[2] = getDTWScore(completeTempDoubleArray1, completeTempDoubleArray2);

    return values;
}
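The first loop above intersects the key sets of the two TreeMaps via containsKey, keeping only timestamps present in both series. An equivalent sketch that materializes the shared keys first (assumes the same map1/map2 variables before they are cleared, and a java.util.TreeSet import):

// Intersect the key sets, then read both maps once per shared key.
Set<Integer> shared = new TreeSet<>(map1.keySet());
shared.retainAll(map2.keySet());
for (int temp : shared) {
    array1.add((double) map1.get(temp));
    array2.add((double) map2.get(temp));
}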
From source file:com.eucalyptus.ws.handlers.WalrusAuthenticationHandler.java
private String getCanonicalizedAmzHeaders(MappingHttpRequest httpRequest) {
    String result = "";
    Set<String> headerNames = httpRequest.getHeaderNames();
    TreeMap<String, String> amzHeaders = new TreeMap<String, String>();
    for (String headerName : headerNames) {
        String headerNameString = headerName.toLowerCase().trim();
        if (headerNameString.startsWith("x-amz-")) {
            String value = httpRequest.getHeader(headerName).trim();
            // Collapse line continuations inside the header value into single spaces.
            String[] parts = value.split("\n");
            value = "";
            for (String part : parts) {
                part = part.trim();
                value += part + " ";
            }
            value = value.trim();
            if (amzHeaders.containsKey(headerNameString)) {
                String oldValue = amzHeaders.remove(headerNameString);
                oldValue += "," + value;
                amzHeaders.put(headerNameString, oldValue);
            } else {
                amzHeaders.put(headerNameString, value);
            }
        }
    }
    Iterator<String> iterator = amzHeaders.keySet().iterator();
    while (iterator.hasNext()) {
        String key = iterator.next();
        String value = amzHeaders.get(key);
        result += key + ":" + value + "\n";
    }
    return result;
}
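The remove/concatenate/put sequence above accumulates repeated x-amz-* headers into one comma-separated value, relying on the TreeMap to keep the headers in the sorted order the canonicalization requires. On Java 8+, Map.merge expresses the same accumulation in a single call; a sketch with the example's names:

// Equivalent accumulation of repeated headers (Java 8+): puts value if the key
// is absent, otherwise appends it to the existing entry.
amzHeaders.merge(headerNameString, value, (existing, added) -> existing + "," + added);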