List of usage examples for java.util SortedMap lastKey
K lastKey();
From source file:org.web4thejob.web.util.ToolbarRenderer.java
/**
 * Builds the toolbar for the owner's current command set, or tears it down.
 *
 * Renders only when no toolbar exists yet AND there is something to show;
 * an existing toolbar with nothing to show is disposed via {@code reset()}.
 * Commands are grouped by {@link CommandEnum} id: a single command gets its
 * own decorator, multiple commands under one id collapse into a dropdown.
 * Separators are inserted per the id's start/end-separator flags, skipping
 * a leading separator for the first id and a trailing one for the last.
 */
@Override
public void render() {
    final boolean isEmpty = isEmpty();
    if (toolbar != null && isEmpty) {
        // Toolbar exists but there are no commands left: dispose it.
        reset();
        return;
    } else if (toolbar != null || isEmpty) {
        // Already rendered, or nothing to render.
        return;
    }

    toolbar = new Toolbar();
    toolbar.setAlign(align);
    container.insertBefore(toolbar, container.getFirstChild());
    if (!HtmlViewPanel.class.isInstance(getPrimaryOwner())) {
        toolbar.setStyle("border-width: 0;");
    }

    final SortedMap<CommandEnum, List<Command>> map = mergeCommands();
    for (final CommandEnum id : map.keySet()) {
        // Hoisted: the original looked this list up to four times per iteration.
        final List<Command> commands = map.get(id);

        CommandDecorator commandDecorator = null;
        if (commands.size() == 1) {
            commandDecorator = getDecorator(commands.get(0));
        } else {
            // Multiple commands under the same id are collapsed into a dropdown.
            for (Command command : commands) {
                if (commandDecorator == null) {
                    commandDecorator = new DefaultDropdownCommandDecorator(command);
                } else {
                    ((DropdownCommandDecorator) commandDecorator).add(command);
                }
            }
        }

        if (id.isRequiresStartSeparator() && id != map.firstKey() && !isPreviousSeparator()) {
            addSeparator();
        }

        if (commandDecorator != null) {
            commandDecorator.attach(toolbar);
            commandDecorator.addMessageListener(this);
            commandDecorator.render();
        }

        // No trailing separator after the very last command group.
        if (id.isRequiresEndSeparator() && id != map.lastKey()) {
            addSeparator();
        }

        Space space = new Space();
        space.setSpacing("8px");
        space.setParent(toolbar);
    }
}
From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.feature.coreference.CoreferenceFeatures.java
/**
 * Extracts coreference-chain features for one sentence.
 *
 * For every chain in the document, determines whether the current sentence
 * participates in the chain, its position in the chain (start / middle / end),
 * chain length, distances to the previous/next sentence in the chain, and
 * type/token transition features between adjacent chain links. Aggregated
 * statistics are converted into {@link Feature}s at the end.
 */
@Override
protected List<Feature> extract(JCas jCas, Sentence sentence, String sentencePrefix)
        throws TextClassificationException {
    List<List<CoreferenceLink>> coreferenceChains = extractCoreferenceChains(jCas);
    // Counts of the binary / transition features accumulated over all chains.
    FrequencyDistribution<String> featuresAcrossAllChains = new FrequencyDistribution<>();
    DescriptiveStatistics chainLength = new DescriptiveStatistics();
    DescriptiveStatistics distanceToPreviousSentence = new DescriptiveStatistics();
    DescriptiveStatistics distanceToNextSentence = new DescriptiveStatistics();
    DescriptiveStatistics interSentencesCorLinks = new DescriptiveStatistics();
    for (List<CoreferenceLink> chain : coreferenceChains) {
        // Map: sentence position -> links of this chain in that sentence.
        SortedMap<Integer, List<CoreferenceLink>> sentencesAndLinks = extractSentencesAndLinksFromChain(chain,
                jCas);
        int currentSentencePos = getCurrentSentencePos(jCas, sentence);
        log.debug(sentencesAndLinks.keySet() + ", current " + currentSentencePos);
        // is the sentence in a chain that spans more sentences?
        boolean partOfChain = sentencesAndLinks.containsKey(currentSentencePos) && sentencesAndLinks.size() > 1;
        // is part of a chain?
        if (partOfChain) {
            log.debug(chainToString(chain));
            featuresAcrossAllChains.inc(FN_PART_OF_CHAIN);
            // starts the chain?
            if (sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                featuresAcrossAllChains.inc(FN_STARTS_THE_CHAIN);
            } else if (sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // ends the chain?
                featuresAcrossAllChains.inc(FN_ENDS_THE_CHAIN);
            } else {
                // in the middle of chain?
                featuresAcrossAllChains.inc(FN_IN_THE_MIDDLE_OF_CHAIN);
            }
            // length of the chain (number of sentences it touches)
            chainLength.addValue(sentencesAndLinks.size());
            List<CoreferenceLink> currentSentenceLinks = sentencesAndLinks.get(currentSentencePos);
            CoreferenceLink currentSentenceFirstLink = currentSentenceLinks.get(0);
            CoreferenceLink currentSentenceLastLink = currentSentenceLinks.get(currentSentenceLinks.size() - 1);
            // transition FROM the previous link in the chain, e.g. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.firstKey().equals(currentSentencePos)) {
                // walk backwards to the nearest earlier sentence that has links in this chain
                List<CoreferenceLink> previousSentenceLinks = null;
                int prevSentNo = currentSentencePos;
                while (previousSentenceLinks == null && prevSentNo >= 0) {
                    prevSentNo--;
                    if (sentencesAndLinks.containsKey(prevSentNo)) {
                        previousSentenceLinks = sentencesAndLinks.get(prevSentNo);
                    }
                }
                // cannot happen if firstKey() < currentSentencePos, guarded above
                if (previousSentenceLinks == null) {
                    throw new IllegalStateException("Oops :))");
                }
                // distance to previous sentence
                distanceToPreviousSentence.addValue(currentSentencePos - prevSentNo);
                // get the last link from the previous sentence
                CoreferenceLink prevSentenceLastLink = previousSentenceLinks
                        .get(previousSentenceLinks.size() - 1);
                // add type - type transition
                String prevSentenceLastLinkReferenceType = prevSentenceLastLink.getReferenceType();
                String currentSentenceFirstLinkReferenceType = currentSentenceFirstLink.getReferenceType();
                String transitionType = prevSentenceLastLinkReferenceType + GLUE
                        + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TYPE + transitionType, 1);
                // add type - token transition
                String glueCoreferenceCurrentSentence = glueCoreferenceLinkTokens(currentSentenceFirstLink);
                String typeToken = prevSentenceLastLinkReferenceType + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TYPE_TOKEN + typeToken, 1);
                // add token - type transition
                String glueCoreferencePrevSentence = glueCoreferenceLinkTokens(prevSentenceLastLink);
                String tokenType = glueCoreferencePrevSentence + GLUE + currentSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TYPE + tokenType, 1);
                // add token - token transition
                String tokenToken = glueCoreferencePrevSentence + GLUE + glueCoreferenceCurrentSentence;
                featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN + tokenToken, 1);
                // exact matching token-token reference?
                if (glueCoreferencePrevSentence.equals(glueCoreferenceCurrentSentence)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_IN_TOKEN_TOKEN_MATCH, 1);
                }
            }
            // transition TO the next link in the chain, e.g. NOMINAL -> PRONOMINAL
            if (!sentencesAndLinks.lastKey().equals(currentSentencePos)) {
                // walk forwards to the nearest later sentence that has links in this chain
                List<CoreferenceLink> nextSentenceLinks = null;
                int nextSentNo = currentSentencePos;
                while (nextSentenceLinks == null && nextSentNo <= sentencesAndLinks.lastKey()) {
                    nextSentNo++;
                    if (sentencesAndLinks.containsKey(nextSentNo)) {
                        nextSentenceLinks = sentencesAndLinks.get(nextSentNo);
                    }
                }
                // cannot happen if lastKey() > currentSentencePos, guarded above
                if (nextSentenceLinks == null) {
                    throw new IllegalStateException("Oops :))");
                }
                // distance to next sentence
                distanceToNextSentence.addValue(nextSentNo - currentSentencePos);
                // get the first link from the next sentence
                CoreferenceLink nextSentenceFirstLink = nextSentenceLinks.get(0);
                // add type - type transition
                String currentSentenceLastLinkReferenceType = currentSentenceLastLink.getReferenceType();
                String nextSentenceFirstLinkReferenceType = nextSentenceFirstLink.getReferenceType();
                String transitionType = currentSentenceLastLinkReferenceType + GLUE
                        + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TYPE + transitionType, 1);
                // add token - type transition
                String glueCoreferenceCurrentSent = glueCoreferenceLinkTokens(currentSentenceLastLink);
                String typeToken = glueCoreferenceCurrentSent + GLUE + nextSentenceFirstLinkReferenceType;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TYPE + typeToken, 1);
                // add type - token transition
                String glueCoreferenceNextSent = glueCoreferenceLinkTokens(nextSentenceFirstLink);
                String tokenType = currentSentenceLastLinkReferenceType + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TYPE_TOKEN + tokenType, 1);
                // add token - token transition
                String tokenToken = glueCoreferenceCurrentSent + GLUE + glueCoreferenceNextSent;
                featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN + tokenToken, 1);
                // exact matching token-token reference?
                if (glueCoreferenceNextSent.equals(glueCoreferenceCurrentSent)) {
                    featuresAcrossAllChains.addSample(FN_TRANSITION_OUT_TOKEN_TOKEN_MATCH, 1);
                }
            }
        }
        // number of coreference links anchored in the current sentence for this chain
        if (sentencesAndLinks.containsKey(currentSentencePos)) {
            int coreferenceLinks = sentencesAndLinks.get(currentSentencePos).size();
            interSentencesCorLinks.addValue(coreferenceLinks);
        }
    }
    List<Feature> result = new ArrayList<>();
    log.debug(featuresAcrossAllChains);
    // distance statistics only emitted when at least one sample was collected
    if (distanceToNextSentence.getN() > 0) {
        log.debug("Next:" + distanceToNextSentence);
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MIN,
                distanceToNextSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_MAX,
                distanceToNextSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_NEXT_AVG,
                distanceToNextSentence.getMean()));
    }
    if (distanceToPreviousSentence.getN() > 0) {
        log.debug("Prev: " + distanceToPreviousSentence);
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MIN,
                distanceToPreviousSentence.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_MAX,
                distanceToPreviousSentence.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_DIST_TO_PREV_AVG,
                distanceToPreviousSentence.getMean()));
    }
    if (interSentencesCorLinks.getN() > 0) {
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MIN,
                interSentencesCorLinks.getMin()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_MAX,
                interSentencesCorLinks.getMax()));
        result.add(new Feature(sentencePrefix + FEATURE_NAME + FN_INTER_SENT_COR_AVG,
                interSentencesCorLinks.getMean()));
    }
    log.debug("----");
    // binary presence features for every accumulated key
    for (String feat : featuresAcrossAllChains.getKeys()) {
        // binary
        result.add(new Feature(sentencePrefix + FEATURE_NAME + feat, 1));
    }
    return result;
}
From source file:org.onebusaway.community_transit_gtfs.CommunityTransitGtfsFactory.java
/**
 * Creates and persists GTFS {@link StopTime}s for one trip from its
 * timepoint-position schedule, optionally interpolating times for stops
 * between timepoints.
 *
 * NOTE(review): if no item in the stop sequence yields a scheduled time,
 * {@code firstStopIndex} stays at Integer.MAX_VALUE, the second loop never
 * runs, and {@code first}/{@code last} remain null — the later
 * {@code first.isDepartureTimeSet()} call would NPE when
 * {@code _interpolateStopTimes} is enabled. Confirm upstream guarantees at
 * least one timepoint match (the size() < 2 guard does not ensure this).
 */
private void processStopTimesForTrip(Map<String, Integer> timepointPositions, PttTrip pttTrip, String tripIdRaw,
        RouteStopSequence stopSequence, Trip trip) throws ParseException {
    SortedMap<Integer, Integer> arrivalTimesByTimepointPosition = computeTimepointPositionToScheduleTimep(
            pttTrip);
    List<StopTime> stopTimes = new ArrayList<StopTime>();
    // Need at least two timepoints to define a usable schedule segment.
    if (arrivalTimesByTimepointPosition.size() < 2) {
        _log.warn("less than two timepoints specified for trip: id=" + trip.getId());
        return;
    }
    // Used only in diagnostics below.
    int firstTimepointPosition = arrivalTimesByTimepointPosition.firstKey();
    int lastTimepointPosition = arrivalTimesByTimepointPosition.lastKey();
    int firstStopIndex = Integer.MAX_VALUE;
    int lastStopIndex = Integer.MIN_VALUE;
    /**
     * Find the bounds on the set of stops that have stop times defined
     */
    List<RouteStopSequenceItem> items = stopSequence.getItems();
    for (int index = 0; index < items.size(); index++) {
        RouteStopSequenceItem item = items.get(index);
        Integer time = getScheduledTimeForTimepoint(item, timepointPositions, arrivalTimesByTimepointPosition);
        if (time != null) {
            firstStopIndex = Math.min(firstStopIndex, index);
            lastStopIndex = Math.max(lastStopIndex, index);
        }
    }
    StopTime first = null;
    StopTime last = null;
    // Emit a StopTime for every stop between the first and last timed stops,
    // renumbering stop sequences to start at 0.
    for (int index = firstStopIndex; index < lastStopIndex + 1; index++) {
        RouteStopSequenceItem item = items.get(index);
        Integer time = getScheduledTimeForTimepoint(item, timepointPositions, arrivalTimesByTimepointPosition);
        Stop stop = _dao.getStopForId(id(Long.toString(item.getStopId())));
        StopTime stopTime = new StopTime();
        stopTime.setStop(stop);
        stopTime.setStopSequence(index - firstStopIndex);
        stopTime.setTrip(trip);
        if ("N".equals(item.getBoarding())) {
            // timepoint -- not for pickup/drop off
            stopTime.setDropOffType(1);
            stopTime.setPickupType(1);
        }
        // Only timepoint stops get explicit times; others stay unset for interpolation.
        if (time != null) {
            stopTime.setArrivalTime(time);
            stopTime.setDepartureTime(time);
        }
        _dao.saveEntity(stopTime);
        stopTimes.add(stopTime);
        if (first == null)
            first = stopTime;
        last = stopTime;
    }
    if (this._interpolateStopTimes) {
        List<ShapePoint> shapePoints = findShapes(trip.getShapeId());
        List<StopTimeEntryImpl> stopTimeEntries = ensureStopTimesHaveShapeDistanceTraveledSet(trip.getShapeId(),
                stopTimes, shapePoints);
        ensureStopTimesHaveTimesSet(stopTimes, stopTimeEntries);
        // now copy values back to stopTime models
        int i = 0;
        for (StopTimeEntryImpl e : stopTimeEntries) {
            StopTime m = stopTimes.get(i);
            m.setArrivalTime(e.getArrivalTime());
            m.setDepartureTime(e.getDepartureTime());
            i++;
        }
        // Diagnostics: interpolation should have filled in the boundary times.
        if (!first.isDepartureTimeSet()) {
            _log.warn("departure time for first StopTime is not set: stop=" + first.getStop().getId() + " trip="
                    + tripIdRaw + " firstPosition=" + firstTimepointPosition + " lastPosition="
                    + lastTimepointPosition);
            for (RouteStopSequenceItem item : stopSequence)
                _log.warn(" stop=" + item.getStopId() + " timepoint=" + item.getTimePoint() + " pos="
                        + timepointPositions.get(item.getTimePoint()));
        }
        if (!last.isArrivalTimeSet()) {
            _log.warn("arrival time for last StopTime is not set: stop=" + last.getStop().getId() + " trip="
                    + tripIdRaw + " firstPosition=" + firstTimepointPosition + " lastPosition="
                    + lastTimepointPosition);
            for (RouteStopSequenceItem item : stopSequence)
                _log.warn(" stop=" + item.getStopId() + " timepoint=" + item.getTimePoint() + " pos="
                        + timepointPositions.get(item.getTimePoint()));
        }
    }
}
From source file:hudson.model.Job.java
/** * Returns the oldest build in the record. * @see LazyBuildMixIn#getFirstBuild/*from w w w.j ava 2 s .co m*/ */ @Exported @QuickSilver public RunT getFirstBuild() { SortedMap<Integer, ? extends RunT> runs = _getRuns(); if (runs.isEmpty()) return null; return runs.get(runs.lastKey()); }
From source file:hudson.model.Job.java
/** * Gets the youngest build #m that satisfies <tt>n<=m</tt>. * //from w w w. j a v a 2s . com * This is useful when you'd like to fetch a build but the exact build might * be already gone (deleted, rotated, etc.) * @see LazyBuildMixIn#getNearestBuild */ public RunT getNearestBuild(int n) { SortedMap<Integer, ? extends RunT> m = _getRuns().headMap(n - 1); // the map should // include n, so n-1 if (m.isEmpty()) return null; return m.get(m.lastKey()); }
From source file:org.omnaest.utils.table.TableTest.java
/**
 * Verifies that an index built from an arbitrary key extractor is a live
 * SortedMap view: it tracks row removal, element updates, and table clears,
 * and its headMap/tailMap sub-views reflect the current contents.
 * Assertion order matters — each block mutates the table and checks the
 * index's reaction.
 */
@Test
public void testIndexOfArbitraryKeyExtractor() {
    Table<String> table = this.filledTable(100, 5);
    // Key = integer prefix of column 1, which filledTable formats as "<row>:<col>".
    KeyExtractor<Integer, RowDataReader<String>> keyExtractor = new KeyExtractor<Integer, RowDataReader<String>>() {
        private static final long serialVersionUID = -4201644938610833630L;

        @Override
        public Integer extractKey(RowDataReader<String> rowDataReader) {
            String[] elements = rowDataReader.getElements();
            String[] tokens = elements[1].split(":");
            return Integer.valueOf(tokens[0]);
        }
    };
    SortedMap<Integer, Set<Row<String>>> sortedMap = table.index().of(keyExtractor);
    {
        // Initial state: one key per row.
        assertNotNull(sortedMap);
        assertEquals(table.rowSize(), sortedMap.size());
        assertTrue(sortedMap.containsKey(0));
    }
    table.removeRow(0);
    {
        // Index is live: removal drops key 0; updating an element adds key 101.
        assertFalse(sortedMap.containsKey(0));
        assertTrue(sortedMap.containsKey(1));
        assertFalse(sortedMap.containsKey(101));
        table.setElement(0, 1, "101:88");
        assertTrue(sortedMap.containsKey(101));
        Set<Row<String>> rowSet = sortedMap.get(101);
        assertEquals(1, rowSet.size());
    }
    {
        // Requesting the same extractor's index again returns the cached instance.
        assertSame(sortedMap, table.index().of(keyExtractor));
    }
    table.setRowElements(1, "0:0", "200:0");
    {
        assertTrue(sortedMap.containsKey(200));
    }
    {
        // tailMap(90): keys 90..100 plus the synthetic 101 and 200 added above.
        SortedMap<Integer, Set<Row<String>>> tailMap = sortedMap.tailMap(90);
        assertEquals(100 - 90 + 2, tailMap.size());
        assertEquals(90, tailMap.firstKey().intValue());
        assertEquals(200, tailMap.lastKey().intValue());
    }
    {
        // headMap(10): keys below 10, minus the removed/remapped rows (0, 1, 2).
        SortedMap<Integer, Set<Row<String>>> headMap = sortedMap.headMap(10);
        assertEquals(9 - 2, headMap.size());
        assertEquals(3, headMap.firstKey().intValue());
        assertEquals(9, headMap.lastKey().intValue());
    }
    {
        // Clearing the table empties the live index too.
        table.clear();
        assertTrue(sortedMap.isEmpty());
    }
}
From source file:org.mitre.ccv.canopy.CcvCanopyCluster.java
/**
 * Sets the canopy thresholds t1 and t2 from the pairwise-distance
 * distribution: it histograms all pairwise distances, runs a peak search,
 * and derives t1 (loose) and t2 (tight) from the two lowest-distance peaks.
 *
 * Issues/Pitfalls:
 * <ol>
 * <ul>t2 might be too small and nothing is removed from the list
 * <ul>t1 might be too large and everything is added to a canopy
 * </ol>
 *
 * NOTE(review): if the matrix has fewer than two columns no distances are
 * produced and {@code sortMap.firstKey()} throws NoSuchElementException;
 * also the bare {@code throw new Exception()} carries no message — confirm
 * whether callers rely on either behavior.
 *
 * @return {t1, t2}
 * @todo: figure out how to select threshold1 (not too big, not too small)
 */
public double[] autoThreshold() throws Exception {
    LOG.info("autoThreshold: Generating distance distribution");
    // A reverse-ordered map was tried and abandoned:
    //SortedMap<Double, Integer> sortMap = new TreeMap<Double, Integer>(new ReverseDoubleComparator());
    SortedMap<Double, Integer> sortMap = new TreeMap<Double, Integer>();
    // generate all the pairwise distances (frequency per distinct distance)
    final int size = completeMatrix.getMatrix().getColumnDimension();
    for (int i = 0; i < size; ++i) {
        for (int j = i + 1; j < size; ++j) { // only calculate one triangle not full!
            Double d = this.cheapMetric.distance(i, j);
            if (sortMap.containsKey(d)) {
                sortMap.put(d, sortMap.get(d) + 1);
            } else {
                sortMap.put(d, 1);
            }
        }
    }
    /**
     * To plot the distribution with gnuplot:
     * $gnuplot
     * > set nokey
     * > set xlabel "Pairwise distance"
     * > set ylabel "Number of samples"
     * > plot "output.txt" using 1:2
     */
    // Debug loop: the printf of each (distance, count) pair is disabled.
    for (Iterator<Entry<Double, Integer>> i = sortMap.entrySet().iterator(); i.hasNext();) {
        Entry<Double, Integer> entry = i.next();
        //System.out.printf("%f\t%d\n", entry.getKey(), entry.getValue());
    }
    /**
     * How many bins per samples do we want?
     * Using the two end cases at lower and upper bounds.
     */
    TH1D hist = new TH1D(completeMatrix.getMatrix().getColumnDimension() * 2, sortMap.firstKey(),
            sortMap.lastKey());
    LOG.info(String.format("autoThreshold: Packing into histogram with %d bins (%f, %f)", hist.getBins().length,
            hist.getLower(), hist.getUpper()));
    hist.pack(sortMap);
    int[] bins = hist.getBins();
    if (LOG.isDebugEnabled()) {
        if (hist.getNumberOverflows() != 0) {
            LOG.debug(
                    String.format("autoThreshold: Have %d overflows in histogram!", hist.getNumberOverflows()));
        }
        if (hist.getNumberUnderflows() != 0) {
            LOG.debug(String.format("autoThreshold: Have %d underflows in histogram!",
                    hist.getNumberUnderflows()));
        }
    }
    // print out histogram bins (debug output disabled)
    for (int i = 0; i < bins.length; i++) {
        //System.out.printf("%f\t%d\n", hist.getBinCenter(i), hist.getBinContent(i));
    }
    TSpectrum spectrum = new TSpectrum(); // use default values (sigma = 1, threshold = 0.5
    int numFound = spectrum.search(hist);
    LOG.info(String.format("autoThreshold: Found %d peaks", numFound));
    if (numFound == 0) {
        LOG.fatal("autoThreshold: No peaks found in data!");
        throw new Exception();
    }
    double xpeaks[] = spectrum.getPostionX();
    double[] rtn = new double[2]; // t1, t2
    if (numFound == 1) {
        // Single peak: t1 is (roughly) the peak position, t2 halfway to the lower bound.
        int bin = hist.findBin(xpeaks[0]);
        // is this in the top or bottom half?
        // @todo: must be better way than this hack
        if (bin > 0) {
            bin--;
        }
        rtn[0] = hist.getBinCenter(bin); // threshold1 is only peak
        rtn[1] = (hist.getLower() + rtn[0]) / 2;
        return rtn;
    }
    // more than one peak
    /**
     * Several possible strategies exist (pick t1 then t2, or vice versa; use
     * the two largest peaks directly). The approach below instead picks the
     * two lowest-distance peaks: the smallest becomes t2, the next one t1.
     */
    // find the peak with the smallest position; this will be the basis for t2
    double minPeakX = hist.getUpper();
    int minPeakI = -1;
    for (int i = 0; i < numFound; i++) {
        final double x = xpeaks[i];
        if (x < minPeakX) {
            minPeakX = x;
            minPeakI = i;
        }
    }
    // find the next peak above the smallest
    // should try using something about the average and standard deviation
    // of the distribution of entries in picking this
    double min2PeakX = hist.getUpper();
    int min2PeakI = -1; // retained for the bin-adjustment variants below (currently disabled)
    for (int i = 0; i < numFound; i++) {
        final double x = xpeaks[i];
        if (i != minPeakI && x < min2PeakX) { // should check that it isn't equal or within sigma
            min2PeakX = x;
            min2PeakI = i;
        }
    }
    /**
     * Disabled variants that nudged t1/t2 to adjacent histogram bins when the
     * two peaks are very close were removed here; the simple assignment below
     * is the active behavior.
     */
    rtn[0] = min2PeakX;
    rtn[1] = minPeakX;
    // print out what we found
    for (int p = 0; p < numFound; p++) {
        double xp = xpeaks[p];
        int bin = hist.findBin(xp);
        int yp = hist.getBinContent(bin); // double yp
        System.err.printf("%d\t%f\t%d\n", bin, xp, yp);
        // if(yp- Math.sqrt(yp) < fline.eval(xp)) continue
    }
    return rtn;
}
From source file:org.nuxeo.launcher.connect.ConnectBroker.java
protected String getBestIdForNameInList(String pkgName, List<? extends Package> pkgList) { String foundId = null;//from w w w. j a va 2 s. co m SortedMap<Version, String> foundPkgs = new TreeMap<>(); SortedMap<Version, String> matchingPkgs = new TreeMap<>(); for (Package pkg : pkgList) { if (pkg.getName().equals(pkgName)) { foundPkgs.put(pkg.getVersion(), pkg.getId()); if (Arrays.asList(pkg.getTargetPlatforms()).contains(targetPlatform)) { matchingPkgs.put(pkg.getVersion(), pkg.getId()); } } } if (matchingPkgs.size() != 0) { foundId = matchingPkgs.get(matchingPkgs.lastKey()); } else if (foundPkgs.size() != 0) { foundId = foundPkgs.get(foundPkgs.lastKey()); } return foundId; }
From source file:org.apache.accumulo.tserver.tablet.Tablet.java
/**
 * Picks a split point for this tablet, or returns {@code null} when the
 * tablet must not or cannot be split (root tablet, under the size threshold,
 * a previously-seen dominating row, I/O failure, or an over-long split row).
 *
 * Side effects: on a "big row" outcome it sets {@code sawBigRow} and records
 * the compaction/import timestamps so the check is skipped until new data
 * arrives.
 */
private SplitRowSpec findSplitRow(Collection<FileRef> files) {
    // never split the root tablet
    // check if we already decided that we can never split
    // check to see if we're big enough to split
    long splitThreshold = tableConfiguration.getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
    long maxEndRow = tableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE);
    if (extent.isRootTablet() || estimateTabletSize() <= splitThreshold) {
        return null;
    }
    // have seen a big row before, do not bother checking unless a minor compaction or map file import has occurred.
    if (sawBigRow) {
        if (timeOfLastMinCWhenBigFreakinRowWasSeen != lastMinorCompactionFinishTime
                || timeOfLastImportWhenBigFreakinRowWasSeen != lastMapFileImportTime) {
            // a minor compaction or map file import has occurred... check again
            sawBigRow = false;
        } else {
            // nothing changed, do not split
            return null;
        }
    }
    // Map from split ratio (fraction of data below the key) to candidate key.
    SortedMap<Double, Key> keys = null;
    try {
        // we should make .25 below configurable
        keys = FileUtil.findMidPoint(getTabletServer().getFileSystem(), getTabletServer().getConfiguration(),
                extent.getPrevEndRow(), extent.getEndRow(), FileUtil.toPathStrings(files), .25);
    } catch (IOException e) {
        log.error("Failed to find midpoint " + e.getMessage());
        return null;
    }
    // check to see if one row takes up most of the tablet, in which case we can not split
    try {
        Text lastRow;
        if (extent.getEndRow() == null) {
            Key lastKey = (Key) FileUtil.findLastKey(getTabletServer().getFileSystem(),
                    getTabletServer().getConfiguration(), files);
            lastRow = lastKey.getRow();
        } else {
            lastRow = extent.getEndRow();
        }
        // We expect to get a midPoint for this set of files. If we don't get one, we have a problem.
        final Key mid = keys.get(.5);
        if (null == mid) {
            throw new IllegalStateException("Could not determine midpoint for files");
        }
        // check to see that the midPoint is not equal to the end key
        if (mid.compareRow(lastRow) == 0) {
            // Midpoint row equals the end row: try an earlier candidate ratio instead.
            if (keys.firstKey() < .5) {
                Key candidate = keys.get(keys.firstKey());
                if (candidate.getLength() > maxEndRow) {
                    log.warn("Cannot split tablet " + extent + ", selected split point too long.  Length : "
                            + candidate.getLength());
                    sawBigRow = true;
                    timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
                    timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
                    return null;
                }
                if (candidate.compareRow(lastRow) != 0) {
                    // we should use this ratio in split size estimations
                    if (log.isTraceEnabled())
                        log.trace(String.format(
                                "Splitting at %6.2f instead of .5, row at .5 is same as end row%n",
                                keys.firstKey()));
                    return new SplitRowSpec(keys.firstKey(), candidate.getRow());
                }
            }
            // No usable earlier candidate: one row dominates the tablet.
            log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
            sawBigRow = true;
            timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
            timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
            return null;
        }
        Text text = mid.getRow();
        // Shorten the split row to the minimal prefix that still sorts after the
        // last row below the midpoint.
        SortedMap<Double, Key> firstHalf = keys.headMap(.5);
        if (firstHalf.size() > 0) {
            Text beforeMid = firstHalf.get(firstHalf.lastKey()).getRow();
            Text shorter = new Text();
            int trunc = longestCommonLength(text, beforeMid);
            shorter.set(text.getBytes(), 0, Math.min(text.getLength(), trunc + 1));
            text = shorter;
        }
        if (text.getLength() > maxEndRow) {
            log.warn("Cannot split tablet " + extent + ", selected split point too long.  Length : "
                    + text.getLength());
            sawBigRow = true;
            timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
            timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
            return null;
        }
        return new SplitRowSpec(.5, text);
    } catch (IOException e) {
        // don't split now, but check again later
        log.error("Failed to find lastkey " + e.getMessage());
        return null;
    }
}
From source file:richtercloud.reflection.form.builder.fieldhandler.MappingFieldHandler.java
/**
 * Figures out candidates which the longest common prefix in the
 * {@code fieldParameterizedType} chain of (nested) generic types ignoring
 * specifications of {@link AnyType}. Then determines the candidates with
 * the smallest number of {@link AnyType} specifications in the chain. If
 * there're multiple with the same number of {@link AnyType} chooses the
 * first it finds which might lead to random choices.
 *
 * NOTE(review): the IllegalArgumentException message below is missing its
 * closing parenthesis — the string literal is left untouched here.
 *
 * @param fieldParameterizedType the chain of generic types (remember to
 * retrieve this information with {@link Field#getGenericType() } instead of
 * {@link Field#getType() } from fields)
 * @return the choice result as described above or {@code null} if no
 * candidate exists
 */
protected Type retrieveClassMappingBestMatch(ParameterizedType fieldParameterizedType) {
    //check in a row (walking a tree doesn't make sense because it's
    //agnostic of the position of the type
    // candidates: match-prefix-length -> mapping types matching that many
    // leading type arguments; TreeMap keeps lengths sorted so lastKey() is
    // the longest prefix seen.
    SortedMap<Integer, List<ParameterizedType>> candidates = new TreeMap<>(); //TreeMap is a SortedMap
    for (Type mappingType : classMapping.keySet()) {
        if (!(mappingType instanceof ParameterizedType)) {
            continue;
        }
        ParameterizedType mappingParameterizedType = (ParameterizedType) mappingType;
        // Raw types must agree before comparing type arguments at all.
        if (!mappingParameterizedType.getRawType().equals(fieldParameterizedType.getRawType())) {
            continue;
        }
        Type[] parameterizedTypeArguments = mappingParameterizedType.getActualTypeArguments();
        Type[] fieldParameterizedTypeArguments = fieldParameterizedType.getActualTypeArguments();
        for (int i = 0; i < Math.min(parameterizedTypeArguments.length,
                fieldParameterizedTypeArguments.length); i++) {
            // AnyType is a mapping-side placeholder only; fields must not use it.
            if (fieldParameterizedTypeArguments[i].equals(AnyType.class)) {
                throw new IllegalArgumentException(String.format(
                        "type %s must only be used to declare placeholders in class mapping, not in classes (was used in field type %s",
                        AnyType.class, fieldParameterizedType));
            }
            // only compare raw type to raw type in the chain
            Type fieldParameterizedTypeArgument = fieldParameterizedTypeArguments[i];
            if (fieldParameterizedTypeArgument instanceof ParameterizedType) {
                fieldParameterizedTypeArgument = ((ParameterizedType) fieldParameterizedTypeArgument)
                        .getRawType();
            }
            Type parameterizedTypeArgument = parameterizedTypeArguments[i];
            if (parameterizedTypeArgument instanceof ParameterizedType) {
                parameterizedTypeArgument = ((ParameterizedType) parameterizedTypeArgument).getRawType();
            }
            //record AnyType matches as well
            boolean anyTypeMatch = AnyType.class.equals(parameterizedTypeArgument); //work around sucky debugger
            // Stop extending this mapping's prefix on the first mismatch.
            if (!parameterizedTypeArgument.equals(fieldParameterizedTypeArgument) && !anyTypeMatch) {
                break;
            }
            // The mapping is recorded at every prefix length it matches, so the
            // longest-prefix bucket ends up containing the best matches.
            int matchCount = i + 1;
            List<ParameterizedType> candidateList = candidates.get(matchCount);
            if (candidateList == null) {
                candidateList = new LinkedList<>();
                candidates.put(matchCount, candidateList);
            }
            candidateList.add(mappingParameterizedType);
        }
    }
    if (candidates.isEmpty()) {
        return null; //avoid NoSuchElementException
    }
    // Among the longest-prefix candidates, prefer the one using the fewest
    // AnyType placeholders (i.e. the most specific mapping).
    List<ParameterizedType> higestCandidatesList = candidates.get(candidates.lastKey());
    int lowestAnyCount = Integer.MAX_VALUE;
    ParameterizedType lowestAnyCountCandidate = null;
    for (ParameterizedType highestCandidateCandidate : higestCandidatesList) {
        int highestCandidateCandidateAnyCount = retrieveAnyCountRecursively(highestCandidateCandidate);
        if (highestCandidateCandidateAnyCount < lowestAnyCount) {
            lowestAnyCount = highestCandidateCandidateAnyCount;
            lowestAnyCountCandidate = highestCandidateCandidate;
        }
    }
    return lowestAnyCountCandidate;
}