List of usage examples for java.util.Collections.min
public static <T extends Object & Comparable<? super T>> T min(Collection<? extends T> coll)
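Before the real-world examples, here is a minimal, self-contained sketch of how Collections.min is typically called, both with natural ordering and with an explicit Comparator. The class name and values are made up for illustration only.

import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class CollectionsMinDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(42, 7, 19);
        // Natural ordering: smallest element according to Comparable
        Integer smallest = Collections.min(numbers); // 7

        List<String> words = Arrays.asList("pear", "fig", "banana");
        // Explicit Comparator: shortest string rather than the lexicographic minimum
        String shortest = Collections.min(words, Comparator.comparingInt(String::length)); // "fig"

        System.out.println(smallest + " " + shortest);

        // Note: Collections.min throws NoSuchElementException on an empty collection,
        // which is why several examples below guard with isEmpty()/size() checks first.
    }
}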
From source file:org.spotter.ext.detection.highmessaging.analyze.LogAnalyzer.java
@Override
public AnalyzeResult analyze() {
    result.addMessage("*************************************************");
    result.addMessage("Testing data for stagnating progression..");
    TrendLine logTrend = new LogTrendLine();
    logTrend.setValues(toDoubleArray(normalize(yValues, Collections.min(yValues))), toDoubleArray(xValues));
    List<Double> smoothed = smooth(yValues, SMOOTH_WIDE);

    SimpleRegression first = new SimpleRegression();
    first.addData(toDoubleArray(subList(xValues, 0, xValues.size() / 2), subList(smoothed, 0, xValues.size() / 2)));
    SimpleRegression second = new SimpleRegression();
    second.addData(toDoubleArray(subList(xValues, xValues.size() / 2), subList(smoothed, xValues.size() / 2)));

    result.addMessage("> Slope 0%-50%: " + first.getSlope());
    result.addMessage("> Slope 50%-100%: " + second.getSlope());
    result.addMessage("> Threshold: " + maxSlopeFactor * first.getSlope());

    if (Math.abs(second.getSlope()) < maxSlopeFactor * first.getSlope()) {
        result.addMessage("> detected");
        return AnalyzeResult.POSITIVE;
    } else {
        result.addMessage("> not detected");
        return AnalyzeResult.NEGATIVE;
    }
}
From source file:org.apache.lens.cube.parse.LightestFactResolver.java
@Override
public void rewriteContext(CubeQueryContext cubeql) throws SemanticException {
    if (cubeql.getCube() != null && !cubeql.getCandidateFactSets().isEmpty()) {
        Map<Set<CandidateFact>, Double> factWeightMap = new HashMap<Set<CandidateFact>, Double>();
        for (Set<CandidateFact> facts : cubeql.getCandidateFactSets()) {
            factWeightMap.put(facts, getWeight(facts));
        }
        double minWeight = Collections.min(factWeightMap.values());
        for (Iterator<Set<CandidateFact>> i = cubeql.getCandidateFactSets().iterator(); i.hasNext();) {
            Set<CandidateFact> facts = i.next();
            if (factWeightMap.get(facts) > minWeight) {
                LOG.info("Not considering facts:" + facts
                        + " from candidate fact tables as it has more fact weight:" + factWeightMap.get(facts)
                        + " minimum:" + minWeight);
                i.remove();
            }
        }
        cubeql.pruneCandidateFactWithCandidateSet(CandidateTablePruneCode.MORE_WEIGHT);
    }
}
From source file:org.apache.lens.cube.parse.LeastPartitionResolver.java
@Override
public void rewriteContext(CubeQueryContext cubeql) throws SemanticException {
    if (cubeql.getCube() != null && !cubeql.getCandidateFactSets().isEmpty()) {
        Map<Set<CandidateFact>, Integer> factPartCount = new HashMap<Set<CandidateFact>, Integer>();
        for (Set<CandidateFact> facts : cubeql.getCandidateFactSets()) {
            factPartCount.put(facts, getPartCount(facts));
        }
        double minPartitions = Collections.min(factPartCount.values());
        for (Iterator<Set<CandidateFact>> i = cubeql.getCandidateFactSets().iterator(); i.hasNext();) {
            Set<CandidateFact> facts = i.next();
            if (factPartCount.get(facts) > minPartitions) {
                LOG.info("Not considering facts:" + facts
                        + " from candidate fact tables as it requires more partitions to be queried:"
                        + factPartCount.get(facts) + " minimum:" + minPartitions);
                i.remove();
            }
        }
        cubeql.pruneCandidateFactWithCandidateSet(CandidateTablePruneCode.MORE_PARTITIONS);
    }
}
From source file:org.opensourcebank.batch.partition.HazelcastMapPartitioner.java
public Map<String, ExecutionContext> partition(int gridSize) {
    Map<Long, Object> itemsMap = Hazelcast.getMap(mapName);
    Set<Long> itemsIds = itemsMap.keySet();

    long min = 0;
    long max = 0;
    if (itemsIds.size() > 0) {
        min = Collections.min(itemsIds);
        max = Collections.max(itemsIds);
    }

    long targetSize = (max - min) / gridSize + 1;
    Map<String, ExecutionContext> result = new HashMap<String, ExecutionContext>();

    int number = 0;
    long start = min;
    long end = start + targetSize - 1;
    while (start <= max) {
        ExecutionContext value = new ExecutionContext();
        result.put("partition" + number, value);
        if (end >= max) {
            end = max;
        }
        value.putLong("fromId", start);
        value.putLong("toId", end);
        value.putString("mapName", mapName);
        start += targetSize;
        end += targetSize;
        number++;
    }
    return result;
}
From source file:org.zkoss.ganttz.data.GanttDate.java
public static GanttDate min(GanttDate... dates) {
    return Collections.min(Arrays.asList(dates));
}
From source file:com.yimidida.shards.strategy.exit.impl.AggregateExitOperation.java
@Override
public List<Object> apply(List<Object> result) {
    if (result.size() == 0) {
        return Lists.newArrayList((Object) 0);
    }
    // String className = result.get(0).getClass().getName();
    List<Object> nonNullResults = ExitOperationUtils.getNonNullList(result);
    switch (aggregate) {
    case MAX:
        return Collections
                .<Object>singletonList(Collections.max(ExitOperationUtils.getComparableList(nonNullResults)));
    case MIN:
        return Collections
                .<Object>singletonList(Collections.min(ExitOperationUtils.getComparableList(nonNullResults)));
    case SUM:
        return Collections.<Object>singletonList(getSum(nonNullResults, null).intValue());
    default:
        log.error("Aggregation Projection is unsupported: " + aggregate);
        throw new UnsupportedOperationException("Aggregation Projection is unsupported: " + aggregate);
    }
}
From source file:org.apache.hadoop.mapreduce.lib.partition.TestRehashPartitioner.java
/** test partitioner for patterns */
@Test
public void testPatterns() {
    int results[] = new int[PARTITIONS];
    RehashPartitioner<IntWritable, NullWritable> p = new RehashPartitioner<IntWritable, NullWritable>();
    /* test sequence 4, 8, 12, ... 128 */
    for (int i = 0; i < END; i += STEP) {
        results[p.getPartition(new IntWritable(i), null, PARTITIONS)]++;
    }
    int badbuckets = 0;
    Integer min = Collections.min(Arrays.asList(ArrayUtils.toObject(results)));
    Integer max = Collections.max(Arrays.asList(ArrayUtils.toObject(results)));
    Integer avg = (int) Math.round((max + min) / 2.0);
    System.out.println("Dumping buckets distribution: min=" + min + " avg=" + avg + " max=" + max);
    for (int i = 0; i < PARTITIONS; i++) {
        double var = (results[i] - avg) / (double) (avg);
        System.out.println("bucket " + i + " " + results[i] + " items, variance " + var);
        if (Math.abs(var) > MAX_ERROR) {
            badbuckets++;
        }
    }
    System.out.println(badbuckets + " of " + PARTITIONS + " are too small or large buckets");
    assertTrue("too many overflow buckets", badbuckets < PARTITIONS * MAX_BADBUCKETS);
}
From source file:org.wallerlab.yoink.molecular.service.calculator.ClosestDistanceToMoleculeCalculator.java
/**
 * Calculate the minimum distance between a coordinate and a molecule.
 *
 * @param gridCoord
 *            -{@link org.wallerlab.yoink.api.model.molecular.Coord}
 * @param molecule
 *            -{@link org.wallerlab.yoink.api.model.molecular.Molecule}
 * @return distance -{@link java.lang.Double}
 */
public Double calculate(Coord gridCoord, Molecule molecule) {
    List<Double> distances = new ArrayList<Double>();
    for (Atom atom : molecule.getAtoms()) {
        double tempdistance = distanceCalculator.calculate(gridCoord, atom);
        distances.add(tempdistance);
    }
    double distance = Collections.min(distances);
    return distance;
}
From source file:org.mrgeo.aggregators.MinAvgPairAggregator.java
@Override
public double aggregate(double[] values, double nodata) {
    boolean data0 = Double.compare(values[0], nodata) != 0;
    boolean data1 = Double.compare(values[1], nodata) != 0;
    boolean data2 = Double.compare(values[2], nodata) != 0;
    boolean data3 = Double.compare(values[3], nodata) != 0;

    Collection<Double> averages = new ArrayList<Double>();
    if (data0 && data1) averages.add(Double.valueOf((values[0] + values[1]) / 2));
    if (data0 && data2) averages.add(Double.valueOf((values[0] + values[2]) / 2));
    if (data0 && data3) averages.add(Double.valueOf((values[0] + values[3]) / 2));
    if (data1 && data2) averages.add(Double.valueOf((values[1] + values[2]) / 2));
    if (data1 && data3) averages.add(Double.valueOf((values[1] + values[3]) / 2));
    if (data2 && data3) averages.add(Double.valueOf((values[2] + values[3]) / 2));

    return (averages.isEmpty()) ? nodata : Collections.min(averages).doubleValue();
}
From source file:org.apache.lens.cube.parse.LightestDimensionResolver.java
@Override
public void rewriteContext(CubeQueryContext cubeql) throws SemanticException {
    if (!cubeql.getCandidateDimTables().isEmpty()) {
        for (Map.Entry<Dimension, Set<CandidateDim>> entry : cubeql.getCandidateDimTables().entrySet()) {
            if (entry.getValue().isEmpty()) {
                continue;
            }
            Map<CandidateDim, Double> dimWeightMap = new HashMap<CandidateDim, Double>();
            for (CandidateDim dim : entry.getValue()) {
                dimWeightMap.put(dim, dim.dimtable.weight());
            }
            double minWeight = Collections.min(dimWeightMap.values());
            for (Iterator<CandidateDim> i = entry.getValue().iterator(); i.hasNext();) {
                CandidateDim dim = i.next();
                if (dimWeightMap.get(dim) > minWeight) {
                    LOG.info("Not considering dimtable:" + dim
                            + " from candidate dimension tables as it has more weight:" + dimWeightMap.get(dim)
                            + " minimum:" + minWeight);
                    cubeql.addDimPruningMsgs(entry.getKey(), dim.dimtable,
                            new CandidateTablePruneCause(CandidateTablePruneCode.MORE_WEIGHT));
                    i.remove();
                }
            }
        }
    }
}