Example usage for java.util Collections max

List of usage examples for java.util Collections max

Introduction

On this page you can find example usages of java.util.Collections.max.

Prototype

public static <T extends Object & Comparable<? super T>> T max(Collection<? extends T> coll) 

Document

Returns the maximum element of the given collection, according to the natural ordering of its elements.
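
A minimal, self-contained sketch of the call shown in the prototype above (the class name and list contents are illustrative, not taken from the examples below):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class CollectionsMaxDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(3, 9, 4);
        // Integer uses natural ordering, so 9 is the maximum.
        System.out.println(Collections.max(numbers)); // prints 9
        // Collections.max throws NoSuchElementException on an empty collection,
        // a case several of the examples below guard against.
    }
}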

Usage

From source file:be.ugent.maf.cellmissy.gui.controller.analysis.doseresponse.generic.GenericDRInputController.java

/**
 * Create model for analysis group table (bottom one). Contains only the
 * data the user has chosen to analyze.
 *
 * @return the table model holding the selected dose-response data
 */
private NonEditableTableModel createTableModel(GenericDoseResponseAnalysisGroup analysisGroup) {
    List<DoseResponsePair> dataToShow = analysisGroup.getDoseResponseData();
    // when all conditions have been removed, there is no data to show
    if (dataToShow.isEmpty()) {
        return new NonEditableTableModel();
    }
    // the number of columns depends on the maximum number of replicates in the dataset
    int maxReplicates = Collections.max(getNumberOfReplicates(dataToShow));
    Object[][] data = new Object[dataToShow.size()][maxReplicates + 1];

    for (int rowIndex = 0; rowIndex < dataToShow.size(); rowIndex++) {
        data[rowIndex][0] = dataToShow.get(rowIndex).getDose();
        for (int columnIndex = 1; columnIndex < maxReplicates + 1; columnIndex++) {
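            // conditions with fewer replicates than the maximum leave their remaining cells blank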
            try {
                data[rowIndex][columnIndex] = dataToShow.get(rowIndex).getResponses().get(columnIndex - 1);
            } catch (IndexOutOfBoundsException e) {
                data[rowIndex][columnIndex] = "";
            }
        }
    }

    // array of column names for table model
    String[] columnNames = new String[maxReplicates + 1];
    columnNames[0] = "Dose";
    for (int x = 1; x < columnNames.length; x++) {
        columnNames[x] = "Repl " + x;
    }

    NonEditableTableModel nonEditableTableModel = new NonEditableTableModel();
    nonEditableTableModel.setDataVector(data, columnNames);
    return nonEditableTableModel;
}

From source file:gate.plugin.learningframework.engines.EngineServer.java

@Override
public List<GateClassification> classify(AnnotationSet instanceAS, AnnotationSet inputAS,
        AnnotationSet sequenceAS, String parms) {
    Parms ps = new Parms(parms, "d:dense:b");
    boolean dense = (boolean) ps.getValueOrElse("dense", false);

    CorpusRepresentationMalletTarget data = (CorpusRepresentationMalletTarget) corpusRepresentationMallet;
    data.stopGrowth();
    int nrCols = data.getPipe().getDataAlphabet().size();
    //System.err.println("Running EngineSklearn.classify on document "+instanceAS.getDocument().getName());
    List<GateClassification> gcs = new ArrayList<GateClassification>();
    LFPipe pipe = (LFPipe) data.getRepresentationMallet().getPipe();
    ArrayList<String> classList = null;
    // If we have a classification problem, pre-calculate the class label list
    if (pipe.getTargetAlphabet() != null) {
        classList = new ArrayList<String>();
        for (int i = 0; i < pipe.getTargetAlphabet().size(); i++) {
            String labelstr = pipe.getTargetAlphabet().lookupObject(i).toString();
            classList.add(labelstr);
        }
    }
    // For now create a single request per document
    // eventually we could allow a parameter for sending a maximum number of 
    // instances per request.

    List<Annotation> instances = instanceAS.inDocumentOrder();
    List<double[]> valuesvec = new ArrayList<double[]>();
    List<int[]> indicesvec = new ArrayList<int[]>();
    List<Double> weights = new ArrayList<Double>();
    ObjectMapper mapper = new ObjectMapper();
    boolean haveWeights = false;
    for (Annotation instAnn : instances) {
        Instance inst = data.extractIndependentFeatures(instAnn, inputAS);

        inst = pipe.instanceFrom(inst);
        FeatureVector fv = (FeatureVector) inst.getData();
        //System.out.println("Mallet instance, fv: "+fv.toString(true)+", len="+fv.numLocations());

        // Convert to the vector representation we send to the process:
        // the "dense" parameter selects dense vectors, otherwise sparse ones are sent

        if (dense) {
            double[] values = new double[nrCols];
            for (int i = 0; i < nrCols; i++) {
                values[i] = fv.value(i);
            }
            valuesvec.add(values);
        } else {
            // To send a sparse vector, we need the indices and the values      
            int locs = fv.numLocations();
            int[] indices = new int[locs];
            double[] values = new double[locs];
            for (int i = 0; i < locs; i++) {
                indices[i] = fv.indexAtLocation(i);
                values[i] = fv.valueAtLocation(i);
            }
            valuesvec.add(values);
            indicesvec.add(indices);
        }
        double weight = Double.NaN;
        Object weightObj = inst.getProperty("instanceWeight");
        if (weightObj != null) {
            weight = (double) weightObj;
            haveWeights = true;
        }
        weights.add(weight);
    }
    // create the JSON for the request
    Map data4json = new HashMap<String, Object>();
    if (!dense)
        data4json.put("indices", indicesvec);
    data4json.put("values", valuesvec);
    data4json.put("n", nrCols);
    if (haveWeights)
        data4json.put("weights", weights);
    String json = null;
    try {
        json = mapper.writeValueAsString(data4json);
    } catch (JsonProcessingException ex) {
        throw new GateRuntimeException("Could not convert instances to json", ex);
    }
    //System.err.println("GOT JSON: "+json);

    HttpResponse<String> response;
    try {
        response = Unirest.post(serverUrl).header("accept", "application/json")
                .header("content-type", "application/json").body(json).asString();
    } catch (UnirestException ex) {
        throw new GateRuntimeException("Exception when connecting to the server", ex);
    }

    // The response should be either OK and JSON or not OK and an error message
    int status = response.getStatus();
    if (status != 200) {
        throw new GateRuntimeException(
                "Response from server is not OK, status=" + status + " msg=" + response.getBody());
    }
    //System.err.println("Got response, status is OK, data is: "+response.getBody());
    Map responseMap = null;
    try {
        // Parse the json
        responseMap = mapper.readValue(response.getBody(), HashMap.class);
    } catch (IOException ex) {
        Logger.getLogger(EngineServer.class.getName()).log(Level.SEVERE, null, ex);
    }

    // NOTE: the json created by the weka server currently automatically creates 1 instead
    // of 1.0 if the value is 1.0, and the parser then creates an Integer from this.
    // We could probably change the parsing behaviour to always create doubles somehow, but
    // for now we simply parse the arrays into Number first, then convert each vector into
    // a vector of Double.
    ArrayList<ArrayList<Number>> targets = (ArrayList<ArrayList<Number>>) responseMap.get("preds");

    GateClassification gc = null;

    // now go through all the instances again and do the target assignment from the vector(s) we got
    int instNr = 0;
    for (Annotation instAnn : instances) {
        if (pipe.getTargetAlphabet() == null) { // we have regression        
            gc = new GateClassification(instAnn, (double) targets.get(instNr).get(0));
        } else {
            ArrayList<Number> valsN = targets.get(instNr);
            ArrayList<Double> vals = new ArrayList<Double>(valsN.size());
            for (Number valN : valsN)
                vals.add(valN.doubleValue());
            double target = vals.get(0); // if vals contains just one value, this will be what to use
            if (vals.size() > 1) {
                // find the maximum probability and use the index as target
                double maxProb = Double.NEGATIVE_INFINITY;
                double bestIndex = -1;
                int curIdx = 0;
                for (double val : vals) {
                    if (val > maxProb) {
                        maxProb = val;
                        bestIndex = (double) curIdx;
                    }
                    curIdx++;
                } // for
                target = bestIndex;
            }
            int bestlabel = (int) target;
            String cl = pipe.getTargetAlphabet().lookupObject(bestlabel).toString();
            double bestprob = Double.NaN;
            if (vals.size() > 1) {
                bestprob = Collections.max(vals);
                gc = new GateClassification(instAnn, cl, bestprob, classList, vals);
            } else {
                // create a fake probability distribution with 1.0/0.0 probabilities
                ArrayList<Double> probs = new ArrayList<Double>(classList.size());
                for (int i = 0; i < classList.size(); i++) {
                    if (i == bestlabel)
                        probs.add(1.0);
                    else
                        probs.add(0.0);
                }
                gc = new GateClassification(instAnn, cl, bestprob, classList, probs);

            }
        }
        gcs.add(gc);
        instNr++;
    }
    data.startGrowth();
    return gcs;
}

From source file:edu.uga.cs.fluxbuster.features.FeatureCalculator.java

/**
 * Calculates the cluster novelty feature for each cluster generated
 * on a specific run date.
 *
 * @param log_date the run date
 * @param window the number of days previous to use in feature calculation
 * @return a table of values where the keys are cluster ids and the values 
 *       are the feature values
 * @throws SQLException if there is an error calculating the feature values
 */
public Map<Integer, Double> calculateNoveltyFeature(Date log_date, int window) throws SQLException {
    HashMap<Integer, Double> retval = new HashMap<Integer, Double>();
    ArrayList<Date> prevDates = getPrevDates(log_date, window);

    if (prevDates.size() > 0) {
        StringBuffer querybuf = new StringBuffer();
        Formatter formatter = new Formatter(querybuf);
        String curdatestr = df.format(log_date);
        formatter.format(properties.getProperty(NOVELTY_QUERY1_1KEY), curdatestr, curdatestr, curdatestr,
                curdatestr);
        for (Date prevDate : prevDates) {
            formatter.format(" " + properties.getProperty(NOVELTY_QUERY1_2KEY) + " ", df.format(prevDate));
        }
        formatter.format(properties.getProperty(NOVELTY_QUERY1_3KEY), curdatestr, curdatestr);

        ResultSet rs2 = null;
        Hashtable<Integer, Hashtable<String, Long>> new_resolved_ips = new Hashtable<Integer, Hashtable<String, Long>>();
        try {
            rs2 = dbi.executeQueryWithResult(querybuf.toString());
            while (rs2.next()) {
                int cluster_id = rs2.getInt(2);
                if (!new_resolved_ips.containsKey(cluster_id)) {
                    new_resolved_ips.put(cluster_id, new Hashtable<String, Long>());
                }
                String secondLevelDomainName = rs2.getString(1);
                long newips = rs2.getLong(3);
                Hashtable<String, Long> clustertable = new_resolved_ips.get(cluster_id);
                clustertable.put(secondLevelDomainName, newips);
            }
        } catch (Exception e) {
            if (log.isErrorEnabled()) {
                log.error(e);
            }
        } finally {
            if (rs2 != null && !rs2.isClosed()) {
                rs2.close();
            }
            formatter.close();
        }

        Hashtable<String, List<Integer>> numDays = new Hashtable<String, List<Integer>>();
        for (Date prevDate : prevDates) {
            String prevDateStr = df.format(prevDate);
            querybuf = new StringBuffer();
            formatter = new Formatter(querybuf);
            formatter.format(properties.getProperty(NOVELTY_QUERY2KEY), curdatestr, prevDateStr, curdatestr,
                    prevDateStr);
            ResultSet rs3 = null;
            try {
                rs3 = dbi.executeQueryWithResult(querybuf.toString());
                while (rs3.next()) {
                    String sldn = rs3.getString(1);
                    if (!numDays.containsKey(sldn)) {
                        numDays.put(sldn, new ArrayList<Integer>());
                    }
                    Date pd = rs3.getDate(2);
                    DateTime start = new DateTime(pd.getTime());
                    DateTime end = new DateTime(log_date.getTime());
                    Days d = Days.daysBetween(start, end);
                    int diffDays = d.getDays();
                    numDays.get(sldn).add(diffDays);
                }
            } catch (Exception e) {
                if (log.isErrorEnabled()) {
                    log.error(e);
                }
            } finally {
                if (rs3 != null && !rs3.isClosed()) {
                    rs3.close();
                }
                formatter.close();
            }
        }

        Hashtable<Integer, List<Float>> clusterValues = new Hashtable<Integer, List<Float>>();
        for (int clusterID : new_resolved_ips.keySet()) {
            clusterValues.put(clusterID, new ArrayList<Float>());

            Hashtable<String, Long> sldnValues = new_resolved_ips.get(clusterID);
            for (String sldn : sldnValues.keySet()) {
                if (numDays.keySet().contains(sldn)) {
                    long newIPCount = sldnValues.get(sldn);
                    float f = ((float) newIPCount) / Collections.max(numDays.get(sldn));
                    clusterValues.get(clusterID).add(f);

                }
            }
        }

        for (int clusterID : clusterValues.keySet()) {
        if (clusterValues.get(clusterID) == null) { // should never happen: every key is mapped to a new list above
                retval.put(clusterID, null);
            } else {
                double sum = 0;
                for (double d : clusterValues.get(clusterID)) {
                    sum += d;
                }
                double val = 0;
                if (clusterValues.get(clusterID).size() > 0) {
                    val = sum / clusterValues.get(clusterID).size();
                }
                retval.put(clusterID, val);
            }
        }
    }
    return retval;
}

From source file:org.squashtest.tm.domain.library.structures.LibraryTree.java

/**
 * <p>
 * Accepts a {@link Closure} that will be applied to the nodes using top-down exploration. The method will walk down the tree:
 * <ul>
 *     <li>layer <i>n</i> will be treated before layer <i>n+1</i> (natural order)</li>
 *     <li>nodes within a given layer will be treated regardless of their ordering</li>
 * </ul>
 * </p>
 * @param closure code to apply on the nodes.
 */
public void doTopDown(Closure closure) {
    Integer layerIndex = 0;

    while (layerIndex <= Collections.max(layers.keySet())) {
        List<T> layer = layers.get(layerIndex);
        CollectionUtils.forAllDo(layer, closure);
        layerIndex++;
    }
}
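
A hypothetical usage sketch (the tree variable and node contents are illustrative; Closure here is org.apache.commons.collections.Closure, matching the raw CollectionUtils.forAllDo call above):

    // Print every node, layer by layer from the root down.
    Closure printNode = new Closure() {
        @Override
        public void execute(Object input) {
            System.out.println(input);
        }
    };
    tree.doTopDown(printNode);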

From source file:org.apache.lens.cube.parse.StorageCandidate.java

/**
 * Sets the Storage candidate's start and end times based on the underlying storage tables.
 *
 * CASE 1
 * If the Storage has a single storage table:
 * Storage start time = max(storage start time, fact start time)
 * Storage end time = min(storage end time, fact end time)
 *
 * CASE 2
 * If the Storage has multiple storage tables (one per update period):
 * Update period start time = max(update start time, fact start time)
 * Update period end time = min(update end time, fact end time)
 * Storage start and end times are then derived from the underlying update period times:
 * Storage start time = min(update1 start time, ..., updateN start time)
 * Storage end time = max(update1 end time, ..., updateN end time)
 *
 * Note: in CASE 2 it is assumed that the time ranges covered by the different update periods
 * either overlap (Example 2) or form a non-overlapping but continuous chain (Example 1), as
 * illustrated in the examples below.
 *
 * Example 1
 * A Storage has 2 non-overlapping but continuous update periods:
 * MONTHLY with start time now.month -13 months and end time now.month -2 months, and
 * DAILY with start time now.month -2 months and end time now.day.
 * This Storage then has an implied start time of now.month -13 months and an end time of now.day.
 *
 * Example 2
 * A Storage has 2 overlapping update periods:
 * MONTHLY with start time now.month -13 months and end time now.month -1 month, and
 * DAILY with start time now.month -2 months and end time now.day.
 * This Storage then has an implied start time of now.month -13 months and an end time of now.day.
 *
 * @throws LensException
 */
void setStorageStartAndEndDate() throws LensException {
    if (this.startTime != null && !this.isStorageTblsAtUpdatePeriodLevel) {
        // If the times are already set and do not depend on the update period, there is no point in setting them again.
        return;
    }
    List<Date> startDates = new ArrayList<>();
    List<Date> endDates = new ArrayList<>();
    for (String storageTablePrefix : getValidStorageTableNames()) {
        startDates.add(getCubeMetastoreClient().getStorageTableStartDate(storageTablePrefix,
                fact.getSourceFactName()));
        endDates.add(
                getCubeMetastoreClient().getStorageTableEndDate(storageTablePrefix, fact.getSourceFactName()));
    }
    this.startTime = Collections.min(startDates);
    this.endTime = Collections.max(endDates);
}
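
To make the derivation concrete, a minimal self-contained sketch of the Example 1 arithmetic with made-up dates (this class is not part of the Lens code):

import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.List;

public class StorageTimeDemo {
    private static Date monthsAgo(int months) {
        Calendar cal = Calendar.getInstance();
        cal.add(Calendar.MONTH, -months);
        return cal.getTime();
    }

    public static void main(String[] args) {
        // MONTHLY covers [now -13 months, now -2 months]; DAILY covers [now -2 months, now].
        List<Date> startDates = Arrays.asList(monthsAgo(13), monthsAgo(2));
        List<Date> endDates = Arrays.asList(monthsAgo(2), new Date());

        // Implied storage range: earliest start, latest end.
        System.out.println("start = " + Collections.min(startDates)); // ~13 months ago
        System.out.println("end   = " + Collections.max(endDates));   // now
    }
}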

From source file:io.hummer.util.test.GenericTestResult.java

/**
 * Returns the largest value recorded under the given name. Note that
 * Collections.max throws NoSuchElementException if there are no values.
 */
public double getMaximum(String valueName) {
    return Collections.max(getValues(valueName));
}

From source file:org.dawnsci.plotting.tools.powdercheck.PowderCheckJob.java

private List<PowderCheckResult> fitPeaksToTrace(final Dataset xIn, final Dataset yIn, Dataset baselineIn) {

    resultList.clear();

    List<HKL> spacings = CalibrationFactory.getCalibrationStandards().getCalibrant().getHKLs();
    final double[] qVals = new double[spacings.size()];

    for (int i = 0; i < spacings.size(); i++) {
        if (xAxis == XAxis.ANGLE)
            qVals[i] = 2 * Math.toDegrees(Math.asin((metadata.getDiffractionCrystalEnvironment().getWavelength()
                    / (2 * spacings.get(i).getDNano() * 10))));
        else
            qVals[i] = (Math.PI * 2) / (spacings.get(i).getDNano() * 10);
    }

    double qMax = xIn.max().doubleValue();
    double qMin = xIn.min().doubleValue();

    List<Double> qList = new ArrayList<Double>();

    int count = 0;

    for (double q : qVals) {
        if (q > qMax || q < qMin)
            continue;
        count++;
        qList.add(q);
    }

    double minPeak = Collections.min(qList);
    double maxPeak = Collections.max(qList);

    int minXidx = ROISliceUtils.findPositionOfClosestValueInAxis(xIn, minPeak) - EDGE_PIXEL_NUMBER;
    int maxXidx = ROISliceUtils.findPositionOfClosestValueInAxis(xIn, maxPeak) + EDGE_PIXEL_NUMBER;

    int maxSize = xIn.getSize();

    minXidx = minXidx < 0 ? 0 : minXidx;
    maxXidx = maxXidx > maxSize - 1 ? maxSize - 1 : maxXidx;

    final Dataset x = xIn.getSlice(new int[] { minXidx }, new int[] { maxXidx }, null);
    final Dataset y = yIn.getSlice(new int[] { minXidx }, new int[] { maxXidx }, null);
    y.setName("Fit");
    Dataset baseline = baselineIn.getSlice(new int[] { minXidx }, new int[] { maxXidx }, null);

    List<APeak> peaks = Generic1DFitter.fitPeaks(x, y, Gaussian.class, count + 10);

    List<PowderCheckResult> initResults = new ArrayList<PowderCheckResult>();

    CompositeFunction cf = new CompositeFunction();

    for (APeak peak : peaks)
        cf.addFunction(peak);

    double limit = findMatchLimit(qList, cf);

    while (cf.getNoOfFunctions() != 0 && !qList.isEmpty())
        findMatches(initResults, qList, cf, limit);

    final CompositeFunction cfFinal = compositeFunctionFromResults(initResults);

    double[] initParam = new double[cfFinal.getFunctions().length * 3];

    {
        int i = 0;
        for (IFunction func : cfFinal.getFunctions()) {
            initParam[i++] = func.getParameter(0).getValue();
            initParam[i++] = func.getParameter(1).getValue();
            initParam[i++] = func.getParameter(2).getValue();
        }
    }

    final Dataset yfit = DatasetFactory.zeros(x, Dataset.FLOAT64);

    MultivariateOptimizer opt = new SimplexOptimizer(REL_TOL, ABS_TOL);

    MultivariateFunction fun = new MultivariateFunction() {

        @Override
        public double value(double[] arg0) {

            int j = 0;
            for (IFunction func : cfFinal.getFunctions()) {

                double[] p = func.getParameterValues();
                p[0] = arg0[j++];
                p[1] = arg0[j++];
                p[2] = arg0[j++];
                func.setParameterValues(p);
            }

            for (int i = 0; i < yfit.getSize(); i++) {
                yfit.set(cfFinal.val(x.getDouble(i)), i);
            }

            return y.residual(yfit);
        }
    };

    opt.optimize(new InitialGuess(initParam), GoalType.MINIMIZE, new ObjectiveFunction(fun),
            new MaxEval(MAX_EVAL), new NelderMeadSimplex(initParam.length));

    Dataset fit = Maths.add(yfit, baseline);
    fit.setName("Fit");
    Dataset residual = Maths.subtract(y, yfit);
    residual.setName("Residual");

    system.updatePlot1D(x, Arrays.asList(new IDataset[] { fit, residual }), null);
    setPlottingSystemAxes();
    for (int i = 0; i < cfFinal.getNoOfFunctions(); i++) {
        resultList.add(new PowderCheckResult(cfFinal.getFunction(i), initResults.get(i).getCalibrantQValue()));
    }

    return resultList;

}

From source file:de.unidue.langtech.teaching.rp.detector.LanguageDetectorWeb1T.java

/**
 * Returns true when more than one language in the map is tied for the maximum
 * probability, i.e. the single-probabilities result is ambiguous.
 */
@SuppressWarnings("unused")
private boolean hasDuplicates(Map<String, Double> map) {

    boolean status = false;

    List<String> maxEntrys = new ArrayList<String>();

    if (map.values().size() > 0) {

        //source: http://stackoverflow.com/a/11256352/3677505

        double maxValueInMap = Collections.max(map.values());

        for (Entry<String, Double> entry : map.entrySet()) {

            if (entry.getValue() == maxValueInMap) {
                maxEntrys.add(entry.getKey());
            }

        }

    }

    if (maxEntrys.size() > 1) {
        status = true;
    }

    if (status) {
        System.err.println(
                "Single language probabilities method could not clearly resolve language. \nStarting default method.");
    }

    return status;

}

From source file:uk.ac.diamond.scisoft.analysis.rcp.inspector.AxisSelection.java

/**
 * @return maximum order
 */
@SuppressWarnings({ "unchecked" })
public int getMaxOrder() {
    List<Integer> orders = (List<Integer>) CollectionUtils.collect(asData, orderTransformer);
    return orders.size() > 0 ? (Integer) Collections.max(orders) : 0;
}