Example usage for weka.core Instances relationName

List of usage examples for weka.core Instances relationName

Introduction

This page lists example usages of the weka.core.Instances method relationName().

Prototype


public String relationName() 

Document

Returns the relation's name.
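
A minimal sketch of typical usage; the ARFF path is a placeholder:

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class RelationNameDemo {
    public static void main(String[] args) throws Exception {
        // placeholder path; point this at any local ARFF file
        Instances data = DataSource.read("iris.arff");

        // relationName() returns the @relation header of the ARFF file
        System.out.println("Relation: " + data.relationName());

        // setRelationName(String) is the matching mutator
        data.setRelationName(data.relationName() + "-renamed");
        System.out.println("Renamed:  " + data.relationName());
    }
}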

Usage

From source file:CrossValidationMultipleRuns.java

License:Open Source License

/**
 * Performs the cross-validation. See Javadoc of class for information
 * on command-line parameters.
 *
 * @param args   the command-line parameters
 * @throws Exception   if something goes wrong
 */
public static void main(String[] args) throws Exception {
    // load data and set the class index
    Instances data = DataSource.read(Utils.getOption("t", args));
    String clsIndex = Utils.getOption("c", args);
    if (clsIndex.length() == 0)
        clsIndex = "last";
    if (clsIndex.equals("first"))
        data.setClassIndex(0);
    else if (clsIndex.equals("last"))
        data.setClassIndex(data.numAttributes() - 1);
    else
        data.setClassIndex(Integer.parseInt(clsIndex) - 1);

    // classifier
    String[] tmpOptions;
    String classname;
    tmpOptions = Utils.splitOptions(Utils.getOption("W", args));
    classname = tmpOptions[0];
    tmpOptions[0] = "";
    Classifier cls = (Classifier) Utils.forName(Classifier.class, classname, tmpOptions);

    // other options
    int runs = Integer.parseInt(Utils.getOption("r", args));
    int folds = Integer.parseInt(Utils.getOption("x", args));

    // perform cross-validation
    for (int i = 0; i < runs; i++) {
        // randomize data
        int seed = i + 1;
        Random rand = new Random(seed);
        Instances randData = new Instances(data);
        randData.randomize(rand);
        //if (randData.classAttribute().isNominal())
        //   randData.stratify(folds);

        Evaluation eval = new Evaluation(randData);

        StringBuilder optionsString = new StringBuilder();
        for (String s : cls.getOptions()) {
            optionsString.append(s);
            optionsString.append(" ");
        }

        // output evaluation
        System.out.println();
        System.out.println("=== Setup run " + (i + 1) + " ===");
        System.out.println("Classifier: " + optionsString.toString());
        System.out.println("Dataset: " + data.relationName());
        System.out.println("Folds: " + folds);
        System.out.println("Seed: " + seed);
        System.out.println();

        for (int n = 0; n < folds; n++) {
            Instances train = randData.trainCV(folds, n);
            Instances test = randData.testCV(folds, n);

            // build and evaluate classifier
            Classifier clsCopy = Classifier.makeCopy(cls);
            clsCopy.buildClassifier(train);
            eval.evaluateModel(clsCopy, test);
            System.out.println(eval.toClassDetailsString());
        }

        System.out.println(
                eval.toSummaryString("=== " + folds + "-fold Cross-validation run " + (i + 1) + " ===", false));
    }
}
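
A hypothetical invocation matching the options parsed above; the dataset path and the classifier choice are placeholders:

// equivalent command line:
//   java CrossValidationMultipleRuns -t iris.arff -c last -W weka.classifiers.trees.J48 -r 3 -x 10
CrossValidationMultipleRuns.main(new String[] {
        "-t", "iris.arff",                  // dataset to load (placeholder)
        "-c", "last",                       // class attribute index
        "-W", "weka.classifiers.trees.J48", // classifier to evaluate
        "-r", "3",                          // number of randomized runs
        "-x", "10"                          // number of folds per run
});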

From source file:TextClassifierUI.java

private void setVMC(FastVector predictions, ThresholdVisualizePanel vmc, boolean masterPlot) {
    try {
        ThresholdCurve tc = new ThresholdCurve();
        Instances result = tc.getCurve(predictions);
        // method visualize
        PlotData2D tempd = new PlotData2D(result);
        tempd.setPlotName(result.relationName());
        tempd.addInstanceNumberAttribute();
        // specify which points are connected
        boolean[] cp = new boolean[result.numInstances()];
        for (int n = 1; n < cp.length; n++) {
            cp[n] = true;
        }
        tempd.setConnectPoints(cp);
        // add plot
        if (masterPlot) {
            vmc.setMasterPlot(tempd);
        } else {
            vmc.addPlot(tempd);
        }
    } catch (Exception ex) {
        System.err.println("Failed to set VMC");
        ex.printStackTrace();
    }
}
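
A hedged usage sketch, assuming data and classifier were prepared elsewhere in the class (this matches the WEKA 3.6-era API, in which Evaluation.predictions() returns a FastVector):

// cross-validate to collect predictions, then plot the threshold curve
Evaluation eval = new Evaluation(data);
eval.crossValidateModel(classifier, data, 10, new Random(1));
ThresholdVisualizePanel vmc = new ThresholdVisualizePanel();
setVMC(eval.predictions(), vmc, true);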

From source file:MPCKMeans.java

License:Open Source License

public static void runFromCommandLine(String[] args) {
    MPCKMeans mpckmeans = new MPCKMeans();
    Instances data = null, clusterData = null;
    ArrayList labeledPairs = null;

    try {
        String optionString = Utils.getOption('D', args);
        if (optionString.length() != 0) {
            FileReader reader = new FileReader(optionString);
            data = new Instances(reader);
            System.out.println("Reading dataset: " + data.relationName());
        }

        int classIndex = data.numAttributes() - 1;
        optionString = Utils.getOption('K', args);
        if (optionString.length() != 0) {
            classIndex = Integer.parseInt(optionString);
            if (classIndex >= 0) {
                data.setClassIndex(classIndex); // starts with 0
                // Remove the class labels before clustering
                clusterData = new Instances(data);
                mpckmeans.setNumClusters(clusterData.numClasses());
                clusterData.deleteClassAttribute();
                System.out.println("Setting classIndex: " + classIndex);
            } else {
                clusterData = new Instances(data);
            }
        } else {
            data.setClassIndex(classIndex); // starts with 0
            // Remove the class labels before clustering
            clusterData = new Instances(data);
            mpckmeans.setNumClusters(clusterData.numClasses());
            clusterData.deleteClassAttribute();
            System.out.println("Setting classIndex: " + classIndex);
        }

        optionString = Utils.getOption('C', args);
        if (optionString.length() != 0) {
            labeledPairs = mpckmeans.readConstraints(optionString);
            System.out.println("Reading constraints from: " + optionString);
        } else {
            labeledPairs = new ArrayList(0);
        }

        mpckmeans.setTotalTrainWithLabels(data);
        mpckmeans.setOptions(args);
        System.out.println();
        mpckmeans.buildClusterer(labeledPairs, clusterData, data, mpckmeans.getNumClusters(),
                data.numInstances());
        mpckmeans.printClusterAssignments();

        if (mpckmeans.m_TotalTrainWithLabels.classIndex() > -1) {
            double nCorrect = 0;
            for (int i = 0; i < mpckmeans.m_TotalTrainWithLabels.numInstances(); i++) {
                for (int j = i + 1; j < mpckmeans.m_TotalTrainWithLabels.numInstances(); j++) {
                    int cluster_i = mpckmeans.m_ClusterAssignments[i];
                    int cluster_j = mpckmeans.m_ClusterAssignments[j];
                    double class_i = (mpckmeans.m_TotalTrainWithLabels.instance(i)).classValue();
                    double class_j = (mpckmeans.m_TotalTrainWithLabels.instance(j)).classValue();
                    //         System.out.println(cluster_i + "," + cluster_j + ":" + class_i + "," + class_j);
                    if (cluster_i == cluster_j && class_i == class_j
                            || cluster_i != cluster_j && class_i != class_j) {
                        nCorrect++;
                        //        System.out.println("nCorrect:" + nCorrect);
                    }
                }
            }
            int numInstances = mpckmeans.m_TotalTrainWithLabels.numInstances();
            double RandIndex = 100 * nCorrect / (numInstances * (numInstances - 1) / 2);
            System.err.println("Acc\t" + RandIndex);
        }

        //      if (mpckmeans.getTotalTrainWithLabels().classIndex() >= 0) {
        //    SemiSupClustererEvaluation eval = new SemiSupClustererEvaluation(mpckmeans.m_TotalTrainWithLabels,
        //                             mpckmeans.m_TotalTrainWithLabels.numClasses(),
        //                             mpckmeans.m_TotalTrainWithLabels.numClasses());
        //    eval.evaluateModel(mpckmeans, mpckmeans.m_TotalTrainWithLabels, mpckmeans.m_Instances);
        //    eval.mutualInformation();
        //    eval.pairwiseFMeasure();
        //      }
    } catch (Exception e) {
        System.out.println("Option not specified");
        e.printStackTrace();
    }
}
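
The nested loop above computes pairwise clustering accuracy, i.e. the (unadjusted) Rand index scaled to a percentage: RandIndex = 100 * nCorrect / (n(n-1)/2), where a pair of instances counts as correct if both share a cluster and a class label, or share neither.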

From source file:adams.data.conversion.WekaInstancesToTimeseries.java

License:Open Source License

/**
 * Performs the actual conversion.
 *
 * @return      the converted data
 * @throws Exception   if something goes wrong with the conversion
 */
@Override
protected Object doConvert() throws Exception {
    Timeseries result;
    Instances input;
    Instance inst;
    int indexDate;
    int indexValue;
    TimeseriesPoint point;
    int i;
    Date timestamp;
    double value;

    input = (Instances) m_Input;

    // determine attribute indices
    m_DateAttribute.setData(input);
    indexDate = m_DateAttribute.getIntIndex();
    if (indexDate == -1)
        throw new IllegalStateException("Failed to located date attribute: " + m_DateAttribute.getIndex());
    m_ValueAttribute.setData(input);
    indexValue = m_ValueAttribute.getIntIndex();
    if (indexValue == -1)
        throw new IllegalStateException("Failed to located value attribute: " + m_ValueAttribute.getIndex());

    result = new Timeseries(input.relationName() + "-" + input.attribute(indexValue).name());
    for (i = 0; i < input.numInstances(); i++) {
        inst = input.instance(i);
        if (!inst.isMissing(indexDate) && !inst.isMissing(indexValue)) {
            timestamp = new Date((long) inst.value(indexDate));
            value = inst.value(indexValue);
            point = new TimeseriesPoint(timestamp, value);
            result.add(point);
        }
    }

    return result;
}
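
Note that WEKA stores date attribute values internally as milliseconds since the epoch (held in a double), which is why inst.value(indexDate) can be cast to long and passed straight to the java.util.Date constructor.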

From source file:adams.data.instancesanalysis.PCA.java

License:Open Source License

/**
 * Performs the actual analysis.
 *
 * @param data   the data to analyze
 * @return      null if successful, otherwise error message
 * @throws Exception   if analysis fails
 */
@Override
protected String doAnalyze(Instances data) throws Exception {
    String result;
    Remove remove;
    PublicPrincipalComponents pca;
    int i;
    Capabilities caps;
    PartitionedMultiFilter2 part;
    Range rangeUnsupported;
    Range rangeSupported;
    TIntList listNominal;
    Range rangeNominal;
    ArrayList<ArrayList<Double>> coeff;
    Instances filtered;
    SpreadSheet transformed;
    WekaInstancesToSpreadSheet conv;
    String colName;

    result = null;
    m_Loadings = null;
    m_Scores = null;

    if (!m_AttributeRange.isAllRange()) {
        if (isLoggingEnabled())
            getLogger().info("Filtering attribute range: " + m_AttributeRange.getRange());
        remove = new Remove();
        remove.setAttributeIndicesArray(m_AttributeRange.getIntIndices());
        remove.setInvertSelection(true);
        remove.setInputFormat(data);
        data = Filter.useFilter(data, remove);
    }
    if (isLoggingEnabled())
        getLogger().info("Performing PCA...");

    listNominal = new TIntArrayList();
    if (m_SkipNominal) {
        for (i = 0; i < data.numAttributes(); i++) {
            if (i == data.classIndex())
                continue;
            if (data.attribute(i).isNominal())
                listNominal.add(i);
        }
    }

    // check for unsupported attributes
    caps = new PublicPrincipalComponents().getCapabilities();
    m_Supported = new TIntArrayList();
    m_Unsupported = new TIntArrayList();
    for (i = 0; i < data.numAttributes(); i++) {
        if (!caps.test(data.attribute(i)) || (i == data.classIndex()) || (listNominal.contains(i)))
            m_Unsupported.add(i);
        else
            m_Supported.add(i);
    }
    data.setClassIndex(-1);

    m_NumAttributes = m_Supported.size();

    // the principal components filter will drop attributes without any distinct values;
    // this checks which attributes will be kept.
    m_Kept = new ArrayList<>();
    for (i = 0; i < m_Supported.size(); i++) {
        if (data.numDistinctValues(m_Supported.get(i)) > 1)
            m_Kept.add(m_Supported.get(i));
    }

    // build a model using the PublicPrincipalComponents
    pca = new PublicPrincipalComponents();
    pca.setMaximumAttributes(m_MaxAttributes);
    pca.setVarianceCovered(m_Variance);
    pca.setMaximumAttributeNames(m_MaxAttributeNames);
    part = null;
    if (m_Unsupported.size() > 0) {
        rangeUnsupported = new Range();
        rangeUnsupported.setMax(data.numAttributes());
        rangeUnsupported.setIndices(m_Unsupported.toArray());
        rangeSupported = new Range();
        rangeSupported.setMax(data.numAttributes());
        rangeSupported.setIndices(m_Supported.toArray());
        part = new PartitionedMultiFilter2();
        part.setFilters(new Filter[] { pca, new AllFilter(), });
        part.setRanges(new weka.core.Range[] { new weka.core.Range(rangeSupported.getRange()),
                new weka.core.Range(rangeUnsupported.getRange()), });
    }
    try {
        if (part != null)
            part.setInputFormat(data);
        else
            pca.setInputFormat(data);
    } catch (Exception e) {
        result = Utils.handleException(this, "Failed to set data format", e);
    }

    transformed = null;
    if (result == null) {
        try {
            if (part != null)
                filtered = weka.filters.Filter.useFilter(data, part);
            else
                filtered = weka.filters.Filter.useFilter(data, pca);
        } catch (Exception e) {
            result = Utils.handleException(this, "Failed to apply filter", e);
            filtered = null;
        }
        if (filtered != null) {
            conv = new WekaInstancesToSpreadSheet();
            conv.setInput(filtered);
            result = conv.convert();
            if (result == null) {
                transformed = (SpreadSheet) conv.getOutput();
                // shorten column names again
                if (part != null) {
                    for (i = 0; i < transformed.getColumnCount(); i++) {
                        colName = transformed.getColumnName(i);
                        colName = colName.replaceFirst("filtered-[0-9]*-", "");
                        transformed.getHeaderRow().getCell(i).setContentAsString(colName);
                    }
                }
            }
        }
    }

    if (result == null) {
        // get the coefficients from the filter
        m_Scores = transformed;
        coeff = pca.getCoefficients();
        m_Loadings = extractLoadings(data, coeff);
        m_Loadings.setName("Loadings for " + data.relationName());
    }

    return result;
}
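
When unsupported attributes are present, the PartitionedMultiFilter2 applies the PCA filter only to the supported attribute range and routes the remaining attributes through an AllFilter unchanged. Because the partitioned filter prefixes the resulting attribute names (filtered-<N>-...), the loop near the end strips that prefix from the spreadsheet's column names again.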

From source file:adams.data.io.output.AbstractWekaSpreadSheetWriter.java

License:Open Source License

/**
 * Performs the actual writing. The caller must ensure that the output stream
 * gets closed.
 *
 * @param content   the spreadsheet to write
 * @param out      the output stream to write the spreadsheet to
 * @return      true if successfully written
 */
@Override
protected boolean doWrite(SpreadSheet content, OutputStream out) {
    boolean result;
    Instances data;
    SpreadSheetToWekaInstances convert;
    String msg;

    result = false;

    try {
        convert = new SpreadSheetToWekaInstances();
        convert.setInput(content);
        msg = convert.convert();
        if (msg == null) {
            data = (Instances) convert.getOutput();
            if (data.relationName().equals(Environment.getInstance().getProject())) {
                if (content.hasName())
                    data.setRelationName(content.getName());
            }
            m_Saver.setInstances(data);
            if (m_Stopped)
                return false;
            m_Saver.setDestination(out);
            m_Saver.writeBatch();
            result = true;
        } else {
            getLogger().severe("Failed to convert spreadsheet into WEKA Instances:\n" + msg);
            result = false;
        }
        convert.cleanUp();
    } catch (Exception e) {
        getLogger().log(Level.SEVERE, "Failed to save dataset!", e);
        result = false;
    }

    return result;
}
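
The relation-name check above compensates for the SpreadSheetToWekaInstances conversion: if the resulting Instances still carry what appears to be the converter's default relation name (the ADAMS project name) and the spreadsheet has an explicit name of its own, the spreadsheet's name is used instead.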

From source file:adams.flow.sink.WekaCostBenefitAnalysis.java

License:Open Source License

/**
 * Plots the token (the panel and dialog have already been created at
 * this stage).
 *
 * @param token   the token to display
 */
@Override
protected void display(Token token) {
    Evaluation eval;
    Attribute classAtt;
    Attribute classAttToUse;
    int classValue;
    ThresholdCurve tc;
    Instances result;
    ArrayList<String> newNames;
    CostBenefitAnalysis cbAnalysis;
    PlotData2D tempd;
    boolean[] cp;
    int n;

    try {
        if (token.getPayload() instanceof WekaEvaluationContainer)
            eval = (Evaluation) ((WekaEvaluationContainer) token.getPayload())
                    .getValue(WekaEvaluationContainer.VALUE_EVALUATION);
        else
            eval = (Evaluation) token.getPayload();
        if (eval.predictions() == null) {
            getLogger().severe("No predictions available from Evaluation object!");
            return;
        }
        classAtt = eval.getHeader().classAttribute();
        m_ClassIndex.setData(classAtt);
        classValue = m_ClassIndex.getIntIndex();
        tc = new ThresholdCurve();
        result = tc.getCurve(eval.predictions(), classValue);

        // Create a dummy class attribute with the chosen
        // class value as index 0 (if necessary).
        classAttToUse = eval.getHeader().classAttribute();
        if (classValue != 0) {
            newNames = new ArrayList<>();
            newNames.add(classAtt.value(classValue));
            for (int k = 0; k < classAtt.numValues(); k++) {
                if (k != classValue)
                    newNames.add(classAtt.value(k));
            }
            classAttToUse = new Attribute(classAtt.name(), newNames);
        }
        // assemble plot data
        tempd = new PlotData2D(result);
        tempd.setPlotName(result.relationName());
        tempd.m_alwaysDisplayPointsOfThisSize = 10;
        // specify which points are connected
        cp = new boolean[result.numInstances()];
        for (n = 1; n < cp.length; n++)
            cp[n] = true;
        tempd.setConnectPoints(cp);
        // add plot
        m_CostBenefitPanel.setCurveData(tempd, classAttToUse);
    } catch (Exception e) {
        handleException("Failed to display token: " + token, e);
    }
}
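
The dummy class attribute reorders the class labels so that the chosen class value sits at index 0, presumably because the cost-benefit panel treats the first label as the positive class; this way any class value can be analyzed, not just the first.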

From source file:adams.flow.sink.WekaFileWriter.java

License:Open Source License

/**
 * Executes the flow item.
 *
 * @return      null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    Instances data;
    String filename;
    File file;
    DataSink sink;

    result = null;

    data = (Instances) m_InputToken.getPayload();
    filename = null;
    try {
        // determine filename
        filename = m_OutputFile.getAbsolutePath();
        if (m_UseRelationNameAsFilename) {
            file = new File(filename);
            filename = file.getParent() + File.separator + FileUtils.createFilename(data.relationName(), "_")
                    + file.getName().replaceAll(".*\\.", ".");
        }

        if (m_UseCustomSaver) {
            m_CustomSaver.setFile(new File(filename));
            sink = new DataSink(m_CustomSaver);
        } else {
            sink = new DataSink(filename);
        }

        // save file
        sink.write(data);
    } catch (Exception e) {
        result = handleException("Failed to save dataset to: " + filename, e);
    }

    return result;
}
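
As a worked example of the filename logic: with relation name iris and output file /some/where/output.arff, the call file.getName().replaceAll(".*\\.", ".") reduces output.arff to its extension .arff, so the file is written to /some/where/iris.arff.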

From source file:adams.flow.transformer.WekaBootstrapping.java

License:Open Source License

/**
 * Executes the flow item.
 *
 * @return      null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    SpreadSheet sheet;
    Row row;
    Evaluation evalAll;
    Evaluation eval;
    WekaEvaluationContainer cont;
    TIntList indices;
    Random random;
    int i;
    int iteration;
    int size;
    List<Prediction> preds;
    Instances header;
    Instances data;
    ArrayList<Attribute> atts;
    Instance inst;
    boolean numeric;
    int classIndex;
    Double[] errors;
    Double[] errorsRev;
    Percentile<Double> perc;
    Percentile<Double> percRev;
    TIntList subset;

    result = null;

    if (m_InputToken.getPayload() instanceof Evaluation) {
        evalAll = (Evaluation) m_InputToken.getPayload();
    } else {
        cont = (WekaEvaluationContainer) m_InputToken.getPayload();
        evalAll = (Evaluation) cont.getValue(WekaEvaluationContainer.VALUE_EVALUATION);
    }

    if ((evalAll.predictions() == null) || (evalAll.predictions().size() == 0))
        result = "No predictions available!";

    if (result == null) {
        // init spreadsheet
        sheet = new DefaultSpreadSheet();
        row = sheet.getHeaderRow();
        row.addCell("S").setContentAsString("Subsample");
        for (EvaluationStatistic s : m_StatisticValues)
            row.addCell(s.toString()).setContentAsString(s.toString());
        for (i = 0; i < m_Percentiles.length; i++) {
            switch (m_ErrorCalculation) {
            case ACTUAL_MINUS_PREDICTED:
                row.addCell("perc-AmP-" + i).setContentAsString("Percentile-AmP-" + m_Percentiles[i]);
                break;
            case PREDICTED_MINUS_ACTUAL:
                row.addCell("perc-PmA-" + i).setContentAsString("Percentile-PmA-" + m_Percentiles[i]);
                break;
            case ABSOLUTE:
                row.addCell("perc-Abs-" + i).setContentAsString("Percentile-Abs-" + m_Percentiles[i]);
                break;
            case BOTH:
                row.addCell("perc-AmP-" + i).setContentAsString("Percentile-AmP-" + m_Percentiles[i]);
                row.addCell("perc-PmA-" + i).setContentAsString("Percentile-PmA-" + m_Percentiles[i]);
                break;
            default:
                throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
            }
        }

        // set up bootstrapping
        preds = evalAll.predictions();
        random = new Random(m_Seed);
        indices = new TIntArrayList();
        size = (int) Math.round(preds.size() * m_Percentage);
        header = evalAll.getHeader();
        numeric = header.classAttribute().isNumeric();
        m_ClassIndex.setData(header.classAttribute());
        if (numeric)
            classIndex = -1;
        else
            classIndex = m_ClassIndex.getIntIndex();
        for (i = 0; i < preds.size(); i++)
            indices.add(i);

        // create fake evaluations
        subset = new TIntArrayList();
        for (iteration = 0; iteration < m_NumSubSamples; iteration++) {
            if (isStopped()) {
                sheet = null;
                break;
            }

            // determine
            subset.clear();
            if (m_WithReplacement) {
                for (i = 0; i < size; i++)
                    subset.add(indices.get(random.nextInt(preds.size())));
            } else {
                indices.shuffle(random);
                for (i = 0; i < size; i++)
                    subset.add(indices.get(i));
            }

            // create dataset from predictions
            errors = new Double[size];
            errorsRev = new Double[size];
            atts = new ArrayList<>();
            atts.add(header.classAttribute().copy("Actual"));
            data = new Instances(header.relationName() + "-" + (iteration + 1), atts, size);
            data.setClassIndex(0);
            for (i = 0; i < subset.size(); i++) {
                inst = new DenseInstance(preds.get(subset.get(i)).weight(),
                        new double[] { preds.get(subset.get(i)).actual() });
                data.add(inst);
                switch (m_ErrorCalculation) {
                case ACTUAL_MINUS_PREDICTED:
                    errors[i] = preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted();
                    break;
                case PREDICTED_MINUS_ACTUAL:
                    errorsRev[i] = preds.get(subset.get(i)).predicted() - preds.get(subset.get(i)).actual();
                    break;
                case ABSOLUTE:
                    errors[i] = Math
                            .abs(preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted());
                    break;
                case BOTH:
                    errors[i] = preds.get(subset.get(i)).actual() - preds.get(subset.get(i)).predicted();
                    errorsRev[i] = preds.get(subset.get(i)).predicted() - preds.get(subset.get(i)).actual();
                    break;
                default:
                    throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
                }
            }

            // perform "fake" evaluation
            try {
                eval = new Evaluation(data);
                for (i = 0; i < subset.size(); i++) {
                    if (numeric)
                        eval.evaluateModelOnceAndRecordPrediction(
                                new double[] { preds.get(subset.get(i)).predicted() }, data.instance(i));
                    else
                        eval.evaluateModelOnceAndRecordPrediction(
                                ((NominalPrediction) preds.get(subset.get(i))).distribution().clone(),
                                data.instance(i));
                }
            } catch (Exception e) {
                result = handleException(
                        "Failed to create 'fake' Evaluation object (iteration: " + (iteration + 1) + ")!", e);
                break;
            }

            // add row
            row = sheet.addRow();
            row.addCell("S").setContent(iteration + 1);
            for (EvaluationStatistic s : m_StatisticValues) {
                try {
                    row.addCell(s.toString()).setContent(EvaluationHelper.getValue(eval, s, classIndex));
                } catch (Exception e) {
                    getLogger().log(Level.SEVERE,
                            "Failed to calculate statistic in iteration #" + (iteration + 1) + ": " + s, e);
                    row.addCell(s.toString()).setMissing();
                }
            }
            for (i = 0; i < m_Percentiles.length; i++) {
                perc = new Percentile<>();
                perc.addAll(errors);
                percRev = new Percentile<>();
                percRev.addAll(errorsRev);
                switch (m_ErrorCalculation) {
                case ACTUAL_MINUS_PREDICTED:
                    row.addCell("perc-AmP-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case PREDICTED_MINUS_ACTUAL:
                    row.addCell("perc-PmA-" + i)
                            .setContent(percRev.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case ABSOLUTE:
                    row.addCell("perc-Abs-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                case BOTH:
                    row.addCell("perc-AmP-" + i).setContent(perc.getPercentile(m_Percentiles[i].doubleValue()));
                    row.addCell("perc-PmA-" + i)
                            .setContent(percRev.getPercentile(m_Percentiles[i].doubleValue()));
                    break;
                default:
                    throw new IllegalStateException("Unhandled error calculation: " + m_ErrorCalculation);
                }
            }
        }

        if ((result == null) && (sheet != null))
            m_OutputToken = new Token(sheet);
    }

    return result;
}
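
Each subsample draws round(percentage * #predictions) of the recorded predictions, either with replacement (the classic bootstrap) or by shuffling the indices and taking a prefix (subsampling without replacement). A one-attribute dataset of the actual class values is then rebuilt so a "fake" Evaluation can replay the recorded predictions and compute the requested statistics and error percentiles per subsample.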

From source file:adams.flow.transformer.WekaFilter.java

License:Open Source License

/**
 * Executes the flow item.
 *
 * @return      null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    weka.core.Instances data;
    weka.core.Instances filteredData;
    weka.core.Instance inst;
    adams.data.instance.Instance instA;
    weka.core.Instance filteredInst;
    String relation;

    result = null;

    data = null;
    inst = null;
    if (m_InputToken.hasPayload(weka.core.Instance.class))
        inst = m_InputToken.getPayload(weka.core.Instance.class);
    else if (m_InputToken.hasPayload(adams.data.instance.Instance.class))
        inst = m_InputToken.getPayload(adams.data.instance.Instance.class).toInstance();
    else if (m_InputToken.hasPayload(weka.core.Instances.class))
        data = m_InputToken.getPayload(weka.core.Instances.class);
    else
        result = m_InputToken.unhandledData();

    if (result == null) {
        try {
            // initialize filter?
            if (!m_Initialized || !m_InitializeOnce) {
                if (data == null) {
                    data = new weka.core.Instances(inst.dataset(), 0);
                    data.add(inst);
                }
                initActualFilter(data);
            }

            synchronized (m_ActualFilter) {
                if (!m_FlowContextUpdated) {
                    m_FlowContextUpdated = true;
                    if (m_ActualFilter instanceof FlowContextHandler)
                        ((FlowContextHandler) m_ActualFilter).setFlowContext(this);
                }

                // filter data
                filteredData = null;
                filteredInst = null;
                if (data != null) {
                    relation = data.relationName();
                    filteredData = weka.filters.Filter.useFilter(data, m_ActualFilter);
                    if (m_KeepRelationName) {
                        filteredData.setRelationName(relation);
                        if (isLoggingEnabled())
                            getLogger().info("Setting relation name: " + relation);
                    }
                    m_Initialized = true;
                } else {
                    relation = inst.dataset().relationName();
                    m_ActualFilter.input(inst);
                    m_ActualFilter.batchFinished();
                    filteredInst = m_ActualFilter.output();
                    if (m_KeepRelationName) {
                        filteredInst.dataset().setRelationName(relation);
                        if (isLoggingEnabled())
                            getLogger().info("Setting relation name: " + relation);
                    }
                }
            }

            // build output token
            if (inst != null) {
                if (filteredInst != null) {
                    if (m_InputToken.getPayload() instanceof weka.core.Instance) {
                        m_OutputToken = new Token(filteredInst);
                    } else {
                        instA = new adams.data.instance.Instance();
                        instA.set(filteredInst);
                        m_OutputToken = createToken(m_InputToken.getPayload(), instA);
                    }
                } else if ((filteredData != null) && (filteredData.numInstances() > 0)) {
                    m_OutputToken = createToken(m_InputToken.getPayload(), filteredData.instance(0));
                }
            } else {
                m_OutputToken = createToken(m_InputToken.getPayload(), filteredData);
            }
        } catch (Exception e) {
            result = handleException("Failed to filter data: ", e);
        }
    }

    if (m_OutputToken != null)
        updateProvenance(m_OutputToken);

    return result;
}
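
For single instances, the method falls back to WEKA's incremental filtering protocol: input(inst) queues the instance, batchFinished() marks the end of the one-element batch, and output() retrieves the filtered result. Restoring the relation name afterwards matters because many WEKA filters rewrite it to reflect the applied filter and its options.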