Example usage for weka.core Instances equalHeadersMsg

List of usage examples for weka.core Instances equalHeadersMsg

Introduction

On this page you can find example usage for weka.core Instances equalHeadersMsg.

Prototype

public String equalHeadersMsg(Instances dataset) 

Source Link

Document

Checks if two headers are equivalent.

Usage

From source file:meka.gui.explorer.classify.PredictionsOnTestset.java

License:Open Source License

/**
 * Returns the action listener to use in the menu.
 *
 * <p>The listener, when triggered, runs the stored model over the tab's current
 * test set on a background thread and shows the predictions in a data viewer.
 *
 * @param history   the current history
 * @param index     the selected history item
 * @return          the listener
 */
@Override
public ActionListener getActionListener(final ResultHistoryList history, final int index) {
    // Model and the training-set header captured with the selected history entry.
    final MultiLabelClassifier classifier = (MultiLabelClassifier) getClassifier(history, index);
    final Instances header = getHeader(history, index);

    return new ActionListener() {
        @Override
        public void actionPerformed(ActionEvent e) {
            Runnable run = new Runnable() {
                @Override
                public void run() {
                    ClassifyTab owner = (ClassifyTab) getOwner();
                    Instances test;
                    owner.startBusy("Predictions on test...");
                    try {
                        // NOTE(review): prepareData appears to mutate the tab's test data in
                        // place before it is copied below — confirm against MLUtils.
                        MLUtils.prepareData(owner.getTestData());
                        test = new Instances(owner.getTestData());
                        test.setClassIndex(owner.getTestData().classIndex());
                        // Refuse to predict if the test header differs from the training header.
                        String msg = header.equalHeadersMsg(test);
                        if (msg != null)
                            throw new IllegalArgumentException(
                                    "Model's training set and current test set are not compatible:\n" + msg);
                        // collect predictions
                        Instances predicted = new Instances(test, 0);
                        for (int i = 0; i < test.numInstances(); i++) {
                            double pred[] = classifier.distributionForInstance(test.instance(i));
                            // Cut off any [no-longer-needed] probabalistic information from MT classifiers.
                            if (classifier instanceof MultiTargetClassifier)
                                pred = Arrays.copyOf(pred, test.classIndex());
                            // Copy the instance and overwrite its first pred.length values
                            // with the predicted ones.
                            Instance predInst = (Instance) test.instance(i).copy();
                            for (int j = 0; j < pred.length; j++)
                                predInst.setValue(j, pred[j]);
                            predicted.add(predInst);
                            // Progress feedback every 100 instances.
                            if ((i + 1) % 100 == 0)
                                owner.showStatus(
                                        "Predictions on test (" + (i + 1) + "/" + test.numInstances() + ")...");
                        }
                        owner.finishBusy();
                        // display predictions
                        DataViewerDialog dialog = new DataViewerDialog(GUIHelper.getParentFrame(owner),
                                ModalityType.MODELESS);
                        dialog.setDefaultCloseOperation(DataViewerDialog.DISPOSE_ON_CLOSE);
                        dialog.setInstances(predicted);
                        dialog.setSize(800, 600);
                        dialog.setLocationRelativeTo(owner);
                        dialog.setVisible(true);
                    } catch (Exception e) {
                        // Report the failure in the log, status bar and via a dialog.
                        owner.handleException("Predictions failed on test set:", e);
                        owner.finishBusy("Predictions failed: " + e);
                        JOptionPane.showMessageDialog(owner, "Predictions failed:\n" + e, "Error",
                                JOptionPane.ERROR_MESSAGE);
                    }
                }
            };
            ((ClassifyTab) getOwner()).start(run);
        }
    };
}

From source file:meka.gui.explorer.classify.ReevaluateModelOnTestset.java

License:Open Source License

/**
 * Returns the action listener to use in the menu.
 *
 * <p>The listener, when triggered, re-evaluates the stored model on the tab's
 * current test set on a background thread and adds the result to the history.
 *
 * @param history   the current history
 * @param index     the selected history item
 * @return          the listener
 */
@Override
public ActionListener getActionListener(final ResultHistoryList history, final int index) {
    // Model and the training-set header captured with the selected history entry.
    final MultiLabelClassifier classifier = (MultiLabelClassifier) getClassifier(history, index);
    final Instances header = getHeader(history, index);

    return new ActionListener() {
        @Override
        public void actionPerformed(ActionEvent e) {
            Runnable run = new Runnable() {
                @Override
                public void run() {
                    ClassifyTab owner = (ClassifyTab) getOwner();
                    Result result;
                    Instances test;
                    owner.startBusy("Reevaluate on test...");
                    try {
                        // NOTE(review): prepareData appears to mutate the tab's test data in
                        // place before it is copied below — confirm against MLUtils.
                        MLUtils.prepareData(owner.getTestData());
                        test = new Instances(owner.getTestData());
                        test.setClassIndex(owner.getTestData().classIndex());
                        // Refuse to evaluate if the test header differs from the training header.
                        String msg = header.equalHeadersMsg(test);
                        if (msg != null)
                            throw new IllegalArgumentException(
                                    "Model's training set and current test set are not compatible:\n" + msg);
                        owner.log(OptionUtils.toCommandLine(classifier));
                        owner.log("Testset: " + test.relationName());
                        owner.log("Class-index: " + test.classIndex());
                        result = Evaluation.evaluateModel(classifier, test, "0.0", owner.getVOP()); // TODO what threshold to use?
                        // Store the result together with the model and a header-only copy of the test set.
                        owner.addResultToHistory(result, new Object[] { classifier, new Instances(test, 0) },
                                classifier.getClass().getName().replace("meka.classifiers.", ""));
                        owner.finishBusy();
                    } catch (Exception e) {
                        // Report the failure in the log, status bar and via a dialog.
                        owner.handleException("Reevaluation failed on test set:", e);
                        owner.finishBusy("Reevaluation failed: " + e);
                        JOptionPane.showMessageDialog(owner, "Reevaluation failed:\n" + e, "Error",
                                JOptionPane.ERROR_MESSAGE);
                    }
                }
            };
            ((ClassifyTab) getOwner()).start(run);
        }
    };
}

From source file:meka.gui.explorer.ClassifyTab.java

License:Open Source License

/**
 * Starts the classification./*from   w w  w  . j  a  va 2s.  c o m*/
 */
protected void startClassification() {
    String type;
    Runnable run;
    final Instances data;

    if (m_ComboBoxExperiment.getSelectedIndex() == -1)
        return;

    data = new Instances(getData());
    if (m_Randomize)
        data.randomize(new Random(m_Seed));
    type = m_ComboBoxExperiment.getSelectedItem().toString();
    run = null;

    switch (type) {
    case TYPE_CROSSVALIDATION:
        run = new Runnable() {
            @Override
            public void run() {
                MultiLabelClassifier classifier;
                Result result;
                startBusy("Cross-validating...");
                try {
                    classifier = (MultiLabelClassifier) m_GenericObjectEditor.getValue();
                    log(OptionUtils.toCommandLine(classifier));
                    log("Dataset: " + data.relationName());
                    log("Class-index: " + data.classIndex());
                    result = Evaluation.cvModel(classifier, data, m_Folds, m_TOP, m_VOP);
                    addResultToHistory(result, new Object[] { classifier, new Instances(data, 0) },
                            classifier.getClass().getName().replace("meka.classifiers.", ""));
                    finishBusy();
                } catch (Exception e) {
                    handleException("Evaluation failed:", e);
                    finishBusy("Evaluation failed: " + e);
                    JOptionPane.showMessageDialog(ClassifyTab.this, "Evaluation failed (CV):\n" + e, "Error",
                            JOptionPane.ERROR_MESSAGE);
                }
            }
        };
        break;

    case TYPE_TRAINTESTSPLIT:
        run = new Runnable() {
            @Override
            public void run() {
                MultiLabelClassifier classifier;
                Result result;
                int trainSize;
                Instances train;
                Instances test;
                startBusy("Train/test split...");
                try {
                    trainSize = (int) (data.numInstances() * m_SplitPercentage / 100.0);
                    train = new Instances(data, 0, trainSize);
                    test = new Instances(data, trainSize, data.numInstances() - trainSize);
                    classifier = (MultiLabelClassifier) m_GenericObjectEditor.getValue();
                    log(OptionUtils.toCommandLine(classifier));
                    log("Dataset: " + train.relationName());
                    log("Class-index: " + train.classIndex());
                    result = Evaluation.evaluateModel(classifier, train, test, m_TOP, m_VOP);
                    addResultToHistory(result, new Object[] { classifier, new Instances(train, 0) },
                            classifier.getClass().getName().replace("meka.classifiers.", ""));
                    finishBusy();
                } catch (Exception e) {
                    handleException("Evaluation failed (train/test split):", e);
                    finishBusy("Evaluation failed: " + e);
                    JOptionPane.showMessageDialog(ClassifyTab.this, "Evaluation failed:\n" + e, "Error",
                            JOptionPane.ERROR_MESSAGE);
                }
            }
        };
        break;

    case TYPE_SUPPLIEDTESTSET:
        run = new Runnable() {
            @Override
            public void run() {
                MultiLabelClassifier classifier;
                Result result;
                int trainSize;
                Instances train;
                Instances test;
                startBusy("Supplied test...");
                try {
                    train = new Instances(data);
                    MLUtils.prepareData(m_TestInstances);
                    test = new Instances(m_TestInstances);
                    test.setClassIndex(data.classIndex());
                    String msg = train.equalHeadersMsg(test);
                    if (msg != null)
                        throw new IllegalArgumentException("Train and test set are not compatible:\n" + msg);
                    classifier = (MultiLabelClassifier) m_GenericObjectEditor.getValue();
                    log(OptionUtils.toCommandLine(classifier));
                    log("Dataset: " + train.relationName());
                    log("Class-index: " + train.classIndex());
                    result = Evaluation.evaluateModel(classifier, train, test, m_TOP, m_VOP);
                    addResultToHistory(result, new Object[] { classifier, new Instances(train, 0) },
                            classifier.getClass().getName().replace("meka.classifiers.", ""));
                    finishBusy();
                } catch (Exception e) {
                    handleException("Evaluation failed (train/test split):", e);
                    finishBusy("Evaluation failed: " + e);
                    JOptionPane.showMessageDialog(ClassifyTab.this, "Evaluation failed:\n" + e, "Error",
                            JOptionPane.ERROR_MESSAGE);
                }
            }
        };
        break;

    case TYPE_BINCREMENTAL:
        run = new Runnable() {
            @Override
            public void run() {
                MultiLabelClassifier classifier;
                Result result;
                startBusy("Incremental...");
                try {
                    classifier = (MultiLabelClassifier) m_GenericObjectEditor.getValue();
                    log(OptionUtils.toCommandLine(classifier));
                    log("Dataset: " + data.relationName());
                    log("Class-index: " + data.classIndex());
                    result = IncrementalEvaluation.evaluateModelBatchWindow(classifier, data, m_Samples, 1.,
                            m_TOP, m_VOP);
                    addResultToHistory(result, new Object[] { classifier, new Instances(data, 0) },
                            classifier.getClass().getName().replace("meka.classifiers.", ""));
                    finishBusy();
                } catch (Exception e) {
                    handleException("Evaluation failed (incremental splits):", e);
                    finishBusy("Evaluation failed: " + e);
                    JOptionPane.showMessageDialog(ClassifyTab.this, "Evaluation failed:\n" + e, "Error",
                            JOptionPane.ERROR_MESSAGE);
                }
            }
        };
        break;

    case TYPE_PREQUENTIAL:
        run = new Runnable() {
            @Override
            public void run() {
                MultiLabelClassifier classifier;
                Result result;
                startBusy("Incremental...");
                try {
                    classifier = (MultiLabelClassifier) m_GenericObjectEditor.getValue();
                    log(OptionUtils.toCommandLine(classifier));
                    log("Dataset: " + data.relationName());
                    log("Class-index: " + data.classIndex());
                    result = IncrementalEvaluation.evaluateModelPrequentialBasic(classifier, data,
                            (data.numInstances() / (m_Samples + 1)), 1., m_TOP, m_VOP);
                    addResultToHistory(result, new Object[] { classifier, new Instances(data, 0) },
                            classifier.getClass().getName().replace("meka.classifiers.", ""));
                    finishBusy();
                } catch (Exception e) {
                    handleException("Evaluation failed (incremental splits):", e);
                    finishBusy("Evaluation failed: " + e);
                    JOptionPane.showMessageDialog(ClassifyTab.this, "Evaluation failed:\n" + e, "Error",
                            JOptionPane.ERROR_MESSAGE);
                }
            }
        };
        break;

    default:
        throw new IllegalStateException("Unhandled evaluation type: " + type);
    }

    start(run);
}