Example usage for weka.clusterers ClusterEvaluation getClusterAssignments

List of usage examples for weka.clusterers ClusterEvaluation getClusterAssignments

Introduction

This page lists example usage of weka.clusterers ClusterEvaluation getClusterAssignments.

Prototype

public double[] getClusterAssignments() 

Document

Return an array of cluster assignments corresponding to the most recent set of instances clustered.
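
For orientation, here is a minimal, self-contained sketch of the typical call pattern: build a clusterer, evaluate it with ClusterEvaluation, then read one assignment per instance from getClusterAssignments(). The dataset path, the class name and the choice of SimpleKMeans with 3 clusters are placeholder assumptions for illustration only.

import weka.clusterers.ClusterEvaluation;
import weka.clusterers.SimpleKMeans;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class ClusterAssignmentsExample {
    public static void main(String[] args) throws Exception {
        // Load a dataset (placeholder path).
        Instances data = new DataSource("data.arff").getDataSet();

        // Build any clusterer; SimpleKMeans with 3 clusters is just an example choice.
        SimpleKMeans kMeans = new SimpleKMeans();
        kMeans.setNumClusters(3);
        kMeans.buildClusterer(data);

        // Evaluate the clusterer on the same instances.
        ClusterEvaluation eval = new ClusterEvaluation();
        eval.setClusterer(kMeans);
        eval.evaluateClusterer(data);

        // One entry per instance, in the order the instances were clustered.
        double[] assignments = eval.getClusterAssignments();
        for (int i = 0; i < assignments.length; i++) {
            System.out.println("Instance " + i + " -> cluster " + (int) assignments[i]);
        }
    }
}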

Usage

From source file:ClusteringClass.java

public static void main(String[] args) throws Exception {
    String filename = "C:\\Users\\Daniele\\Desktop\\Humoradio2.csv";

    try {
        FileWriter fw = new FileWriter(filename);
        Class.forName("org.apache.derby.jdbc.ClientDriver").newInstance();
        Connection conn = DriverManager.getConnection("jdbc:derby://localhost:1527/HumoRadioDB", "dani",
                "dani");

        String query = "SELECT * FROM SONG_RATING2";
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(query);

        ResultSetMetaData rsmd = rs.getMetaData();
        for (int i = 1; i < 23; i++) {
            if (i != 2) {
                fw.append(rsmd.getColumnName(i));
                if (i != 22) {
                    fw.append(',');
                } else {
                    fw.append('\n');
                }
            }
        }

        String query1 = "SELECT * FROM SONG_DATA";
        Statement stmt1 = conn.createStatement();
        ResultSet rs1 = stmt1.executeQuery(query1);

        String[] titles = new String[150];

        for (int ii = 0; ii < 150; ii++) {
            rs1.next();
            titles[ii] = rs1.getString("TITLE");
        }

        // Write each row of SONG_RATING2 as a CSV line, skipping column 2.
        while (rs.next()) {
            for (int i = 1; i < 23; i++) {
                if (i != 2) {
                    fw.append(rs.getString(i));
                    if (i != 22) {
                        fw.append(',');
                    } else {
                        fw.append('\n');
                    }
                }
            }
        }

        fw.flush();
        fw.close();
        conn.close();
        System.out.println("CSV File is created successfully.");

        /*
         Clustering part
         */
        DataSource source = new DataSource("C:\\Users\\Daniele\\Desktop\\Humoradio2.csv");
        Instances train = source.getDataSet();

        /*
         Apply Weka's Remove filter so that one attribute is not
         considered by the clustering algorithm.
         */
        Remove filter = new Remove();
        filter.setAttributeIndices("1");
        filter.setInputFormat(train);
        Instances train2 = Filter.useFilter(train, filter);
        System.out.println("Nominal attributes removed from computation.");

        /*
         Apply Weka's Normalize filter to normalize the dataset.
         */
        Normalize norm = new Normalize();
        norm.setInputFormat(train2);
        Instances train3 = Filter.useFilter(train2, norm);
        System.out.println("Dataset normalized.");

        /*
         First Clustering Algorithm
         */
        EuclideanDistance df = new EuclideanDistance();
        SimpleKMeans clus1 = new SimpleKMeans();
        int k = 10;
        clus1.setNumClusters(k);
        clus1.setDistanceFunction(df);
        clus1.setPreserveInstancesOrder(true);
        clus1.buildClusterer(train3);

        /*
         First Evaluation
         */
        ClusterEvaluation eval1 = new ClusterEvaluation();
        eval1.setClusterer(clus1);
        eval1.evaluateClusterer(train3);
        System.out.println(eval1.clusterResultsToString());

        int[] assignments = clus1.getAssignments();
        String[][] dati = new String[150][4];

        for (int kk = 0; kk < 150; kk++) {
            dati[kk][0] = String.valueOf(kk);
            dati[kk][1] = train2.instance(kk).toString();
            dati[kk][2] = String.valueOf(assignments[kk]);
            dati[kk][3] = titles[kk];
        }

        for (int w = 0; w < 10; w++) {
            System.out.println();
            for (int i = 0; i < 150; i++) {
                if (dati[i][2].equals(String.valueOf(w))) {
                    for (int j = 0; j < 4; j++) {
                        if (j != 3) {
                            System.out.print(dati[i][j] + "-> \t");
                        } else {
                            System.out.println(dati[i][j]);
                        }
                    }
                }
            }
        }

        /*first graph  
                
         PlotData2D predData = ClustererPanel.setUpVisualizableInstances(train, eval1);
         //String name = (new SimpleDateFormat("HH:mm:ss - ")).format(new Date());
         String name = "";
         String cname = clus1.getClass().getName();
         if (cname.startsWith("weka.clusterers."))
         name += cname.substring("weka.clusterers.".length());
         else
         name += cname;
                
                
         VisualizePanel vp = new VisualizePanel();
         vp.setName(name + " (" + train.relationName() + ")");
         predData.setPlotName(name + " (" + train.relationName() + ")");
         vp.addPlot(predData);
                
         String plotName = vp.getName();
         final javax.swing.JFrame jf = new javax.swing.JFrame("Weka Clusterer Visualize: " + plotName);
         jf.setSize(500,400);
         jf.getContentPane().setLayout(new BorderLayout());
         jf.getContentPane().add(vp, BorderLayout.CENTER);
         jf.dispose();
         jf.addWindowListener(new java.awt.event.WindowAdapter() {
         public void windowClosing(java.awt.event.WindowEvent e) {
         jf.dispose();
         }
         });
         jf.setVisible(true);
                
         end first graph
         */

        /*
         Second Clustering Algorithm
         */

        System.out.println();

        DBSCAN clus3 = new DBSCAN();
        clus3.setEpsilon(0.7);
        clus3.setMinPoints(2);
        clus3.buildClusterer(train3);

        /*
         Second Evaluation
         */
        ClusterEvaluation eval3 = new ClusterEvaluation();
        eval3.setClusterer(clus3);
        eval3.evaluateClusterer(train3);
        System.out.println(eval3.clusterResultsToString());

        double[] assignments3 = eval3.getClusterAssignments();
        String[][] dati3 = new String[150][4];

        for (int kk = 0; kk < 150; kk++) {
            dati3[kk][0] = String.valueOf(kk);
            dati3[kk][1] = train2.instance(kk).toString();
            dati3[kk][2] = String.valueOf(assignments3[kk]);
            dati3[kk][3] = titles[kk];
        }

        for (int w = 0; w < eval3.getNumClusters(); w++) {
            System.out.println();
            for (int i = 0; i < 150; i++) {
                if (Double.parseDouble(dati3[i][2]) == w) {
                    for (int j = 0; j < 4; j++) {
                        if (j != 3) {
                            System.out.print(dati3[i][j] + "-> \t");
                        } else {
                            System.out.println(dati3[i][j]);
                        }
                    }
                }
            }
        }
        System.out.println();
        for (int i = 0; i < 150; i++) {
            if (Double.parseDouble(dati3[i][2]) == -1.0) {
                for (int j = 0; j < 4; j++) {
                    if (j != 3) {
                        System.out.print(dati3[i][j] + "-> \t");
                    } else {
                        System.out.println(dati3[i][j]);
                    }
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:adams.flow.transformer.WekaClusterAssignments.java

License:Open Source License

/**
 * Executes the flow item.
 *
 * @return      null if everything is fine, otherwise error message
 */
@Override
protected String doExecute() {
    String result;
    ClusterEvaluation eval;
    SpreadSheet sheet;

    result = null;

    if (m_InputToken.getPayload() instanceof WekaClusterEvaluationContainer)
        eval = (ClusterEvaluation) ((WekaClusterEvaluationContainer) m_InputToken.getPayload())
                .getValue(WekaClusterEvaluationContainer.VALUE_EVALUATION);
    else
        eval = (ClusterEvaluation) m_InputToken.getPayload();

    if ((eval != null) && (eval.getClusterAssignments() != null)) {
        sheet = new DefaultSpreadSheet();

        // header
        sheet.getHeaderRow().addCell("A").setContent("Assignment");

        // data
        for (double assignment : eval.getClusterAssignments())
            sheet.addRow().addCell("A").setContent(assignment);

        m_OutputToken = new Token(sheet);
    } else {
        getLogger().warning("No evaluation or cluster assignments available!");
    }

    return result;
}

From source file:com.spread.experiment.tempuntilofficialrelease.ClassificationViaClustering108.java

License:Open Source License

/**
 * builds the classifier
 * 
 * @param data the training instances
 * @throws Exception if something goes wrong
 */
@Override
public void buildClassifier(Instances data) throws Exception {

    // can classifier handle the data?
    getCapabilities().testWithFail(data);

    // save original header (needed for clusters to classes output)
    m_OriginalHeader = data.stringFreeStructure();

    // remove class attribute for clusterer
    Instances clusterData = new Instances(data);
    clusterData.setClassIndex(-1);
    clusterData.deleteAttributeAt(data.classIndex());
    m_ClusteringHeader = clusterData.stringFreeStructure();

    if (m_ClusteringHeader.numAttributes() == 0) {
        System.err.println("Data contains only class attribute, defaulting to ZeroR model.");
        m_ZeroR = new ZeroR();
        m_ZeroR.buildClassifier(data);
    } else {
        m_ZeroR = null;

        // build clusterer
        m_ActualClusterer = AbstractClusterer.makeCopy(m_Clusterer);
        m_ActualClusterer.buildClusterer(clusterData);

        if (!getLabelAllClusters()) {

            // determine classes-to-clusters mapping
            ClusterEvaluation eval = new ClusterEvaluation();
            eval.setClusterer(m_ActualClusterer);
            eval.evaluateClusterer(clusterData);
            double[] clusterAssignments = eval.getClusterAssignments();
            int[][] counts = new int[eval.getNumClusters()][m_OriginalHeader.numClasses()];
            int[] clusterTotals = new int[eval.getNumClusters()];
            double[] best = new double[eval.getNumClusters() + 1];
            double[] current = new double[eval.getNumClusters() + 1];
            for (int i = 0; i < data.numInstances(); i++) {
                Instance instance = data.instance(i);
                if (!instance.classIsMissing()) {
                    counts[(int) clusterAssignments[i]][(int) instance.classValue()]++;
                    clusterTotals[(int) clusterAssignments[i]]++;
                }
            }
            best[eval.getNumClusters()] = Double.MAX_VALUE;
            ClusterEvaluation.mapClasses(eval.getNumClusters(), 0, counts, clusterTotals, current, best, 0);
            m_ClustersToClasses = new double[best.length];
            System.arraycopy(best, 0, m_ClustersToClasses, 0, best.length);
        } else {
            m_ClusterClassProbs = new double[m_ActualClusterer.numberOfClusters()][data.numClasses()];
            for (int i = 0; i < data.numInstances(); i++) {
                Instance clusterInstance = clusterData.instance(i);
                Instance originalInstance = data.instance(i);
                if (!originalInstance.classIsMissing()) {
                    double[] probs = m_ActualClusterer.distributionForInstance(clusterInstance);
                    for (int j = 0; j < probs.length; j++) {
                        m_ClusterClassProbs[j][(int) originalInstance.classValue()] += probs[j];
                    }
                }
            }
            for (int i = 0; i < m_ClusterClassProbs.length; i++) {
                Utils.normalize(m_ClusterClassProbs[i]);
            }
        }
    }
}

From source file:lineage.AAFClusterer.java

License:Open Source License

/**
 * Expectation Maximization clustering
 * @param data - matrix of observations (numObs x numFeatures)
 * @param numObs - number of observations
 * @param numFeatures - number of features per observation
 */
public Cluster[] em(double[][] data, int numObs, int numFeatures) {
    Instances ds = convertMatrixToWeka(data, numObs, numFeatures);
    EM clusterer = new EM();
    try {
        clusterer.buildClusterer(ds);
        ClusterEvaluation eval = new ClusterEvaluation();
        eval.setClusterer(clusterer);
        eval.evaluateClusterer(new Instances(ds));
        int numClusters = eval.getNumClusters();

        Cluster[] clusters = new Cluster[numClusters];
        double[][] clusterCentroids = new double[numClusters][numFeatures];
        int[] clusterCount = new int[numClusters];

        double[] assignments = eval.getClusterAssignments();
        for (int i = 0; i < ds.numInstances(); i++) {
            Instance inst = ds.instance(i);
            int clusterId = (int) assignments[i];
            for (int j = 0; j < numFeatures; j++) {
                clusterCentroids[clusterId][j] += inst.value(j);
            }
            clusterCount[clusterId]++;
        }

        for (int i = 0; i < numClusters; i++) {
            double[] mean = new double[numFeatures];
            for (int j = 0; j < numFeatures; j++) {
                mean[j] = clusterCentroids[i][j] / clusterCount[i];
            }
            clusters[i] = new Cluster(mean, i);
        }

        // cluster members & std dev
        double[][] clusterStdDev = new double[numClusters][numFeatures];
        for (int i = 0; i < ds.numInstances(); i++) {
            int clusterId = (int) assignments[i];
            clusters[clusterId].addMember(i);
            for (int j = 0; j < numFeatures; j++) {
                clusterStdDev[clusterId][j] += Math
                        .pow(ds.instance(i).value(j) - clusters[clusterId].getCentroid()[j], 2);
            }
        }

        for (int i = 0; i < numClusters; i++) {
            double[] dev = new double[numFeatures];
            for (int j = 0; j < numFeatures; j++) {
                dev[j] = Math.sqrt(clusterStdDev[i][j] / clusterCount[i]);
            }
            clusters[i].setStdDev(dev);
        }

        return clusters;
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
        return null;
    }
}

From source file:lu.lippmann.cdb.datasetview.tabs.UnsupervisedFeatureEvaluationTabView.java

License:Open Source License

private static Instances buildDerivatedDatasetForFeaturesClusters(final Instances dataSet, final int k)
        throws Exception {
    final Instances trdataSet = WekaDataProcessingUtil.buildTransposedDataSet(dataSet);

    final EuclideanDistance distanceFunction = new EuclideanDistance(trdataSet);

    final SimpleKMeans skm = WekaMachineLearningUtil.buildSimpleKMeansClustererWithK(k, distanceFunction);
    skm.buildClusterer(trdataSet);
    final ClusterEvaluation eval = new ClusterEvaluation();
    eval.setClusterer(skm);
    eval.evaluateClusterer(trdataSet);

    final int numClusters = eval.getNumClusters();
    final List<String> possibleValues = new ArrayList<String>(numClusters);
    for (int c = 0; c < numClusters; c++)
        possibleValues.add("cluster_" + c);

    final double[] clusterAssignments = eval.getClusterAssignments();

    final int numAttributes = dataSet.numAttributes();
    final List<Integer> valueForEachFeature = new ArrayList<Integer>(numAttributes);
    for (int j = 0; j < numAttributes; j++) {
        //System.out.println(clusterAssignments[j]+" "+(int)clusterAssignments[j]);
        valueForEachFeature.add((int) clusterAssignments[j]);
    }

    return buildDerivatedDataset(dataSet, possibleValues, valueForEachFeature);
}

From source file:lu.lippmann.cdb.lab.beta.util.WekaUtil2.java

License:Open Source License

/**
 * Runs SimpleKMeans with K clusters on the given instances.
 *
 * @param newInstances the instances to cluster
 * @param K the number of clusters
 * @return one cluster assignment per instance
 * @throws Exception if clustering or evaluation fails
 */
public static double[] doKMeans(final Instances newInstances, final int K) throws Exception {
    final SimpleKMeans clusterer = new SimpleKMeans();
    clusterer.setOptions(
            Utils.splitOptions("-N " + K + " -R first-last -I 500 -S 10 -A weka.core.EuclideanDistance"));

    clusterer.buildClusterer(newInstances);

    final ClusterEvaluation eval = new ClusterEvaluation();
    eval.setClusterer(clusterer);
    eval.evaluateClusterer(newInstances);

    double[] ass = eval.getClusterAssignments();
    return ass;
}

From source file:lu.lippmann.cdb.lab.beta.util.WekaUtil2.java

License:Open Source License

/**
 * Builds the given clusterer on the instances and groups the instances by assigned cluster.
 *
 * @param wekaClusterer the clusterer to build
 * @param instances the instances to cluster
 * @return one IndexedInstance container per cluster, preserving the original instance indices
 * @throws Exception if clustering or evaluation fails
 */
public static List<IndexedInstance> computeClusters(final Clusterer wekaClusterer, final Instances instances)
        throws Exception {
    final Instances ii = new Instances(instances);
    ii.setClassIndex(-1);

    wekaClusterer.buildClusterer(ii);

    final ClusterEvaluation eval = new ClusterEvaluation();
    eval.setClusterer(wekaClusterer);
    eval.evaluateClusterer(ii);

    final int clustersCount = eval.getNumClusters();
    final List<IndexedInstance> clustersList = new ArrayList<IndexedInstance>(clustersCount);

    //Initialize instances
    for (int k = 0; k < clustersCount; k++) {
        clustersList.add(new IndexedInstance(new Instances(instances, 0), new HashMap<Integer, Integer>()));
    }

    final double[] ass = eval.getClusterAssignments();
    if (ass.length != ii.numInstances())
        throw new IllegalStateException();
    for (int i = 0; i < ass.length; i++) {
        IndexedInstance idxi = clustersList.get((int) ass[i]);
        idxi.getInstances().add(instances.instance(i));
        int pos = idxi.getInstances().size() - 1;
        idxi.getMapOrigIndex().put(pos, i);
    }

    return clustersList;
}

From source file:lu.lippmann.cdb.lab.kmeans.KmeansImproved.java

License:Open Source License

/**
 * Picks the number of clusters (2..maxClusters) that maximizes the pseudo-F statistic
 * and returns the resulting cluster assignments.
 *
 * @return one cluster assignment per instance
 * @throws Exception if clustering or evaluation fails
 */
public double[] getClusteredInstances() throws Exception {

    //Removing potential class index 
    instances.setClassIndex(-1);

    //Clustering using Kmeans
    int k;
    double max = 0, r2 = 0, pseudoF = 0;

    //Testing from 2 to 10 clusters, should be set as entry of this function
    SimpleKMeans bestKMeans = new SimpleKMeans();
    for (k = 2; k <= maxClusters; k++) {
        final SimpleKMeans kMeans = new SimpleKMeans();
        kMeans.setNumClusters(k);
        kMeans.buildClusterer(instances);
        //Choosing the "optimal" number of clusters
        r2 = R2(kMeans);
        pseudoF = pseudoF(r2, k);
        //System.out.println(pseudo_f);
        if (pseudoF > max) {
            max = pseudoF;
            bestKMeans = kMeans;
        }
    }

    //Real clustering using the chosen number
    final ClusterEvaluation eval = new ClusterEvaluation();
    eval.setClusterer(bestKMeans);
    eval.evaluateClusterer(instances);
    double[] clusterAssignments = eval.getClusterAssignments();

    this.usedKmeans = bestKMeans;

    return clusterAssignments;

}