List of usage examples for weka.core.Instance.valueSparse
public double valueSparse(int indexOfIndex);
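valueSparse(p) returns the value stored at internal position p of a (possibly sparse) instance; index(p) maps that position back to its attribute index, and numValues() gives the number of stored positions. A minimal sketch of this iteration pattern, assuming Weka 3.7+ (where Instances takes an ArrayList of Attribute) and made-up attribute names and values:

import java.util.ArrayList;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.SparseInstance;

public class ValueSparseDemo {
    public static void main(String[] args) {
        // Tiny dataset with three numeric attributes (hypothetical names).
        ArrayList<Attribute> atts = new ArrayList<>();
        atts.add(new Attribute("a"));
        atts.add(new Attribute("b"));
        atts.add(new Attribute("c"));
        Instances data = new Instances("demo", atts, 0);

        // A sparse instance stores only its non-zero values internally.
        Instance inst = new SparseInstance(1.0, new double[] { 0.0, 2.5, 0.0 });
        inst.setDataset(data);

        // numValues() counts stored values; index(p) maps a stored position
        // to its attribute index; valueSparse(p) returns the stored value.
        for (int p = 0; p < inst.numValues(); p++) {
            int attIndex = inst.index(p);
            double value = inst.valueSparse(p);
            System.out.println(data.attribute(attIndex).name() + " = " + value);
        }
    }
}

Only the non-zero value is stored for the SparseInstance above, so the loop prints a single line, b = 2.5. The examples below use the same numValues/index/valueSparse trio inside real classifiers.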
From source file: pk.lums.edu.sma.processing.ml.DBSCAN.ManhattanDataObject.java
License: Open Source License
/**
 * Calculates the Manhattan distance between dataObject and this.dataObject.
 *
 * @param dataObject the DataObject used for the distance calculation with
 *                   this.dataObject; assumed to be of the same type and
 *                   with the same structure
 * @return the Manhattan distance between dataObject and this.dataObject,
 *         or NaN if the computation could not be performed
 */
public double distance(DataObject dataObject) {
    double dist = 0.0;
    Instance firstInstance = getInstance();
    Instance secondInstance = dataObject.getInstance();
    int firstNumValues = firstInstance.numValues();
    int secondNumValues = secondInstance.numValues();
    int numAttributes = firstInstance.numAttributes();
    int firstI, secondI;

    // Walk both (possibly sparse) instances in parallel over their stored values.
    for (int p1 = 0, p2 = 0; p1 < firstNumValues || p2 < secondNumValues;) {
        if (p1 >= firstNumValues) {
            firstI = numAttributes;
        } else {
            firstI = firstInstance.index(p1);
        }
        if (p2 >= secondNumValues) {
            secondI = numAttributes;
        } else {
            secondI = secondInstance.index(p2);
        }

        double cDistance = 0;
        if (firstI == secondI) {
            // Both instances store a value for this attribute.
            cDistance = computeDistance(firstI, firstInstance.valueSparse(p1), secondInstance.valueSparse(p2));
            p1++;
            p2++;
        } else if (firstI > secondI) {
            // Only the second instance stores a value; the first is implicitly 0.
            cDistance = computeDistance(secondI, 0, secondInstance.valueSparse(p2));
            p2++;
        } else {
            // Only the first instance stores a value; the second is implicitly 0.
            cDistance = computeDistance(firstI, firstInstance.valueSparse(p1), 0);
            p1++;
        }
        dist += Math.abs(cDistance);
    }
    return dist;
}
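Because either instance may be a SparseInstance, the loop merges the two lists of stored positions like a sorted-list merge: index(p) recovers the attribute index for each stored position, valueSparse(p) its value, and an attribute absent from one side contributes an implicit 0 to the distance term.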
From source file: xlong.urlclassify.others.SPegasos.java
License: Open Source License
/**
 * Updates the classifier with the given instance.
 *
 * @param instance the new training instance to include in the model
 * @exception Exception if the instance could not be incorporated in the model
 */
public void updateClassifier(Instance instance) throws Exception {
    if (!instance.classIsMissing()) {
        double learningRate = 1.0 / (m_lambda * m_t);
        // double scale = 1.0 - learningRate * m_lambda;
        double scale = 1.0 - 1.0 / m_t;
        double y = (instance.classValue() == 0) ? -1 : 1;
        double wx = dotProd(instance, m_weights, instance.classIndex());
        double z = y * (wx + m_weights[m_weights.length - 1]);

        // Shrink all weights (regularization step); the last slot holds the bias.
        for (int j = 0; j < m_weights.length - 1; j++) {
            if (j != instance.classIndex()) {
                m_weights[j] *= scale;
            }
        }

        if (m_loss == LOGLOSS || (z < 1)) {
            double loss = dloss(z);
            int n1 = instance.numValues();
            // Gradient step over the attributes actually stored in the (sparse) instance.
            for (int p1 = 0; p1 < n1; p1++) {
                int indS = instance.index(p1);
                if (indS != instance.classIndex() && !instance.isMissingSparse(p1)) {
                    double m = learningRate * loss * (instance.valueSparse(p1) * y);
                    m_weights[indS] += m;
                }
            }
            // update the bias
            m_weights[m_weights.length - 1] += learningRate * loss * y;
        }

        // Rescale the weight vector if its norm exceeds the Pegasos bound.
        double norm = 0;
        for (int k = 0; k < m_weights.length - 1; k++) {
            if (k != instance.classIndex()) {
                norm += (m_weights[k] * m_weights[k]);
            }
        }
        double scale2 = Math.min(1.0, (1.0 / (m_lambda * norm)));
        if (scale2 < 1.0) {
            scale2 = Math.sqrt(scale2);
            for (int j = 0; j < m_weights.length - 1; j++) {
                if (j != instance.classIndex()) {
                    m_weights[j] *= scale2;
                }
            }
        }
        m_t++;
    }
}
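In this update, valueSparse is what keeps the gradient step cheap on sparse data: only the attributes actually stored in the instance are visited, isMissingSparse(p1) skips missing values, and index(p1) maps each stored position back to the weight it updates.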