List of usage examples for org.apache.mahout.cf.taste.impl.common RunningAverage getAverage
double getAverage();
From source file: GavaFactorizer.java
License: Apache License
double getAveragePreference() throws TasteException { RunningAverage average = new FullRunningAverage(); LongPrimitiveIterator it = dataModel.getUserIDs(); while (it.hasNext()) { for (Preference pref : dataModel.getPreferencesFromUser(it.nextLong())) { average.addDatum(pref.getValue()); }//from w w w . jav a 2 s . c o m } return average.getAverage(); }
From source file: com.msiiplab.recsys.rwr.GLRecommenderIRStatsEvaluator.java
License: Apache License
/**
 * Runs an IR-style evaluation over pre-built cross-validation folds.
 * For each (training, testing) fold pair a recommender is built on the training
 * model and scored against the test model, accumulating per-user
 * precision@at, R-precision, recall, fall-out, nDCG, rating-nDCG, reach,
 * and (inner) degree-of-agreement (DOA) metrics; fold-level averages are then
 * averaged across folds into the returned statistics object.
 *
 * @param recommenderBuilder   builds the recommender under test from a training model; must not be null
 * @param trainingDataModels   one training model per fold; must not be null
 * @param testingDataModels    one testing model per fold; same size as trainingDataModels
 * @param rescorer             optional rescorer passed through to recommend(); may be null
 * @param at                   cutoff N for the top-N recommendation list; must be >= 1
 * @param relevanceThreshold   unused here — all test-set items of a user are treated as relevant
 * @param evaluationPercentage must be in (0, 1]; validated but not otherwise used in this method
 * @return cross-validated averages of all computed metrics
 * @throws TasteException on data-model or recommender failure
 */
public GLIRStatisticsImpl evaluate(RecommenderBuilder recommenderBuilder, List<DataModel> trainingDataModels,
        List<DataModel> testingDataModels, IDRescorer rescorer, int at, double relevanceThreshold,
        double evaluationPercentage) throws TasteException {
    Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    Preconditions.checkArgument(trainingDataModels != null, "trainingDataModels is null");
    Preconditions.checkArgument(testingDataModels != null, "testingDataModels is null");
    Preconditions.checkArgument(testingDataModels.size() == trainingDataModels.size(),
            "trainingDataModels.size must equals testingDataModels.size");
    Preconditions.checkArgument(at >= 1, "at must be at least 1");
    Preconditions.checkArgument(evaluationPercentage > 0.0 && evaluationPercentage <= 1.0,
            "Invalid evaluationPercentage: %s", evaluationPercentage);
    // num of train/test pair: num of cross validation folds
    int numFolds = trainingDataModels.size();
    // Cross-fold accumulators: one datum per fold is added to each.
    RunningAverage CrossValidationPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRecall = new GLRunningAverage();
    RunningAverage CrossValidationFallOut = new GLRunningAverage();
    RunningAverage CrossValidationNDCG = new GLRunningAverage();
    RunningAverage CrossValidationRNDCG = new GLRunningAverage(); // rating-nDCG
    RunningAverage CrossValidationReach = new GLRunningAverage();
    RunningAverage CrossValidationMacroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMacroInnerDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroInnerDOA = new GLRunningAverage();
    for (int i_folds = 0; i_folds < numFolds; i_folds++) {
        log.info("fold {}", i_folds);
        DataModel trainDataModel = trainingDataModels.get(i_folds);
        DataModel testDataModel = testingDataModels.get(i_folds);
        // Union of all item IDs across train and test for this fold.
        // NOTE(review): MovieIDs is built but never used afterwards (only a
        // commented-out iterator referenced it) — candidate for removal.
        FastIDSet MovieIDs = new FastIDSet();
        LongPrimitiveIterator it_train_temp = trainDataModel.getItemIDs();
        LongPrimitiveIterator it_test_temp = testDataModel.getItemIDs();
        while (it_train_temp.hasNext()) {
            MovieIDs.add(it_train_temp.nextLong());
        }
        while (it_test_temp.hasNext()) {
            MovieIDs.add(it_test_temp.nextLong());
        }
        int numTrainItems = trainDataModel.getNumItems();
        int numTestItems = testDataModel.getNumItems();
        // NOTE(review): items present in both models are counted twice here;
        // MovieIDs.size() would give the distinct count — confirm which is intended.
        int numItems = numTestItems + numTrainItems;
        // Per-fold, per-user metric accumulators (one datum per evaluated user).
        RunningAverage precision = new GLRunningAverage();
        RunningAverage rPrecision = new GLRunningAverage();
        RunningAverage recall = new GLRunningAverage();
        RunningAverage fallOut = new GLRunningAverage();
        RunningAverage nDCG = new GLRunningAverage();
        RunningAverage rNDCG = new GLRunningAverage();
        RunningAverage macroDOA = new GLRunningAverage();
        RunningAverage microDOA1 = new GLRunningAverage(); // micro-DOA numerator terms
        RunningAverage microDOA2 = new GLRunningAverage(); // micro-DOA denominator terms
        RunningAverage macroInnerDOA = new GLRunningAverage();
        RunningAverage microInnerDOA1 = new GLRunningAverage();
        RunningAverage microInnerDOA2 = new GLRunningAverage();
        int numUsersRecommendedFor = 0;
        int numUsersWithRecommendations = 0;
        long start = System.currentTimeMillis();
        // Build recommender on the training half of this fold.
        Recommender recommender = recommenderBuilder.buildRecommender(trainDataModel);
        LongPrimitiveIterator it_user = testDataModel.getUserIDs();
        while (it_user.hasNext()) {
            long userID = it_user.nextLong();
            log.info("user {}", userID);
            // All of a user's test-set items are treated as relevant.
            FastIDSet learnedItemIDs;
            FastIDSet relevantItemIDs;
            try {
                learnedItemIDs = trainDataModel.getItemIDsFromUser(userID);
                relevantItemIDs = testDataModel.getItemIDsFromUser(userID);
            } catch (NoSuchUserException e1) {
                // User missing from either model: skip.
                continue;
            }
            // Skip users with zero relevant (test) items.
            int numRelevantItems = relevantItemIDs.size();
            if (numRelevantItems <= 0) {
                continue;
            }
            // Skip users that have no preference record in the training set.
            try {
                trainDataModel.getPreferencesFromUser(userID);
            } catch (NoSuchUserException nsee) {
                continue;
            }
            // Two recommendation lists: top-at (for precision@at etc.) and
            // top-|relevant| (for R-precision).
            List<RecommendedItem> recommendedItems = recommender.recommend(userID, at, rescorer);
            List<RecommendedItem> recommendedItemsAtRelNum = recommender.recommend(userID, numRelevantItems,
                    rescorer);
            PreferenceArray userPreferences = testDataModel.getPreferencesFromUser(userID);
            FastByIDMap<Preference> userPreferenceMap = getPrefereceMap(userPreferences);
            userPreferences.sortByValueReversed();
            // relevantItemIDsAtN: only the user's top-at test items by rating.
            // NOTE(review): built but not read later in this method — presumably
            // intended for the nDCG helpers; verify.
            FastIDSet relevantItemIDsAtN = new FastIDSet();
            Iterator<Preference> it_pref = userPreferences.iterator();
            int num_pref = 0;
            while (it_pref.hasNext()) {
                relevantItemIDsAtN.add(it_pref.next().getItemID());
                num_pref++;
                if (num_pref >= at) {
                    break;
                }
            }
            // |recommended ∩ relevant| for the top-at list.
            int intersectionSize = 0;
            int numRecommendedItems = recommendedItems.size();
            for (RecommendedItem recommendedItem : recommendedItems) {
                if (relevantItemIDs.contains(recommendedItem.getItemID())) {
                    intersectionSize++;
                }
            }
            // Precision = hits / recommended (0 when nothing was recommended).
            double prec = 0;
            if (numRecommendedItems > 0) {
                prec = (double) intersectionSize / (double) numRecommendedItems;
            }
            precision.addDatum(prec);
            log.info("Precision for user {} is {}", userID, prec);
            // Recall = hits / relevant.
            double rec = (double) intersectionSize / (double) numRelevantItems;
            recall.addDatum(rec);
            log.info("Recall for user {} is {}", userID, rec);
            // R-precision: hits within the top-|relevant| list, over |relevant|.
            double rprec = 0;
            int intersectionSizeAtRelNum = 0;
            int numRecommendedItemsAtRelNum = recommendedItemsAtRelNum.size();
            for (RecommendedItem recommendedItem : recommendedItemsAtRelNum) {
                if (relevantItemIDs.contains(recommendedItem.getItemID())) {
                    intersectionSizeAtRelNum++;
                }
            }
            if (numRecommendedItemsAtRelNum > 0) {
                rprec = (double) intersectionSizeAtRelNum / (double) numRelevantItems;
            }
            rPrecision.addDatum(rprec);
            log.info("RPrecision for user {} is {}", userID, rprec);
            // F1 is logged per user but not accumulated.
            double F1 = 0;
            if (prec + rec > 0) {
                F1 = 2 * prec * rec / (prec + rec);
            }
            log.info("F1 for user {} is {}", userID, F1);
            // Fall-out: false positives over non-relevant items.
            double fall = 0;
            int size = numRelevantItems + trainDataModel.getItemIDsFromUser(userID).size();
            // size > numRelevantItems iff the user rated anything in training;
            // guards the division below against a degenerate item universe.
            if (numRelevantItems < size) {
                fall = (double) (numRecommendedItems - intersectionSize) / (double) (numItems - numRelevantItems);
            }
            fallOut.addDatum(fall);
            log.info("Fallout for user {} is {}", userID, fall);
            // nDCG / rating-nDCG: relevant IDs contribute their rating, others 0.
            PreferenceArray userPredictions = getPreferenceArray(recommendedItems, userID);
            double userNDCG = computeNDCG(userPreferences, userPredictions, relevantItemIDs, userPreferenceMap,
                    at);
            double userRNDCG = computeRNDCG(userPreferences, userPredictions, relevantItemIDs, userPreferenceMap,
                    at);
            nDCG.addDatum(userNDCG);
            rNDCG.addDatum(userRNDCG);
            log.info("NDCG for user {} is {}", userID, userNDCG);
            log.info("RNDCG for user {} is {}", userID, userRNDCG);
            // Reach bookkeeping: fraction of users who got >= 1 recommendation.
            numUsersRecommendedFor++;
            if (numRecommendedItems > 0) {
                numUsersWithRecommendations++;
            }
            // DOA per [Siegel and Castellan, 1988] and [Gori and Pucci, 2007]:
            // over items the user never rated (neither train nor test), count how
            // often the estimate ranks them at or below each relevant test item.
            LongPrimitiveIterator it_movies = trainDataModel.getItemIDs();
            long numNW = 0;
            long sumCheckOrder = 0;
            while (it_movies.hasNext()) {
                long itemID = it_movies.nextLong();
                if (!learnedItemIDs.contains(itemID) && !relevantItemIDs.contains(itemID)) {
                    // itemID is in NW_{u_i} (not watched by this user).
                    numNW++;
                    LongPrimitiveIterator it_test = relevantItemIDs.iterator();
                    while (it_test.hasNext()) {
                        long testItemID = it_test.nextLong();
                        float itemPref = 0;
                        float testItemPref = 0;
                        try {
                            itemPref = recommender.estimatePreference(userID, itemID);
                        } catch (NoSuchItemException e) {
                            // unknown item: keep the 0 default
                        }
                        try {
                            testItemPref = recommender.estimatePreference(userID, testItemID);
                        } catch (NoSuchItemException e) {
                            // unknown item: keep the 0 default
                        }
                        if (itemPref <= testItemPref) {
                            sumCheckOrder++;
                        }
                    }
                }
            }
            if (numNW > 0 && relevantItemIDs.size() > 0) {
                macroDOA.addDatum((double) sumCheckOrder / (double) (relevantItemIDs.size() * numNW));
                microDOA1.addDatum((double) sumCheckOrder);
                microDOA2.addDatum((double) (relevantItemIDs.size() * numNW));
            }
            // InnerDOA: agreement of predicted vs. actual ordering within the
            // user's test-set item pairs only.
            LongPrimitiveIterator it_test1 = relevantItemIDs.iterator();
            long sumCheckInnerOrder = 0;
            long sumAll = 0;
            while (it_test1.hasNext()) {
                long itemID1 = it_test1.nextLong();
                LongPrimitiveIterator it_test2 = relevantItemIDs.iterator();
                while (it_test2.hasNext()) {
                    long itemID2 = it_test2.nextLong();
                    if (itemID1 != itemID2) {
                        try {
                            // NOTE(review): getPreferenceValue returns a boxed Float that
                            // may be null; a null here would throw an uncaught NPE on
                            // unboxing, not the NoSuchItemException handled below — verify.
                            float pref_v1 = testDataModel.getPreferenceValue(userID, itemID1);
                            float pref_v2 = testDataModel.getPreferenceValue(userID, itemID2);
                            float predict_v1 = recommender.estimatePreference(userID, itemID1);
                            float predict_v2 = recommender.estimatePreference(userID, itemID2);
                            if ((pref_v1 >= pref_v2 && predict_v1 >= predict_v2)
                                    || (pref_v1 <= pref_v2 && predict_v1 <= predict_v2)) {
                                sumCheckInnerOrder++;
                            }
                            sumAll++;
                        } catch (NoSuchItemException e) {
                            // do nothing, just ignore
                        }
                    }
                }
            }
            if (relevantItemIDs.size() > 1) {
                macroInnerDOA.addDatum((double) sumCheckInnerOrder / (double) sumAll);
                microInnerDOA1.addDatum((double) sumCheckInnerOrder);
                microInnerDOA2.addDatum((double) sumAll);
            }
        }
        long end = System.currentTimeMillis();
        // Fold summary: push this fold's averages into the cross-fold accumulators.
        CrossValidationPrecision.addDatum(precision.getAverage());
        CrossValidationRPrecision.addDatum(rPrecision.getAverage());
        CrossValidationRecall.addDatum(recall.getAverage());
        CrossValidationFallOut.addDatum(fallOut.getAverage());
        CrossValidationNDCG.addDatum(nDCG.getAverage());
        CrossValidationRNDCG.addDatum(rNDCG.getAverage());
        CrossValidationReach.addDatum((double) numUsersWithRecommendations / (double) numUsersRecommendedFor);
        CrossValidationMacroDOA.addDatum(macroDOA.getAverage());
        CrossValidationMicroDOA.addDatum(microDOA1.getAverage() / microDOA2.getAverage());
        CrossValidationMacroInnerDOA.addDatum(macroInnerDOA.getAverage());
        CrossValidationMicroInnerDOA.addDatum(microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        log.info("Evaluated with training/testing set # {} in {}ms", i_folds, end - start);
        System.out.printf("Evaluated with training/testing set # %d in %d ms \n", i_folds, end - start);
        log.info(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        System.out.printf(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f \n",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
    }
    log.info(
            "Cross Validation Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
    System.out.printf(
            "Cross Validation: \nPrecision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f\n",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
    return new GLIRStatisticsImpl(CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
}
From source file: com.msiiplab.recsys.rwr.ParallelGLRecommenderIRStatsEvaluator.java
License: Apache License
/**
 * Parallel variant of the cross-validated IR evaluation: per-user metric
 * computation is delegated to {@code Eval} tasks submitted to a fixed thread
 * pool, while fold-level aggregation stays on the calling thread. Each task
 * returns 1 when the user received at least one recommendation (drives the
 * "reach" metric).
 *
 * @param recommenderBuilder   builds the recommender under test; must not be null
 * @param trainingDataModels   one training model per fold; must not be null
 * @param testingDataModels    one testing model per fold; same size as trainingDataModels
 * @param rescorer             optional rescorer forwarded to the Eval tasks; may be null
 * @param at                   top-N cutoff; must be >= 1
 * @param relevanceThreshold   validated by contract of the interface; not used directly here
 * @param evaluationPercentage must be in (0, 1]; validated but not otherwise used here
 * @return cross-validated averages of all computed metrics
 * @throws TasteException on data-model or recommender failure
 */
@Override
public GLIRStatisticsImpl evaluate(RecommenderBuilder recommenderBuilder, List<DataModel> trainingDataModels,
        List<DataModel> testingDataModels, IDRescorer rescorer, int at, double relevanceThreshold,
        double evaluationPercentage) throws TasteException {
    Preconditions.checkArgument(recommenderBuilder != null, "recommenderBuilder is null");
    Preconditions.checkArgument(trainingDataModels != null, "trainingDataModels is null");
    Preconditions.checkArgument(testingDataModels != null, "testingDataModels is null");
    Preconditions.checkArgument(testingDataModels.size() == trainingDataModels.size(),
            "trainingDataModels.size must equals testingDataModels.size");
    Preconditions.checkArgument(at >= 1, "at must be at least 1");
    Preconditions.checkArgument(evaluationPercentage > 0.0 && evaluationPercentage <= 1.0,
            "Invalid evaluationPercentage: %s", evaluationPercentage);
    // num of train/test pair: num of cross validation folds
    int numFolds = trainingDataModels.size();
    // Cross-fold accumulators: one datum per fold.
    RunningAverage CrossValidationPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRPrecision = new GLRunningAverage();
    RunningAverage CrossValidationRecall = new GLRunningAverage();
    RunningAverage CrossValidationFallOut = new GLRunningAverage();
    RunningAverage CrossValidationNDCG = new GLRunningAverage();
    RunningAverage CrossValidationRNDCG = new GLRunningAverage(); // rating-nDCG
    RunningAverage CrossValidationReach = new GLRunningAverage();
    RunningAverage CrossValidationMacroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroDOA = new GLRunningAverage();
    RunningAverage CrossValidationMacroInnerDOA = new GLRunningAverage();
    RunningAverage CrossValidationMicroInnerDOA = new GLRunningAverage();
    for (int i_folds = 0; i_folds < numFolds; i_folds++) {
        log.info("fold {}", i_folds);
        DataModel trainDataModel = trainingDataModels.get(i_folds);
        DataModel testDataModel = testingDataModels.get(i_folds);
        // Union of item IDs across both models.
        // NOTE(review): MovieIDs is built but never used afterwards.
        FastIDSet MovieIDs = new FastIDSet();
        LongPrimitiveIterator it_train_temp = trainDataModel.getItemIDs();
        LongPrimitiveIterator it_test_temp = testDataModel.getItemIDs();
        while (it_train_temp.hasNext()) {
            MovieIDs.add(it_train_temp.nextLong());
        }
        while (it_test_temp.hasNext()) {
            MovieIDs.add(it_test_temp.nextLong());
        }
        int numTrainItems = trainDataModel.getNumItems();
        int numTestItems = testDataModel.getNumItems();
        // NOTE(review): items in both models are double-counted — confirm intended.
        int numItems = numTestItems + numTrainItems;
        // Per-fold accumulators, shared with the worker tasks.
        // NOTE(review): these are mutated concurrently from pool threads inside
        // Eval — presumably GLRunningAverage is thread-safe; verify.
        RunningAverage precision = new GLRunningAverage();
        RunningAverage rPrecision = new GLRunningAverage();
        RunningAverage recall = new GLRunningAverage();
        RunningAverage fallOut = new GLRunningAverage();
        RunningAverage nDCG = new GLRunningAverage();
        RunningAverage rNDCG = new GLRunningAverage();
        RunningAverage macroDOA = new GLRunningAverage();
        RunningAverage microDOA1 = new GLRunningAverage();
        RunningAverage microDOA2 = new GLRunningAverage();
        RunningAverage macroInnerDOA = new GLRunningAverage();
        RunningAverage microInnerDOA1 = new GLRunningAverage();
        RunningAverage microInnerDOA2 = new GLRunningAverage();
        int numUsersRecommendedFor = 0;
        int numUsersWithRecommendations = 0;
        long start = System.currentTimeMillis();
        // Build recommender on the training half of this fold.
        Recommender recommender = recommenderBuilder.buildRecommender(trainDataModel);
        // One Eval task per test user; pool sized to leave one CPU free.
        // NOTE(review): on a single-core machine N_CPUS - 1 == 0, which makes
        // newFixedThreadPool throw IllegalArgumentException — verify.
        ArrayList<Future<Integer>> futureList = new ArrayList<Future<Integer>>();
        int N_CPUS = Runtime.getRuntime().availableProcessors();
        ExecutorService pool = Executors.newFixedThreadPool(N_CPUS - 1);
        LongPrimitiveIterator it_user = testDataModel.getUserIDs();
        while (it_user.hasNext()) {
            long userID = it_user.nextLong();
            Future<Integer> future = pool.submit(new Eval(precision, rPrecision, recall, fallOut, nDCG, rNDCG,
                    macroDOA, microDOA1, microDOA2, macroInnerDOA, microInnerDOA1, microInnerDOA2,
                    trainDataModel, testDataModel, userID, recommender, at, rescorer, numItems));
            futureList.add(future);
        }
        // Join all tasks; a task result of 1 means the user got recommendations.
        for (Future<Integer> future : futureList) {
            numUsersRecommendedFor++;
            try {
                if (future.get() == 1) {
                    numUsersWithRecommendations++;
                }
            } catch (InterruptedException e) {
                // NOTE(review): interrupt status is not restored here
                // (Thread.currentThread().interrupt()) — consider fixing.
                e.printStackTrace();
            } catch (ExecutionException e) {
                // A worker failed: dump both wrapper and cause, then abort the JVM.
                e.printStackTrace();
                e.getCause().printStackTrace();
                System.exit(1);
            }
        }
        pool.shutdown();
        long end = System.currentTimeMillis();
        // Fold summary: push this fold's averages into the cross-fold accumulators.
        CrossValidationPrecision.addDatum(precision.getAverage());
        CrossValidationRPrecision.addDatum(rPrecision.getAverage());
        CrossValidationRecall.addDatum(recall.getAverage());
        CrossValidationFallOut.addDatum(fallOut.getAverage());
        CrossValidationNDCG.addDatum(nDCG.getAverage());
        CrossValidationRNDCG.addDatum(rNDCG.getAverage());
        CrossValidationReach.addDatum((double) numUsersWithRecommendations / (double) numUsersRecommendedFor);
        CrossValidationMacroDOA.addDatum(macroDOA.getAverage());
        CrossValidationMicroDOA.addDatum(microDOA1.getAverage() / microDOA2.getAverage());
        CrossValidationMacroInnerDOA.addDatum(macroInnerDOA.getAverage());
        CrossValidationMicroInnerDOA.addDatum(microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        log.info("Evaluated with training/testing set # {} in {}ms", i_folds, end - start);
        System.out.printf("Evaluated with training/testing set # %d in %d ms \n", i_folds, end - start);
        log.info(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
        System.out.printf(
                "Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA/macroInnerDOA/microInnerDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f \n",
                precision.getAverage(), rPrecision.getAverage(), recall.getAverage(), fallOut.getAverage(),
                nDCG.getAverage(), rNDCG.getAverage(),
                (double) numUsersWithRecommendations / (double) numUsersRecommendedFor, macroDOA.getAverage(),
                microDOA1.getAverage() / microDOA2.getAverage(), macroInnerDOA.getAverage(),
                microInnerDOA1.getAverage() / microInnerDOA2.getAverage());
    }
    log.info(
            "Cross Validation Precision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: {} / {} / {} / {} / {} / {} / {} / {} / {} / {} / {}",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
    System.out.printf(
            "Cross Validation: \nPrecision/R-Precision/recall/fall-out/nDCG/rNDCG/reach/macroDOA/microDOA: %f / %f / %f / %f / %f / %f / %f / %f / %f / %f / %f\n",
            CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
    return new GLIRStatisticsImpl(CrossValidationPrecision.getAverage(), CrossValidationRPrecision.getAverage(),
            CrossValidationRecall.getAverage(), CrossValidationFallOut.getAverage(),
            CrossValidationNDCG.getAverage(), CrossValidationRNDCG.getAverage(), CrossValidationReach.getAverage(),
            CrossValidationMacroDOA.getAverage(), CrossValidationMicroDOA.getAverage(),
            CrossValidationMacroInnerDOA.getAverage(), CrossValidationMicroInnerDOA.getAverage());
}
From source file: com.skp.experiment.cf.als.hadoop.ParallelALSFactorizationJobTest.java
License: Apache License
/**
 * Small integration test that runs the full ALS factorization job on a toy
 * explicit-feedback matrix, then checks reconstruction quality (RMSE of
 * U * M^T against the known preferences).
 *
 * <pre>
 *
 * user-item-matrix
 *
 *          burger  hotdog  berries  icecream
 *  dog       5       5       2        -
 *  rabbit    2       -       3        5
 *  cow       -       5       -        3
 *  donkey    3       -       -        5
 *
 * </pre>
 */
@Test
public void completeJobToyExample() throws Exception {
    // NaN marks a missing (unobserved) preference.
    Double na = Double.NaN;
    Matrix preferences = new SparseRowMatrix(4, 4,
            new Vector[] { new DenseVector(new double[] { 5.0, 5.0, 2.0, na }),
                    new DenseVector(new double[] { 2.0, na, 3.0, 5.0 }),
                    new DenseVector(new double[] { na, 5.0, na, 3.0 }),
                    new DenseVector(new double[] { 3.0, na, na, 5.0 }) });
    // Write the job inputs: the ratings file and the index-size metadata
    // ("dimension,count" per line: 4 users, 4 items).
    writeLines(inputFile, preferencesAsText(preferences));
    indexSizeFile.deleteOnExit();
    writeLines(indexSizeFile, "0,4\n1,4");
    ParallelALSFactorizationJob alsFactorization = new ParallelALSFactorizationJob();
    alsFactorization.setConf(conf);
    int numFeatures = 3;
    int numIterations = 5;
    double lambda = 0.065; // regularization strength
    alsFactorization
            .run(new String[] { "--input", inputFile.getAbsolutePath(), "--output", outputDir.getAbsolutePath(),
                    "--tempDir", tmpDir.getAbsolutePath(), "--lambda", String.valueOf(lambda), "--numFeatures",
                    String.valueOf(numFeatures), "--numIterations", String.valueOf(numIterations), "--indexSizes",
                    indexSizeFile.toString(), "--useTransform", "false" });
    // Read back the learned user-feature (U) and item-feature (M) matrices.
    Matrix u = MathHelper.readMatrix(conf, new Path(outputDir.getAbsolutePath(), "U/part-m-00000"),
            preferences.numRows(), numFeatures);
    Matrix m = MathHelper.readMatrix(conf, new Path(outputDir.getAbsolutePath(), "M/part-m-00000"),
            preferences.numCols(), numFeatures);
    // Log the factorization for debugging.
    StringBuilder info = new StringBuilder();
    info.append("\nA - users x items\n\n");
    info.append(MathHelper.nice(preferences));
    info.append("\nU - users x features\n\n");
    info.append(MathHelper.nice(u));
    info.append("\nM - items x features\n\n");
    info.append(MathHelper.nice(m));
    Matrix Ak = u.times(m.transpose());
    info.append("\nAk - users x items\n\n");
    info.append(MathHelper.nice(Ak));
    info.append('\n');
    log.info(info.toString());
    // RMSE over observed (non-NaN) entries between preference and estimate.
    RunningAverage avg = new FullRunningAverage();
    Iterator<MatrixSlice> sliceIterator = preferences.iterateAll();
    while (sliceIterator.hasNext()) {
        MatrixSlice slice = sliceIterator.next();
        Iterator<Vector.Element> elementIterator = slice.vector().iterateNonZero();
        while (elementIterator.hasNext()) {
            Vector.Element e = elementIterator.next();
            if (!Double.isNaN(e.get())) {
                double pref = e.get();
                double estimate = u.viewRow(slice.index()).dot(m.viewRow(e.index()));
                double err = pref - estimate;
                avg.addDatum(err * err);
                log.info("Comparing preference of user [{}] towards item [{}], was [{}] estimate is [{}]",
                        new Object[] { slice.index(), e.index(), pref, estimate });
            }
        }
    }
    double rmse = Math.sqrt(avg.getAverage());
    log.info("RMSE: {}", rmse);
    assertTrue(rmse < 0.2);
}
From source file: com.skp.experiment.cf.als.hadoop.ParallelALSFactorizationJobTest.java
License: Apache License
/**
 * Integration test for the implicit-feedback variant of the ALS job:
 * observations drive confidence weights (1 + alpha * observation) while the
 * binary preference matrix is what the factorization should reconstruct.
 * Asserts a confidence-weighted RMSE below 0.4.
 */
@Test
public void completeJobImplicitToyExample() throws Exception {
    // Raw observation strengths (e.g. interaction counts); 0 = no interaction.
    Matrix observations = new SparseRowMatrix(4, 4,
            new Vector[] { new DenseVector(new double[] { 5.0, 5.0, 2.0, 0 }),
                    new DenseVector(new double[] { 2.0, 0, 3.0, 5.0 }),
                    new DenseVector(new double[] { 0, 5.0, 0, 3.0 }),
                    new DenseVector(new double[] { 3.0, 0, 0, 5.0 }) });
    // Binarized preferences derived from the observations (1 = observed).
    Matrix preferences = new SparseRowMatrix(4, 4,
            new Vector[] { new DenseVector(new double[] { 1.0, 1.0, 1.0, 0 }),
                    new DenseVector(new double[] { 1.0, 0, 1.0, 1.0 }),
                    new DenseVector(new double[] { 0, 1.0, 0, 1.0 }),
                    new DenseVector(new double[] { 1.0, 0, 0, 1.0 }) });
    // The job consumes the raw observations; index sizes are 4 users, 4 items.
    writeLines(inputFile, preferencesAsText(observations));
    writeLines(indexSizeFile, "0,4\n1,4");
    ParallelALSFactorizationJob alsFactorization = new ParallelALSFactorizationJob();
    alsFactorization.setConf(conf);
    int numFeatures = 3;
    int numIterations = 5;
    double lambda = 0.065; // regularization strength
    double alpha = 20; // confidence scaling for implicit feedback
    alsFactorization.run(new String[] { "--input", inputFile.getAbsolutePath(), "--output",
            outputDir.getAbsolutePath(), "--tempDir", tmpDir.getAbsolutePath(), "--lambda",
            String.valueOf(lambda), "--implicitFeedback", String.valueOf(true), "--alpha", String.valueOf(alpha),
            "--numFeatures", String.valueOf(numFeatures), "--numIterations", String.valueOf(numIterations),
            "--indexSizes", indexSizeFile.toString(), "--useTransform", "false" });
    // Read back the learned user-feature (U) and item-feature (M) matrices.
    Matrix u = MathHelper.readMatrix(conf, new Path(outputDir.getAbsolutePath(), "U/part-m-00000"),
            observations.numRows(), numFeatures);
    Matrix m = MathHelper.readMatrix(conf, new Path(outputDir.getAbsolutePath(), "M/part-m-00000"),
            observations.numCols(), numFeatures);
    // Log the factorization for debugging.
    StringBuilder info = new StringBuilder();
    info.append("\nObservations - users x items\n");
    info.append(MathHelper.nice(observations));
    info.append("\nA - users x items\n\n");
    info.append(MathHelper.nice(preferences));
    info.append("\nU - users x features\n\n");
    info.append(MathHelper.nice(u));
    info.append("\nM - items x features\n\n");
    info.append(MathHelper.nice(m));
    Matrix Ak = u.times(m.transpose());
    info.append("\nAk - users x items\n\n");
    info.append(MathHelper.nice(Ak));
    info.append('\n');
    log.info(info.toString());
    // Confidence-weighted squared error over all (non-NaN) entries.
    RunningAverage avg = new FullRunningAverage();
    Iterator<MatrixSlice> sliceIterator = preferences.iterateAll();
    while (sliceIterator.hasNext()) {
        MatrixSlice slice = sliceIterator.next();
        for (Vector.Element e : slice.vector()) {
            if (!Double.isNaN(e.get())) {
                double pref = e.get();
                double estimate = u.viewRow(slice.index()).dot(m.viewRow(e.index()));
                double confidence = 1 + alpha * observations.getQuick(slice.index(), e.index());
                double err = confidence * (pref - estimate) * (pref - estimate);
                avg.addDatum(err);
                log.info(
                        "Comparing preference of user [{}] towards item [{}], was [{}] with confidence [{}] "
                                + "estimate is [{}]",
                        new Object[] { slice.index(), e.index(), pref, confidence, estimate });
            }
        }
    }
    double rmse = Math.sqrt(avg.getAverage());
    log.info("RMSE: {}", rmse);
    assertTrue(rmse < 0.4);
}
From source file: de.tuberlin.dima.recsys.ssnmm.interactioncut.BiasedItemBasedRecommender.java
License: Apache License
public BiasedItemBasedRecommender(DataModel dataModel, ItemSimilarity similarity, int k, double lambda2, double lambda3) throws TasteException { super(dataModel, similarity); this.k = k;// w w w . j ava 2 s. c o m this.similarity = similarity; RunningAverage averageRating = new FullRunningAverage(); LongPrimitiveIterator itemIDs = getDataModel().getItemIDs(); while (itemIDs.hasNext()) { for (Preference pref : getDataModel().getPreferencesForItem(itemIDs.next())) { averageRating.addDatum(pref.getValue()); } } mu = averageRating.getAverage(); itemBiases = new OpenLongDoubleHashMap(getDataModel().getNumItems()); userBiases = new OpenLongDoubleHashMap(getDataModel().getNumUsers()); itemIDs = getDataModel().getItemIDs(); while (itemIDs.hasNext()) { long itemID = itemIDs.nextLong(); PreferenceArray preferences = getDataModel().getPreferencesForItem(itemID); double sum = 0; for (Preference pref : preferences) { sum += pref.getValue() - mu; } double bi = sum / (lambda2 + preferences.length()); itemBiases.put(itemID, bi); } LongPrimitiveIterator userIDs = getDataModel().getUserIDs(); while (userIDs.hasNext()) { long userID = userIDs.nextLong(); PreferenceArray preferences = getDataModel().getPreferencesFromUser(userID); double sum = 0; for (Preference pref : preferences) { sum += pref.getValue() - mu - itemBiases.get(pref.getItemID()); } double bu = sum / (lambda3 + preferences.length()); userBiases.put(userID, bu); } }
From source file: de.tuberlin.dima.recsys.ssnmm.ratingprediction.AverageRating.java
License: Apache License
/**
 * Computes the average rating over all "train_*" files in the dataset
 * directory and prints it, with a progress line every 10M ratings.
 * Each input line is tab-separated; the rating is the third field.
 *
 * @param args optional: args[0] overrides the default dataset directory
 *             (backward compatible — with no args the original hard-coded
 *             path is used)
 */
public static void main(String[] args) {
    // Allow the dataset directory to be passed on the command line instead of
    // being hard-coded to one developer's machine.
    File dir = new File(args.length > 0 ? args[0] : "/home/ssc/Entwicklung/datasets/yahoo-songs/");
    File[] trainingFiles = dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith("train_");
        }
    });
    // listFiles() returns null (not an empty array) when the path is not a
    // readable directory; the original code would NPE here.
    if (trainingFiles == null) {
        System.err.println("Not a readable directory: " + dir);
        return;
    }
    Pattern sep = Pattern.compile("\t");
    RunningAverage avg = new FullRunningAverage();
    int ratingsProcessed = 0;
    for (File trainingFile : trainingFiles) {
        for (String line : Utils.readLines(trainingFile)) {
            // Rating is the third tab-separated column.
            int rating = Integer.parseInt(sep.split(line)[2]);
            avg.addDatum(rating);
            if (++ratingsProcessed % 10000000 == 0) {
                System.out.println(ratingsProcessed + " ratings processed");
            }
        }
    }
    System.out.println("average rating " + avg.getAverage());
}
From source file: de.tuberlin.dima.recsys.ssnmm.ratingprediction.Evaluate.java
License: Apache License
/**
 * Evaluates an item-based rating predictor (biased kNN over a precomputed item-item
 * similarity matrix) against a plain user/item-bias baseline on the Yahoo! Songs
 * hold-out set, printing running MAE and RMSE for both.
 * Assumes the training file is grouped by user and that the hold-out file contains
 * exactly 10 ratings per user in the same user order — TODO confirm against the
 * dataset preparation step.
 */
public static void main(String[] args) throws IOException {
    int numUsers = 1823179;
    int numItems = 136736;
    double mu = 3.157255412010664; // global mean rating, precomputed over the training set
    String distributedSimilarityMatrixPath = "/home/ssc/Desktop/yahoo/similarityMatrix/";
    String itemBiasesFilePath = "/home/ssc/Desktop/yahoo/itemBiases.tsv";
    String userBiasesFilePath = "/home/ssc/Desktop/yahoo/userBiases.tsv";
    String trainingSetPath = "/home/ssc/Entwicklung/datasets/yahoo-songs/songs.tsv";
    // NOTE(review): missing leading '/' makes this path relative to the working directory,
    // unlike every other path here — looks unintended; verify.
    String holdoutSetPath = "home/ssc/Entwicklung/datasets/yahoo-songs/holdout.tsv";

    // Load the precomputed item-item similarity matrix from Hadoop sequence files:
    // one row vector of similar items per item ID.
    Matrix similarities = new SparseRowMatrix(numItems, numItems);
    System.out.println("Reading similarities...");
    int similaritiesRead = 0;
    Configuration conf = new Configuration();
    for (Pair<IntWritable, VectorWritable> pair : new SequenceFileDirIterable<IntWritable, VectorWritable>(
            new Path(distributedSimilarityMatrixPath), PathType.LIST, PathFilters.partFilter(), conf)) {
        int item = pair.getFirst().get();
        Iterator<Vector.Element> elements = pair.getSecond().get().iterateNonZero();
        while (elements.hasNext()) {
            Vector.Element elem = elements.next();
            similarities.setQuick(item, elem.index(), elem.get());
            similaritiesRead++;
        }
    }
    System.out.println("Found " + similaritiesRead + " similarities");

    Pattern sep = Pattern.compile("\t");

    // Bias terms indexed directly by ID; presumably IDs are dense ints in [0, numX) —
    // an out-of-range ID in the TSV files would throw ArrayIndexOutOfBoundsException.
    double[] itemBiases = new double[numItems];
    double[] userBiases = new double[numUsers];
    System.out.println("Reading item biases");
    for (String line : new FileLineIterable(new File(itemBiasesFilePath))) {
        String[] parts = sep.split(line);
        itemBiases[Integer.parseInt(parts[0])] = Double.parseDouble(parts[1]);
    }
    System.out.println("Reading user biases");
    for (String line : new FileLineIterable(new File(userBiasesFilePath))) {
        String[] parts = sep.split(line);
        userBiases[Integer.parseInt(parts[0])] = Double.parseDouble(parts[1]);
    }

    Iterator<Rating> trainRatings = new RatingsIterable(new File(trainingSetPath)).iterator();
    Iterator<Rating> heldOutRatings = new RatingsIterable(new File(holdoutSetPath)).iterator();

    int currentUser = 0;
    // Training ratings of the user currently being scanned: item -> rating.
    OpenIntDoubleHashMap prefs = new OpenIntDoubleHashMap();
    int usersProcessed = 0;
    RunningAverage rmse = new FullRunningAverage();
    RunningAverage mae = new FullRunningAverage();
    RunningAverage rmseBase = new FullRunningAverage();
    RunningAverage maeBase = new FullRunningAverage();

    // Stream the training file. When the user ID changes, all of the previous user's
    // training ratings are in 'prefs', so score that user's 10 held-out ratings.
    // NOTE(review): the very last user's hold-out ratings are never evaluated (no user
    // change occurs after the final training line) — confirm this is acceptable.
    while (trainRatings.hasNext()) {
        Rating rating = trainRatings.next();
        if (rating.user() != currentUser) {
            for (int n = 0; n < 10; n++) {
                Rating heldOutRating = heldOutRatings.next();
                Preconditions.checkState(heldOutRating.user() == currentUser);

                // Similarity-weighted sum of the user's rating residuals (rating minus
                // baseline) over rated items similar to the held-out item.
                double preference = 0.0;
                double totalSimilarity = 0.0;
                int count = 0;
                Iterator<Vector.Element> similarItems = similarities.viewRow(heldOutRating.item())
                        .iterateNonZero();
                while (similarItems.hasNext()) {
                    Vector.Element similarity = similarItems.next();
                    int similarItem = similarity.index();
                    if (prefs.containsKey(similarItem)) {
                        preference += similarity.get() * (prefs.get(similarItem)
                                - (mu + userBiases[currentUser] + itemBiases[similarItem]));
                        totalSimilarity += Math.abs(similarity.get());
                        count++;
                    }
                }

                double baselineEstimate = mu + userBiases[currentUser] + itemBiases[heldOutRating.item()];
                double estimate = baselineEstimate;
                // NOTE(review): the neighborhood adjustment is only blended in when at least
                // two neighbors contributed; confirm '> 1' (rather than '> 0') is intentional.
                if (count > 1) {
                    estimate += preference / totalSimilarity;
                }

                double baseError = Math.abs(heldOutRating.rating() - baselineEstimate);
                maeBase.addDatum(baseError);
                rmseBase.addDatum(baseError * baseError);
                double error = Math.abs(heldOutRating.rating() - estimate);
                mae.addDatum(error);
                rmse.addDatum(error * error);
            }
            // Periodic progress report with running metrics.
            if (++usersProcessed % 10000 == 0) {
                System.out.println(usersProcessed + " users processed, MAE " + mae.getAverage() + ", RMSE "
                        + Math.sqrt(rmse.getAverage()) + " | baseline MAE " + maeBase.getAverage()
                        + ", baseline RMSE " + Math.sqrt(rmseBase.getAverage()));
            }
            currentUser = rating.user();
            prefs.clear();
        }
        prefs.put(rating.item(), rating.rating());
    }
    // Final summary over all evaluated users.
    System.out.println(usersProcessed + " users processed, MAE " + mae.getAverage() + ", RMSE "
            + Math.sqrt(rmse.getAverage()) + " | baseline MAE " + maeBase.getAverage()
            + ", baseline RMSE " + Math.sqrt(rmseBase.getAverage()));
}
From source file:de.tuberlin.dima.recsys.ssnmm.ratingprediction.UserItemBaseline.java
License:Apache License
/** Scores the user/item-bias baseline predictor on the held-out ratings and prints MAE and RMSE. */
void test() throws IOException {
    RunningAverage squaredError = new FullRunningAverage();
    RunningAverage absoluteError = new FullRunningAverage();
    System.out.println("Calculating predictions");
    for (Rating rating : new RatingsIterable(tests)) {
        double predicted = baselineEstimate(rating.user(), rating.item());
        double delta = Math.abs(rating.rating() - predicted);
        absoluteError.addDatum(delta);
        squaredError.addDatum(delta * delta);
    }
    // RMSE is the square root of the mean squared error.
    System.out.println("MAE " + absoluteError.getAverage() + ", RMSE: " + Math.sqrt(squaredError.getAverage()));
}
From source file:net.myrrix.online.candidate.LocationSensitiveHashTest.java
License:Apache License
@Test public void testLSH() { System.setProperty("model.lsh.sampleRatio", "0.1"); System.setProperty("model.lsh.numHashes", "20"); RandomGenerator random = RandomManager.getRandom(); RunningAverage avgPercentTopRecsConsidered = new FullRunningAverage(); RunningAverage avgNDCG = new FullRunningAverage(); RunningAverage avgPercentAllItemsConsidered = new FullRunningAverage(); for (int iteration = 0; iteration < ITERATIONS; iteration++) { FastByIDMap<float[]> Y = new FastByIDMap<float[]>(); for (int i = 0; i < NUM_ITEMS; i++) { Y.put(i, RandomUtils.randomUnitVector(NUM_FEATURES, random)); }// w ww.j ava2s.c o m float[] userVec = RandomUtils.randomUnitVector(NUM_FEATURES, random); double[] results = doTestRandomVecs(Y, userVec); double percentTopRecsConsidered = results[0]; double ndcg = results[1]; double percentAllItemsConsidered = results[2]; log.info("Considered {}% of all candidates, {} nDCG, got {}% recommendations correct", 100 * percentAllItemsConsidered, ndcg, 100 * percentTopRecsConsidered); avgPercentTopRecsConsidered.addDatum(percentTopRecsConsidered); avgNDCG.addDatum(ndcg); avgPercentAllItemsConsidered.addDatum(percentAllItemsConsidered); } log.info(avgPercentTopRecsConsidered.toString()); log.info(avgNDCG.toString()); log.info(avgPercentAllItemsConsidered.toString()); assertTrue(avgPercentTopRecsConsidered.getAverage() > 0.55); assertTrue(avgNDCG.getAverage() > 0.55); assertTrue(avgPercentAllItemsConsidered.getAverage() < 0.075); }