org.biocaddie.citationanalysis.metrics.JavaPageRankInt.java Source code


Introduction

Here is the source code for org.biocaddie.citationanalysis.metrics.JavaPageRankInt.java

Source

package org.biocaddie.citationanalysis.metrics;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import scala.Tuple2;

import com.google.common.collect.Iterables;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import org.rcsb.spark.util.SparkUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.Iterator;
import java.util.regex.Pattern;

/**
 * Computes the PageRank of URLs from an input file. The input file should
 * be in the format:
 * URL         neighbor URL
 * URL         neighbor URL
 * URL         neighbor URL
 * ...
 * where a URL and its neighbor are separated by whitespace.
 *
 * This is an example implementation for learning how to use Spark. For more conventional use,
 * please refer to org.apache.spark.graphx.lib.PageRank
 * 
 * https://www.codatlas.com/github.com/apache/spark/HEAD/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java?line=1
 */
public final class JavaPageRankInt {
    private static final Pattern SPACES = Pattern.compile("\\s+");

    private static class Sum implements Function2<Double, Double, Double> {
        @Override
        public Double call(Double a, Double b) {
            return a + b;
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length < 3) {
            System.err.println("Usage: JavaPageRankInt <links_file> <id_mapping_file> <number_of_iterations>");
            System.exit(1);
        }
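        // Expected inputs (the example values below are hypothetical):
        //   args[0]: links file, one "nodeId neighborNodeId" pair per line, e.g. "1 2"
        //   args[1]: id mapping file, one "nodeId,pmId" pair per line, e.g. "1,12345678"
        //            (pmId is presumably a PubMed identifier, given the pmIds variable below)
        //   args[2]: number of PageRank iterations, e.g. "20"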

        double alpha = 0.5;
        JavaSparkContext ctx = SparkUtils.getJavaSparkContext("JavaPageRank");

        // Loads the input file. It should be in the format:
        //     URL         neighbor URL
        //     URL         neighbor URL
        //     URL         neighbor URL
        //     ...
        //    JavaRDD<String> lines = ctx.textFile(args[0], 1);
        JavaRDD<String> lines = ctx.textFile(args[0]);

        // Loads all URLs from the input file and initializes their neighbor lists.
        JavaPairRDD<Integer, Iterable<Integer>> links = lines
                .mapToPair(new PairFunction<String, Integer, Integer>() {
                    @Override
                    public Tuple2<Integer, Integer> call(String s) {
                        String[] parts = SPACES.split(s);
                        return new Tuple2<Integer, Integer>(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]));
                    }
                }).distinct().groupByKey().cache();
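        // links now maps each source node to the iterable of nodes it links to,
        // e.g. a hypothetical input line "1 2" adds 2 to the neighbor list of node 1.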

        // Initializes the rank of every URL that has outgoing links to 1.0.
        JavaPairRDD<Integer, Double> ranks = links.mapValues(new Function<Iterable<Integer>, Double>() {
            @Override
            public Double call(Iterable<Integer> rs) {
                return 1.0;
            }
        });

        // Calculates and updates URL ranks iteratively using the PageRank algorithm.
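        // In each iteration, every node splits its current rank evenly among the nodes it links to;
        // each node's new rank then becomes alpha + (1 - alpha) * (sum of the contributions it received).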
        for (int current = 0; current < Integer.parseInt(args[2]); current++) {
            // Calculates URL contributions to the rank of other URLs.
            JavaPairRDD<Integer, Double> contribs = links.join(ranks).values()
                    .flatMapToPair(new PairFlatMapFunction<Tuple2<Iterable<Integer>, Double>, Integer, Double>() {
                        @Override
                        public Iterable<Tuple2<Integer, Double>> call(Tuple2<Iterable<Integer>, Double> s) {
                            int urlCount = Iterables.size(s._1);
                            List<Tuple2<Integer, Double>> results = new ArrayList<Tuple2<Integer, Double>>();
                            for (Integer n : s._1) {
                                results.add(new Tuple2<Integer, Double>(n, s._2() / urlCount));
                            }
                            return results;
                        }
                    });
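            // contribs holds one (linkedNodeId, contribution) pair per outgoing link,
            // where each contribution is the source node's rank divided by its out-degree.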

            // Re-calculates URL ranks based on neighbor contributions.
            ranks = contribs.reduceByKey(new Sum()).mapValues(new Function<Double, Double>() {
                @Override
                public Double call(Double sum) {
                    return alpha + sum * (1 - alpha);
                    //         return 0.15 + sum * 0.85;
                }
            });
        }

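        // Reads the id mapping file (args[1]) and builds (nodeId, pmId) pairs;
        // pmId is presumably a PubMed identifier.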
        JavaRDD<String> idLines = ctx.textFile(args[1]);
        JavaPairRDD<Integer, Integer> pmIds = idLines.mapToPair(new PairFunction<String, Integer, Integer>() {
            @Override
            public Tuple2<Integer, Integer> call(String s) {
                String[] parts = s.split(",");
                return new Tuple2<Integer, Integer>(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]));
            }
        });

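        // Keeps only nodes whose final rank exceeds 0.8 before joining with the id mapping.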
        ranks = ranks.filter(t -> t._2 > 0.8);

        JavaPairRDD<Integer, Tuple2<Double, Integer>> join = ranks.join(pmIds);

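        // Collects the filtered results to the driver and prints them as "nodeId,pmId,rank" lines.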
        List<Tuple2<Integer, Tuple2<Double, Integer>>> collect = join.collect();
        for (Tuple2<Integer, Tuple2<Double, Integer>> t : collect) {
            System.out.println(t._1 + "," + t._2._2 + "," + t._2._1);
        }

        // Collects all URL ranks and dumps them to the console.
        //    List<Tuple2<Integer, Double>> output = ranks.collect();
        //    for (Tuple2<?,?> tuple : output) {
        //        System.out.println(tuple._1() + " has rank: " + tuple._2() + ".");
        //    }

        ctx.stop();
    }
}
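
Below is a minimal sketch of how this job might be submitted with spark-submit. The jar name, input file names, and iteration count are hypothetical, and SparkUtils.getJavaSparkContext is assumed to supply the Spark master and any other configuration.

    spark-submit \
        --class org.biocaddie.citationanalysis.metrics.JavaPageRankInt \
        biocaddie-citation-analysis.jar \
        citation_links.txt node_id_mapping.csv 20

The program then prints one "nodeId,pmId,rank" line for each node whose final rank exceeds 0.8.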