Example usage for java.util.ArrayList.size()

A list of usage examples for java.util.ArrayList.size()

Introduction

This page collects example usages of java.util.ArrayList.size(), gathered from the source files listed below.

Prototype

public int size()

Document

Returns the number of elements in this list (the size of the ArrayList).
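
Before the full source listings below, here is a minimal, self-contained sketch of the method on its own (the class name SizeDemo and the sample values are illustrative, not taken from the listings below):

import java.util.ArrayList;
import java.util.List;

public class SizeDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<String>();
        System.out.println(names.size()); // 0: a new list is empty

        names.add("alpha");
        names.add("beta");
        System.out.println(names.size()); // 2: grows by one per add()

        names.remove("alpha");
        System.out.println(names.size()); // 1: shrinks when an element is removed
    }
}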

Usage

From source file: gedi.lfc.quick.ShiroguchiCounter.java

public static void main(String[] args) throws IOException {

    String path = "/home/users/erhard/biostor/seq/ngade/shiroguchi_randombarcodes/data/";

    MemoryIntervalTreeStorage<int[]> reads = new MemoryIntervalTreeStorage<int[]>(int[].class);

    String[] files = { "Shiroguchi_A_collapsed.bed", "Shiroguchi_B_collapsed.bed",
            "Shiroguchi_A_uncollapsed.bed", "Shiroguchi_B_uncollapsed.bed" };
    for (int i = 0; i < 4; i++) {
        Iterator<String> it = new LineOrientedFile(path + files[i]).lineIterator();
        while (it.hasNext()) {
            String[] f = StringUtils.split(it.next(), '\t');
            Chromosome chr = Chromosome.obtain(f[0]);
            ArrayGenomicRegion region = new ArrayGenomicRegion(Integer.parseInt(f[1]), Integer.parseInt(f[2]));
            int c = Integer.parseInt(StringUtils.splitField(f[3], '|', 0));

            int[] counts = reads.getData(chr, region);
            if (counts == null)
                reads.add(chr, region, counts = new int[4]);

            counts[i] += c;
        }
    }

    HashMap<String, String> map = new HashMap<String, String>();
    new LineOrientedFile(path + "U00096.2.genes.csv").lineIterator().forEachRemaining(s -> {
        String[] f = StringUtils.split(s, '\t');
        map.put(f[0], f[7]);
    });

    LineOrientedFile fragments = new LineOrientedFile("fragments.csv");
    fragments.startWriting();
    fragments.writef("Gene\tonlyA\tonlyB\tBoth\tLength\n");

    LineOrientedFile bias = new LineOrientedFile("bias.csv");
    bias.startWriting();
    bias.writef("OriginalA\tBiasA\tOriginalB\tBiasB\n");

    IntArrayList biasFactors = new IntArrayList();
    ArrayList<GeneData> geneData = new ArrayList<GeneData>();

    MemoryIntervalTreeStorage<Transcript> genes = new BiomartExonFileReader(path + "U00096.2.exons.csv", false)
            .readIntoMemoryTakeFirst();
    for (ImmutableReferenceGenomicRegion<Transcript> g : genes.getReferenceGenomicRegions()) {
        ArrayList<ImmutableReferenceGenomicRegion<int[]>> frag = reads
                .getReferenceRegionsIntersecting(g.getReference().toStrandIndependent(), g.getRegion());

        GeneData gd = new GeneData();

        int l = g.getRegion().getTotalLength();
        for (ImmutableReferenceGenomicRegion<int[]> r : frag) {
            if (r.getData()[0] == 0)
                gd.onlyB++;
            if (r.getData()[1] == 0)
                gd.onlyA++;
            if (r.getData()[0] == 0 && r.getData()[1] == 0)
                throw new RuntimeException();

            bias.writef("%d\t%.0f\t%d\t%.0f\n", r.getData()[0], r.getData()[2] / (double) r.getData()[0],
                    r.getData()[1], r.getData()[3] / (double) r.getData()[1]);
            if (r.getData()[0] > 0) {
                biasFactors.add(r.getData()[2] / r.getData()[0]);
            }
            if (r.getData()[1] > 0) {
                biasFactors.add(r.getData()[3] / r.getData()[1]);
            }

        }
        gd.both = frag.size() - gd.onlyA - gd.onlyB; // size() counts the intersecting fragments
        fragments.writef("%s\t%d\t%d\t%d\t%d\n", map.get(g.getData().getTranscriptId()), gd.onlyA, gd.onlyB,
                gd.both, l);

        if (gd.onlyA + gd.onlyB + gd.both > 0)
            geneData.add(gd);
    }

    fragments.finishWriting();
    bias.finishWriting();

    double fc = 1.4;
    int rep = 5;
    int nDiff = 1000;
    int n = 10000;
    int N = 6000;
    double noise = 0.05;

    LineOrientedFile countMatrix = new LineOrientedFile("countMatrix.csv");
    countMatrix.startWriting();

    LineOrientedFile downCountMatrix = new LineOrientedFile("countMatrix_downsampled.csv");
    downCountMatrix.startWriting();

    RandomNumbers rnd = new RandomNumbers();
    for (int i = 0; i < n; i++) {
        GeneData gd = geneData.get(rnd.getUnif(0, geneData.size())); // size() as exclusive upper bound for the random index

        //         int N = gd.both==0?Integer.MAX_VALUE/2:(int) (gd.onlyA+gd.onlyB+gd.both+gd.onlyA*gd.onlyB/gd.both);
        double p1 = (gd.onlyA + gd.both) / (double) N;
        double p2 = i < nDiff ? p1 / fc : p1;

        ArrayList<ReadData> list = new ArrayList<ReadData>();
        for (int r = 0; r < rep * 2; r++) {
            int k = rnd.getBinom(N, r < rep ? p1 : p2) + 1;
            int hit = N == -1 ? 0 : rnd.getBinom(k, list.size() / (double) N);
            rnd.shuffle(list);
            for (int x = 0; x < hit; x++)
                list.get(x).reads[r] = (int) rnd.getNormal(list.get(x).bias, list.get(x).bias * noise);
            for (int x = 0; x < k - hit; x++)
                list.add(new ReadData(biasFactors.getInt(rnd.getUnif(0, biasFactors.size())), rep * 2, r));
        }

        int[] c = new int[rep * 2];
        for (ReadData d : list) {
            for (int r = 0; r < c.length; r++) {
                c[r] += d.reads[r];
            }
        }

        double[] down = new double[rep * 2];
        for (ReadData d : list) {
            double max = ArrayUtils.max(d.reads);
            for (int r = 0; r < down.length; r++) {
                down[r] += d.reads[r] / max;
            }
        }

        countMatrix.writeLine(StringUtils.concat("\t", c));
        downCountMatrix.writeLine(StringUtils.concat("\t", down));

    }

    countMatrix.finishWriting();
    downCountMatrix.finishWriting();

}

From source file: fr.inria.edelweiss.kgdqp.core.FedInferrencingCLI.java

public static void main(String args[]) throws ParseException, EngineException, InterruptedException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    //        Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    options.addOption(queryOpt);
    options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    //        options.addOption(selOpt);

    String header = "Corese/KGRAM distributed rule engine command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (!cmd.hasOption("e")) {
        logger.info("You must specify at least the URL of one sparql endpoint !");
        System.exit(0);
    } else {
        endpoints = new ArrayList<String>(Arrays.asList(cmd.getOptionValues("e")));
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    } else if (rulesSelection) {

    }

    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    /////////////////
    Graph graph = Graph.create();
    QueryProcessDQP execDQP = QueryProcessDQP.create(graph);
    for (String url : endpoints) {
        try {
            execDQP.addRemote(new URL(url), WSImplem.REST);
        } catch (MalformedURLException ex) {
            logger.error(url + " is not a well-formed URL");
            System.exit(1);
        }
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            logger.info("Loading " + r.getAbsolutePath());
            ld.load(r.getAbsolutePath());
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(execDQP);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        //loading rule in the rule engine
        //            logger.info("Adding rule : " + rule);
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    //        ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    //monitoring loop
    while (!threadPool.isTerminated()) {
        System.out.println("******************************");
        System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter,
                QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        System.out.println("Rule engine running for " + sw.getTime() + " ms");
        System.out.println("Federated graph size : " + graph.size());
        Thread.sleep(10000);
    }

    logger.info("Federated graph size : " + graph.size());
    logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter,
            QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    ///////////// Query file processing
    //        StringBuffer fileData = new StringBuffer(1000);
    //        BufferedReader reader = null;
    //        try {
    //            reader = new BufferedReader(new FileReader(queryPath));
    //        } catch (FileNotFoundException ex) {
    //             logger.error("Query file "+queryPath+" not found !");
    //             System.exit(1);
    //        }
    //        char[] buf = new char[1024];
    //        int numRead = 0;
    //        try {
    //            while ((numRead = reader.read(buf)) != -1) {
    //                String readData = String.valueOf(buf, 0, numRead);
    //                fileData.append(readData);
    //                buf = new char[1024];
    //            }
    //            reader.close();
    //        } catch (IOException ex) {
    //           logger.error("Error while reading query file "+queryPath);
    //           System.exit(1);
    //        }
    //
    //        String sparqlQuery = fileData.toString();
    //
    //        Query q = exec.compile(sparqlQuery,null);
    //        System.out.println(q);
    //        
    //        StopWatch sw = new StopWatch();
    //        sw.start();
    //        Mappings map = exec.query(sparqlQuery);
    //        int dqpSize = map.size();
    //        System.out.println("--------");
    //        long time = sw.getTime();
    //        System.out.println(time + " " + dqpSize);
}

From source file: edu.cmu.lti.oaqa.knn4qa.apps.CollectionSplitter.java

public static void main(String[] args) {
    Options options = new Options();

    options.addOption("i", null, true, "Input file");
    options.addOption("o", null, true, "Output file prefix");
    options.addOption("p", null, true, "Comma separated probabilities e.g., 0.1,0.2,0.7.");
    options.addOption("n", null, true, "Comma separated part names, e.g., dev,test,train");

    CommandLineParser parser = new org.apache.commons.cli.GnuParser();

    try {
        CommandLine cmd = parser.parse(options, args);

        InputStream input = null;

        if (cmd.hasOption("i")) {
            input = CompressUtils.createInputStream(cmd.getOptionValue("i"));
        } else {
            Usage("Specify Input file");
        }

        ArrayList<Double> probs = new ArrayList<Double>();
        String[] partNames = null;

        if (cmd.hasOption("p")) {
            String parts[] = cmd.getOptionValue("p").split(",");

            try {
                double sum = 0;
                for (String s : parts) {
                    double p = Double.parseDouble(s);
                    if (p <= 0 || p > 1)
                        Usage("All probabilities must be in the range (0,1)");
                    sum += p;
                    probs.add(p);
                }

                if (Math.abs(sum - 1.0) > Float.MIN_NORMAL) {
                    Usage("The sum of probabilities should be equal to 1, but it's: " + sum);
                }
            } catch (NumberFormatException e) {
                Usage("Can't convert some of the probabilities to a floating-point number.");
            }
        } else {
            Usage("Specify part probabilities.");
        }

        if (cmd.hasOption("n")) {
            partNames = cmd.getOptionValue("n").split(",");

            if (partNames.length != probs.size())
                Usage("The number of probabilities is not equal to the number of parts!");
        } else {
            Usage("Specify part names");
        }

        BufferedWriter[] outFiles = new BufferedWriter[partNames.length];

        if (cmd.hasOption("o")) {
            String outPrefix = cmd.getOptionValue("o");

            for (int partId = 0; partId < partNames.length; ++partId) {
                outFiles[partId] = new BufferedWriter(new OutputStreamWriter(
                        CompressUtils.createOutputStream(outPrefix + "_" + partNames[partId] + ".gz")));
            }
        } else {
            Usage("Specify Output file prefix");
        }

        System.out.println("Using probabilities:");
        for (int partId = 0; partId < partNames.length; ++partId)
            System.out.println(partNames[partId] + " : " + probs.get(partId));
        System.out.println("=================================================");

        XmlIterator inpIter = new XmlIterator(input, YahooAnswersReader.DOCUMENT_TAG);

        String oneRec = inpIter.readNext();
        int docNum = 1;
        for (; !oneRec.isEmpty(); ++docNum, oneRec = inpIter.readNext()) {
            double p = Math.random();

            if (docNum % 1000 == 0) {
                System.out.println(String.format("Processed %d documents", docNum));
            }

            BufferedWriter out = null;

            for (int partId = 0; partId < partNames.length; ++partId) {
                double pp = probs.get(partId);
                if (p <= pp || partId + 1 == partNames.length) {
                    out = outFiles[partId];
                    break;
                }
                p -= pp;
            }

            oneRec = oneRec.trim() + System.getProperty("line.separator");
            out.write(oneRec);
        }
        System.out.println(String.format("Processed %d documents", docNum - 1));
        // It's important to close all the streams here!
        for (BufferedWriter f : outFiles)
            f.close();
    } catch (ParseException e) {
        Usage("Cannot parse arguments");
    } catch (Exception e) {
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }
}

From source file: edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques.java

/**
 * @param args
 * @throws ParseException 
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the relationship " + "even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option g1Option = new Option("g1", "first-group", true, "set first group of datasets");
    g1Option.setRequired(true);
    g1Option.setArgName("FIRST GROUP");
    g1Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g1Option);

    Option g2Option = new Option("g2", "second-group", true, "set second group of datasets");
    g2Option.setRequired(false);
    g2Option.setArgName("SECOND GROUP");
    g2Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g2Option);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; " + "this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-key", true,
            "aws secret access key; " + "this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; " + "this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar "
                        + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar "
                        + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println(
                    "Arguments 'aws_id', 'aws_key', and 'b'" + " are mandatory if execution is on AWS.");
            formatter.printHelp(
                    "hadoop jar data-polygamy.jar "
                            + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                    options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    Path path = null;
    FileSystem fs = FileSystem.get(new Configuration());

    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> firstGroup = new ArrayList<String>();
    ArrayList<String> secondGroup = new ArrayList<String>();
    HashMap<String, String> datasetAgg = new HashMap<String, String>();

    boolean removeExistingFiles = cmd.hasOption("f");

    String[] firstGroupCmd = cmd.getOptionValues("g1");
    String[] secondGroupCmd = cmd.hasOption("g2") ? cmd.getOptionValues("g2") : new String[0];
    addDatasets(firstGroupCmd, firstGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);
    addDatasets(secondGroupCmd, secondGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    if (firstGroup.isEmpty()) {
        System.out.println("First group of datasets (G1) is empty. " + "Doing G1 = G2.");
        firstGroup.addAll(secondGroup);
    }

    if (secondGroup.isEmpty()) {
        System.out.println("Second group of datasets (G2) is empty. " + "Doing G2 = G1.");
        secondGroup.addAll(firstGroup);
    }

    // getting dataset ids

    String datasetNames = "";
    String datasetIds = "";
    HashMap<String, String> datasetId = new HashMap<String, String>();
    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        datasetId.put(it.next(), null);
    }

    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();
    if (s3)
        fs.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);
    it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    String firstGroupStr = "";
    String secondGroupStr = "";
    for (String dataset : firstGroup) {
        firstGroupStr += datasetId.get(dataset) + ",";
    }
    for (String dataset : secondGroup) {
        secondGroupStr += datasetId.get(dataset) + ",";
    }
    firstGroupStr = firstGroupStr.substring(0, firstGroupStr.length() - 1);
    secondGroupStr = secondGroupStr.substring(0, secondGroupStr.length() - 1);

    FrameworkUtils.createDir(s3bucket + FrameworkUtils.correlationTechniquesDir, s3conf, s3);

    String dataAttributesInputDirs = "";
    String noRelationship = "";

    HashSet<String> dirs = new HashSet<String>();

    String dataset1;
    String dataset2;
    String datasetId1;
    String datasetId2;
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            datasetId1 = datasetId.get(dataset1);
            datasetId2 = datasetId.get(dataset2);

            if (dataset1.equals(dataset2))
                continue;
            String correlationOutputFileName = s3bucket + FrameworkUtils.correlationTechniquesDir + "/"
                    + dataset1 + "-" + dataset2 + "/";

            if (removeExistingFiles) {
                FrameworkUtils.removeFile(correlationOutputFileName, s3conf, s3);
            }
            if (!FrameworkUtils.fileExists(correlationOutputFileName, s3conf, s3)) {
                dirs.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset1);
                dirs.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset2);
            } else {
                noRelationship += datasetId1 + "-" + datasetId2 + ",";
            }
        }
    }

    if (dirs.isEmpty()) {
        System.out.println("All the relationships were already computed.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    for (String dir : dirs) {
        dataAttributesInputDirs += dir + ",";
    }

    Configuration conf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    String jobName = "correlation";
    String correlationOutputDir = s3bucket + FrameworkUtils.correlationTechniquesDir + "/tmp/";

    FrameworkUtils.removeFile(correlationOutputDir, s3conf, s3);

    for (int i = 0; i < shortDataset.size(); i++) {
        conf.set("dataset-" + datasetId.get(shortDataset.get(i)) + "-agg", datasetAgg.get(shortDataset.get(i)));
    }
    for (int i = 0; i < shortDataset.size(); i++) {
        conf.set("dataset-" + datasetId.get(shortDataset.get(i)) + "-agg-size",
                Integer.toString(datasetAgg.get(shortDataset.get(i)).split(",").length));
    }
    conf.set("dataset-keys", datasetIds);
    conf.set("dataset-names", datasetNames);
    conf.set("first-group", firstGroupStr);
    conf.set("second-group", secondGroupStr);
    conf.set("main-dataset-id", datasetId.get(shortDataset.get(0)));
    if (noRelationship.length() > 0) {
        conf.set("no-relationship", noRelationship.substring(0, noRelationship.length() - 1));
    }

    conf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    conf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    conf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    conf.set("mapreduce.task.io.sort.mb", "200");
    conf.set("mapreduce.task.io.sort.factor", "100");
    conf.set("mapreduce.task.timeout", "2400000");

    if (s3) {
        machineConf.setMachineConfiguration(conf);
        conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        conf.set("bucket", s3bucket);
    }

    if (snappyCompression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setMapOutputKeyClass(PairAttributeWritable.class);
    job.setMapOutputValueClass(SpatioTemporalValueWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(CorrelationTechniquesMapper.class);
    job.setReducerClass(CorrelationTechniquesReducer.class);
    job.setNumReduceTasks(machineConf.getNumberReduces());

    job.setInputFormatClass(SequenceFileInputFormat.class);
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    FileInputFormat.setInputDirRecursive(job, true);
    FileInputFormat.setInputPaths(job,
            dataAttributesInputDirs.substring(0, dataAttributesInputDirs.length() - 1));
    FileOutputFormat.setOutputPath(job, new Path(correlationOutputDir));

    job.setJarByClass(CorrelationTechniques.class);

    long start = System.currentTimeMillis();
    job.submit();
    job.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // moving files to right place
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            if (dataset1.equals(dataset2))
                continue;

            String from = s3bucket + FrameworkUtils.correlationTechniquesDir + "/tmp/" + dataset1 + "-"
                    + dataset2 + "/";
            String to = s3bucket + FrameworkUtils.correlationTechniquesDir + "/" + dataset1 + "-" + dataset2
                    + "/";
            FrameworkUtils.renameFile(from, to, s3conf, s3);
        }
    }
}

From source file: edu.nyu.vida.data_polygamy.relationship_computation.Relationship.java

/**
 * @param args
 * @throws ParseException 
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the relationship " + "even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option scoreOption = new Option("sc", "score", true, "set threshold for relationship score");
    scoreOption.setRequired(false);
    scoreOption.setArgName("SCORE THRESHOLD");
    options.addOption(scoreOption);

    Option strengthOption = new Option("st", "strength", true, "set threshold for relationship strength");
    strengthOption.setRequired(false);
    strengthOption.setArgName("STRENGTH THRESHOLD");
    options.addOption(strengthOption);

    Option completeRandomizationOption = new Option("c", "complete-randomization", false,
            "use complete randomization when performing significance tests");
    completeRandomizationOption.setRequired(false);
    options.addOption(completeRandomizationOption);

    Option idOption = new Option("id", "ids", false, "output id instead of names for datasets and attributes");
    idOption.setRequired(false);
    options.addOption(idOption);

    Option g1Option = new Option("g1", "first-group", true, "set first group of datasets");
    g1Option.setRequired(true);
    g1Option.setArgName("FIRST GROUP");
    g1Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g1Option);

    Option g2Option = new Option("g2", "second-group", true, "set second group of datasets");
    g2Option.setRequired(false);
    g2Option.setArgName("SECOND GROUP");
    g2Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g2Option);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; " + "this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-key", true,
            "aws secret access key; " + "this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; " + "this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    Option removeOption = new Option("r", "remove-not-significant", false,
            "remove relationships that are not " + "significant from the final output");
    removeOption.setRequired(false);
    options.addOption(removeOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.relationship_computation.Relationship", options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.relationship_computation.Relationship", options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println(
                    "Arguments 'aws_id', 'aws_key', and 'b'" + " are mandatory if execution is on AWS.");
            formatter.printHelp(
                    "hadoop jar data-polygamy.jar "
                            + "edu.nyu.vida.data_polygamy.relationship_computation.Relationship",
                    options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    Path path = null;
    FileSystem fs = FileSystem.get(new Configuration());

    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> firstGroup = new ArrayList<String>();
    ArrayList<String> secondGroup = new ArrayList<String>();
    HashMap<String, String> datasetAgg = new HashMap<String, String>();

    boolean removeNotSignificant = cmd.hasOption("r");
    boolean removeExistingFiles = cmd.hasOption("f");
    boolean completeRandomization = cmd.hasOption("c");
    boolean hasScoreThreshold = cmd.hasOption("sc");
    boolean hasStrengthThreshold = cmd.hasOption("st");
    boolean outputIds = cmd.hasOption("id");
    String scoreThreshold = hasScoreThreshold ? cmd.getOptionValue("sc") : "";
    String strengthThreshold = hasStrengthThreshold ? cmd.getOptionValue("st") : "";

    // all datasets
    ArrayList<String> all_datasets = new ArrayList<String>();
    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        all_datasets.add(line.split("\t")[0]);
        line = br.readLine();
    }
    br.close();
    if (s3)
        fs.close();
    String[] all_datasets_array = new String[all_datasets.size()];
    all_datasets.toArray(all_datasets_array);

    String[] firstGroupCmd = cmd.getOptionValues("g1");
    String[] secondGroupCmd = cmd.hasOption("g2") ? cmd.getOptionValues("g2") : all_datasets_array;
    addDatasets(firstGroupCmd, firstGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);
    addDatasets(secondGroupCmd, secondGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    if (firstGroup.isEmpty()) {
        System.out.println("No indices from datasets in G1.");
        System.exit(0);
    }

    if (secondGroup.isEmpty()) {
        System.out.println("No indices from datasets in G2.");
        System.exit(0);
    }

    // getting dataset ids

    String datasetNames = "";
    String datasetIds = "";
    HashMap<String, String> datasetId = new HashMap<String, String>();
    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        datasetId.put(it.next(), null);
    }

    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    br = new BufferedReader(new InputStreamReader(fs.open(path)));
    line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        all_datasets.add(dt[0]);
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();
    if (s3)
        fs.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);
    it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    String firstGroupStr = "";
    String secondGroupStr = "";
    for (String dataset : firstGroup) {
        firstGroupStr += datasetId.get(dataset) + ",";
    }
    for (String dataset : secondGroup) {
        secondGroupStr += datasetId.get(dataset) + ",";
    }
    firstGroupStr = firstGroupStr.substring(0, firstGroupStr.length() - 1);
    secondGroupStr = secondGroupStr.substring(0, secondGroupStr.length() - 1);

    String relationshipsDir = "";
    if (outputIds) {
        relationshipsDir = FrameworkUtils.relationshipsIdsDir;
    } else {
        relationshipsDir = FrameworkUtils.relationshipsDir;
    }

    FrameworkUtils.createDir(s3bucket + relationshipsDir, s3conf, s3);

    String random = completeRandomization ? "complete" : "restricted";

    String indexInputDirs = "";
    String noRelationship = "";

    HashSet<String> dirs = new HashSet<String>();

    String dataset1;
    String dataset2;
    String datasetId1;
    String datasetId2;
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            datasetId1 = datasetId.get(dataset1);
            datasetId2 = datasetId.get(dataset2);

            if (dataset1.equals(dataset2))
                continue;
            String correlationOutputFileName = s3bucket + relationshipsDir + "/" + dataset1 + "-" + dataset2
                    + "/";

            if (removeExistingFiles) {
                FrameworkUtils.removeFile(correlationOutputFileName, s3conf, s3);
            }
            if (!FrameworkUtils.fileExists(correlationOutputFileName, s3conf, s3)) {
                dirs.add(s3bucket + FrameworkUtils.indexDir + "/" + dataset1);
                dirs.add(s3bucket + FrameworkUtils.indexDir + "/" + dataset2);
            } else {
                noRelationship += datasetId1 + "-" + datasetId2 + ",";
            }
        }
    }

    if (dirs.isEmpty()) {
        System.out.println("All the relationships were already computed.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    for (String dir : dirs) {
        indexInputDirs += dir + ",";
    }

    Configuration conf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    String jobName = "relationship" + "-" + random;
    String relationshipOutputDir = s3bucket + relationshipsDir + "/tmp/";

    FrameworkUtils.removeFile(relationshipOutputDir, s3conf, s3);

    for (int i = 0; i < shortDataset.size(); i++) {
        conf.set("dataset-" + datasetId.get(shortDataset.get(i)) + "-agg", datasetAgg.get(shortDataset.get(i)));
    }
    for (int i = 0; i < shortDataset.size(); i++) {
        conf.set("dataset-" + datasetId.get(shortDataset.get(i)) + "-agg-size",
                Integer.toString(datasetAgg.get(shortDataset.get(i)).split(",").length));
    }
    conf.set("dataset-keys", datasetIds);
    conf.set("dataset-names", datasetNames);
    conf.set("first-group", firstGroupStr);
    conf.set("second-group", secondGroupStr);
    conf.set("complete-random", String.valueOf(completeRandomization));
    conf.set("output-ids", String.valueOf(outputIds));
    conf.set("complete-random-str", random);
    conf.set("main-dataset-id", datasetId.get(shortDataset.get(0)));
    conf.set("remove-not-significant", String.valueOf(removeNotSignificant));
    if (noRelationship.length() > 0) {
        conf.set("no-relationship", noRelationship.substring(0, noRelationship.length() - 1));
    }
    if (hasScoreThreshold) {
        conf.set("score-threshold", scoreThreshold);
    }
    if (hasStrengthThreshold) {
        conf.set("strength-threshold", strengthThreshold);
    }

    conf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    conf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    conf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    conf.set("mapreduce.task.io.sort.mb", "200");
    conf.set("mapreduce.task.io.sort.factor", "100");
    conf.set("mapreduce.task.timeout", "2400000");

    if (s3) {
        machineConf.setMachineConfiguration(conf);
        conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        conf.set("bucket", s3bucket);
    }

    if (snappyCompression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setMapOutputKeyClass(PairAttributeWritable.class);
    job.setMapOutputValueClass(TopologyTimeSeriesWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(CorrelationMapper.class);
    job.setReducerClass(CorrelationReducer.class);
    job.setNumReduceTasks(machineConf.getNumberReduces());

    job.setInputFormatClass(SequenceFileInputFormat.class);
    //job.setOutputFormatClass(TextOutputFormat.class);
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    FileInputFormat.setInputDirRecursive(job, true);
    FileInputFormat.setInputPaths(job, indexInputDirs.substring(0, indexInputDirs.length() - 1));
    FileOutputFormat.setOutputPath(job, new Path(relationshipOutputDir));

    job.setJarByClass(Relationship.class);

    long start = System.currentTimeMillis();
    job.submit();
    job.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // moving files to right place
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            if (dataset1.equals(dataset2))
                continue;

            String from = s3bucket + relationshipsDir + "/tmp/" + dataset1 + "-" + dataset2 + "/";
            String to = s3bucket + relationshipsDir + "/" + dataset1 + "-" + dataset2 + "/";
            FrameworkUtils.renameFile(from, to, s3conf, s3);
        }
    }
}

From source file: fr.inria.edelweiss.kgdqp.core.CentralizedInferrencing.java

public static void main(String args[])
        throws ParseException, EngineException, InterruptedException, IOException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    /////////////////
    Graph graph = Graph.create();
    QueryProcess exec = QueryProcess.create(graph);

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    //        Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    //        Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    //        Option locOpt = new Option("c", "centralized", false, "performs centralized inferences");
    Option dataOpt = new Option("l", "load", true, "data file or directory to be loaded");
    //        Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    //        options.addOption(queryOpt);
    //        options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    //        options.addOption(selOpt);
    //        options.addOption(locOpt);
    options.addOption(dataOpt);

    String header = "Corese/KGRAM rule engine experiment command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    }

    if (cmd.hasOption("l")) {
        String[] dataPaths = cmd.getOptionValues("l");
        for (String path : dataPaths) {
            Load ld = Load.create(graph);
            ld.load(path);
            logger.info("Loaded " + path);
        }
    }

    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            logger.info("Loading " + r.getAbsolutePath());
            ld.load(r.getAbsolutePath());
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(exec);
    ruleEngine.setOptimize(true);
    ruleEngine.setConstructResult(true);
    ruleEngine.setTrace(true);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        //loading rule in the rule engine
        //            logger.info("Adding rule : ");
        //            System.out.println("-------");
        //            System.out.println(rule);
        //            System.out.println("");
        //            if (! rule.toLowerCase().contains("sameas")) {
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
        //            }
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    //        ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    //monitoring loop
    while (!threadPool.isTerminated()) {
        //            System.out.println("******************************");
        //            System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        //            System.out.println("Rule engine running for " + sw.getTime() + " ms");
        //            System.out.println("Federated graph size : " + graph.size());
        System.out.println(sw.getTime() + " , " + graph.size());
        Thread.sleep(5000);
    }

    logger.info("Federated graph size : " + graph.size());
    //        logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    //        TripleFormat f = TripleFormat.create(graph, true);
    //        f.write("/tmp/gAll.ttl");

}

From source file: com.tonygalati.sprites.SpriteGenerator.java

public static void main(String[] args) throws IOException {

    //        if (args.length != 3)
    //        {
    //           System.out.print("Usage: com.tonygalati.sprites.SpriteGenerator {path to images} {margin between images in px} {output file}\n");
    //           System.out.print("Note: The max height should only be around 32,767px due to Microsoft GDI using a 16bit signed integer to store dimensions\n");
    //           System.out.print("going beyond this dimension is possible with this tool but the generated sprite image will not work correctly with\n");
    //           System.out.print("most browsers.\n\n");
    //           return;
    //        }

    //        Integer margin = Integer.parseInt(args[1]);
    Integer margin = Integer.parseInt("1");
    String spriteFile = "icons-" + RandomStringUtils.randomAlphanumeric(10) + ".png";
    SpriteCSSGenerator cssGenerator = new SpriteCSSGenerator();

    ClassLoader classLoader = SpriteGenerator.class.getClassLoader();
    URL folderPath = classLoader.getResource(NAIC_SMALL_ICON);
    if (folderPath != null) {
        File imageFolder = new File(folderPath.getPath());

        // Read images
        ArrayList<BufferedImage> imageList = new ArrayList<BufferedImage>();
        Integer yCoordinate = null;

        for (File f : imageFolder.listFiles()) {
            if (f.isFile()) {
                String fileName = f.getName();
                String ext = fileName.substring(fileName.lastIndexOf(".") + 1, fileName.length());

                if (ext.equals("png")) {
                    System.out.println("adding file " + fileName);
                    BufferedImage image = ImageIO.read(f);
                    imageList.add(image);

                    // register this sprite's CSS offset first, then advance by
                    // this image's height; advancing before registering (as the
                    // original did) mis-places icons whose heights differ
                    cssGenerator.addSpriteCSS(fileName.substring(0, fileName.indexOf(".")), 0, yCoordinate);
                    yCoordinate += image.getHeight() + margin;
                }
            }
        }

        // Find max width and total height
        int maxWidth = 0;
        int totalHeight = 0;

        for (BufferedImage image : imageList) {
            totalHeight += image.getHeight() + margin;

            if (image.getWidth() > maxWidth)
                maxWidth = image.getWidth();
        }

        System.out.format("Number of images: %s, total height: %spx, width: %spx%n", imageList.size(),
                totalHeight, maxWidth);

        // Create the actual sprite
        BufferedImage sprite = new BufferedImage(maxWidth, totalHeight, BufferedImage.TYPE_INT_ARGB);

        int currentY = 0;
        Graphics g = sprite.getGraphics();
        for (BufferedImage image : imageList) {
            g.drawImage(image, 0, currentY, null);
            currentY += image.getHeight() + margin;
        }

        System.out.format("Writing sprite: %s%n", spriteFile);
        ImageIO.write(sprite, "png", new File(spriteFile));
        File cssFile = cssGenerator.getFile(spriteFile);
        System.out.println(cssFile.getAbsolutePath() + " created");
    } else {
        System.err.println("Could not find folder: " + NAIC_SMALL_ICON);

    }

}
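
Note: the essential arithmetic of a vertical sprite sheet is that image i sits at the cumulative height of images 0..i-1 plus i margins, and ArrayList.size() gives the image count for the summary line. A minimal sketch of just that computation, with made-up icon heights:

import java.util.ArrayList;

public class SpriteOffsets {

    public static void main(String[] args) {
        int margin = 1;
        ArrayList<Integer> heights = new ArrayList<Integer>();
        heights.add(16); // hypothetical icon heights, not the real NAIC icons
        heights.add(32);
        heights.add(24);

        // offsets.get(i) is the y coordinate where image i is drawn and the
        // value that would go into its CSS background-position rule
        ArrayList<Integer> offsets = new ArrayList<Integer>();
        int y = 0;
        for (int h : heights) {
            offsets.add(y);
            y += h + margin;
        }
        int totalHeight = y - margin; // drop the trailing margin row

        System.out.format("Number of images: %s, total height: %spx%n", offsets.size(), totalHeight);
    }
}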

From source file:fr.inria.edelweiss.kgdqp.core.CentralizedInferrencingNoSpin.java

public static void main(String args[])
        throws ParseException, EngineException, InterruptedException, IOException, LoadException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    /////////////////
    Graph graph = Graph.create();
    QueryProcess exec = QueryProcess.create(graph);

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    //        Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    //        Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    //        Option locOpt = new Option("c", "centralized", false, "performs centralized inferences");
    Option dataOpt = new Option("l", "load", true, "data file or directory to be loaded");
    //        Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    //        options.addOption(queryOpt);
    //        options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    //        options.addOption(selOpt);
    //        options.addOption(locOpt);
    options.addOption(dataOpt);

    String header = "Corese/KGRAM rule engine experiment command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    }

    if (cmd.hasOption("l")) {
        String[] dataPaths = cmd.getOptionValues("l");
        for (String path : dataPaths) {
            Load ld = Load.create(graph);
            ld.load(path);
            logger.info("Loaded " + path);
        }
    }

    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            if (r.getAbsolutePath().endsWith(".rq")) {
                logger.info("Loading " + r.getAbsolutePath());
                //                ld.load(r.getAbsolutePath());

                //                    byte[] encoded = Files.readAllBytes(Paths.get(r.getAbsolutePath()));
                //                    String construct = new String(encoded, "UTF-8"); //StandardCharsets.UTF_8);

                FileInputStream f = new FileInputStream(r);
                QueryLoad ql = QueryLoad.create();
                String construct = ql.read(f);
                f.close();

                SPINProcess sp = SPINProcess.create();
                String spinConstruct = sp.toSpin(construct);

                ld.load(new ByteArrayInputStream(spinConstruct.getBytes()), Load.TURTLE_FORMAT);
                logger.info("Rules graph size : " + rulesG.size());

            }
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(exec);
    ruleEngine.setOptimize(true);
    ruleEngine.setConstructResult(true);
    ruleEngine.setTrace(true);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        //loading rule in the rule engine
        //            logger.info("Adding rule : ");
        //            System.out.println("-------");
        //            System.out.println(rule);
        //            System.out.println("");
        //            if (! rule.toLowerCase().contains("sameas")) {
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
        //            }
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    //        ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    //monitoring loop
    while (!threadPool.isTerminated()) {
        //            System.out.println("******************************");
        //            System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        //            System.out.println("Rule engine running for " + sw.getTime() + " ms");
        //            System.out.println("Federated graph size : " + graph.size());
        System.out.println(sw.getTime() + " , " + graph.size());
        Thread.sleep(5000);
    }

    logger.info("Federated graph size : " + graph.size());
    //        logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    //        TripleFormat f = TripleFormat.create(graph, true);
    //        f.write("/tmp/gAll.ttl");
}
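
Note: the Commons CLI boilerplate in this example follows one fixed shape: build an Options set, parse args, then branch on hasOption/getOptionValue. A minimal sketch of that shape using the same 1.x API (BasicParser) as the example; the option names here are illustrative:

import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class CliSketch {

    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        options.addOption(new Option("h", "help", false, "print this message"));
        options.addOption(new Option("r", "rulesDir", true, "directory containing the inference rules"));

        CommandLine cmd = new BasicParser().parse(options, args);
        if (cmd.hasOption("h")) {
            new HelpFormatter().printHelp("kgdqp", options, true);
            return;
        }
        if (!cmd.hasOption("r")) {
            System.err.println("You must specify a path for the inference rules directory!");
            return;
        }
        System.out.println("rulesDir = " + cmd.getOptionValue("r"));
    }
}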

From source file:fr.iphc.grid.jobmonitor.CeList.java

public static void main(String[] args) throws Exception {

    SessionFactory.createSession(true);
    CeList command = new CeList();
    CommandLine line = command.parse(args);
    Integer timeout = 0;
    String TableSql = "monce";
    //      MySQLAccess sql = new MySQLAccess();
    if (line.getOptionValue(OPT_TIMEOUT) == null) {
        timeout = 15;
    } else {
        timeout = Integer.parseInt(line.getOptionValue(OPT_TIMEOUT));
    }
    timeout = timeout * 60; // convert to seconds
    Date start = new Date();
    String OutDir = line.getOptionValue(OPT_OUTDIR);

    if (OutDir == null) {
        OutDir = "/tmp/thread";
    }
    ArrayList<URL> CeList = null;
    if (line.getOptionValue(OPT_CEPATH) == null) {
        CeList = AvailableLdapCe();
        //         for (URL k : CeList) {
        //            // System.out.println(k);
        //         }
    } else {
        CeList = AvailableCe(line.getOptionValue(OPT_CEPATH));
    }
    Boolean ret = initDirectory(new File(OutDir));
    if (!ret) {
        System.out.println("ERROR: " + OutDir + "STOP");
        System.exit(-1);
    }

    // check if we can connect to the grid
    // try{
    // SessionFactory.createSession(true);
    // }catch(NoSuccessException e){
    // System.err.println("Could not connect to the grid at all
    // ("+e.getMessage()+")");
    // System.err.println("Aborting");
    // System.exit(0);
    //
    // }

    SubmitterThread[] st = new SubmitterThread[CeList.size()];

    Iterator<URL> i = CeList.iterator();
    int index = 0;
    while (i.hasNext()) {
        URL serviceURL = i.next();
        // Do not move this setup into the thread, because of the options.
        Properties prop = new Properties();
        prop.setProperty("Executable", "/bin/hostname");//
        // prop.setProperty("Executable", "touch /dev/null");
        JobDescription desc = createJobDescription(prop);
        desc.setAttribute(JobDescription.INTERACTIVE, "true");
        desc.setAttribute(JobDescription.EXECUTABLE, "/bin/hostname");
        // proxy="/home/dsa/.globus/biomed.txt";
        // Context ctx = ContextFactory.createContext();
        // ctx.setAttribute(Context.TYPE, "VOMS");
        // ctx.setAttribute(Context.USERVO, "biomed");
        // ctx.setAttribute(Context.USERPROXY,proxy);
        // Session session = SessionFactory.createSession(false);
        // session.addContext(ctx);
        Session session = SessionFactory.createSession(true);
        st[index] = new SubmitterThread(serviceURL, session, desc, OutDir, timeout, start);
        st[index].setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                // System.out.println("Error! An exception occured in "
                // + t.getName() + ". Cause: " + e.getMessage());
            }
        });
        st[index].start();
        // Thread.sleep(15*1000);
        // test if the output file exists
        // System.out.println("Alive
        // "+OutDir+"/"+serviceURL.getHost()+"_"+serviceURL.getPath().replaceAll("/cream-","")+".out");
        // while ((!((new
        // File(OutDir+"/"+serviceURL.getHost()+"_"+serviceURL.getPath().replaceAll("/cream-","")+".out").exists())
        // ||
        // (new
        // File(OutDir+"/"+serviceURL.getHost()+"_"+serviceURL.getPath().replaceAll("/cream-","")+".err").exists()))))
        // {
        // Thread.sleep(500);
        // }
        // System.out.println("Alive "+serviceURL.getHost()+"-"+ index+"FILE
        // EXIST");
        index++;

    }

    // System.out.println("BEGIN LOOP: Max " + index);
    long date_start = System.currentTimeMillis();
    // System.out.println("BEGIN START: " + date_start);
    Integer time_out = (timeout + 180) * 1000; // in ms; the extra 180 s is a safety margin over the per-thread timeout
    Boolean Alive = true;
    //      int nb = 0;
    long now = System.currentTimeMillis();
    do {
        now = System.currentTimeMillis();
        Alive = false;
        //         nb = 0;
        for (int j = 0; j < index; j++) {
            if (st[j].isAlive()) {
                // System.out.println("Alive "+j);
                Alive = true;
                //               nb++;
            }
        }
        // System.out.println(nb);
        Thread.sleep(10000);
    } while ((Alive) && ((now - date_start) < time_out));

    for (int j = 0; j < index; j++) {
        if (st[j].isAlive()) {
            st[j].Requeststop();
        }
    }
    BilanCe(OutDir, CeList, TableSql);
    jobManagerLdap jm = new jobManagerLdap();
    jm.updateLdapCe();
    System.out.println("END " + new Date());
    // further processing could go here...
    System.exit(0);
}
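
Note: the polling loop above re-checks every thread's isAlive() flag on a fixed 10 s sleep until a global deadline passes. As a hedged alternative, Thread.join(millis) can charge each wait against the shared deadline without sleeping. A minimal sketch, assuming the worker threads eventually finish or are stopped separately:

public class DeadlineJoin {

    // Waits up to deadlineMs for all threads; returns true if every one finished.
    static boolean joinAll(Thread[] threads, long deadlineMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + deadlineMs;
        for (Thread t : threads) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0) {
                break; // deadline spent; stop waiting
            }
            t.join(remaining); // blocks at most 'remaining' ms
        }
        for (Thread t : threads) {
            if (t.isAlive()) {
                return false; // at least one thread outlived the deadline
            }
        }
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        Thread[] workers = new Thread[3];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                try {
                    Thread.sleep(1000); // simulated submission work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            workers[i].start();
        }
        System.out.println("all finished: " + joinAll(workers, 5000));
    }
}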

From source file:jCloisterZone.CarcassonneEnvironment.java

public static void main(String[] args) {
    int repetitions = 100;
    double[] scores = new double[repetitions];

    RRLJCloisterClient client = new LocalCarcassonneClient("config.ini");
    ServerIF server = null;
    Game game = client.getGame();
    Player firstPlayer = null;
    ArrayList<PlayerSlot> slots = new ArrayList<PlayerSlot>();
    for (int r = 0; r < repetitions; r++) {
        client.createGame();
        if (game == null) {
            server = new LocalCarcassonneServer(client.getGame());
            PlayerSlot slot = new PlayerSlot(0, PlayerSlot.SlotType.AI, "RANDOM" + 0, client.getClientId());
            slot.setAiClassName(RandomAIPlayer.class.getName());
            slots.add(slot);
            for (int j = 1; j < Integer.parseInt(args[0]); j++) {
                slot = new PlayerSlot(j, PlayerSlot.SlotType.AI, "AI" + j, client.getClientId());
                slot.setAiClassName(LegacyAiPlayer.class.getName());
                slots.add(slot);
            }
            game = client.getGame();
        } else {
            // Reset the UIs
            server.stopGame();
            game.clearUserInterface();

            // Clear the slots and re-add them.
            for (int i = 0; i < PlayerSlot.COUNT; i++) {
                server.updateSlot(new PlayerSlot(i), null);
            }
        }

        Collections.shuffle(slots);
        for (int i = 0; i < slots.size(); i++) {
            PlayerSlot slot = slots.get(i);
            PlayerSlot cloneSlot = new PlayerSlot(i, slot.getType(), slot.getNick(), slot.getOwner());
            cloneSlot.setAiClassName(slot.getAiClassName());
            server.updateSlot(cloneSlot, LegacyAiPlayer.supportedExpansions());
        }

        server.startGame();

        Phase phase = game.getPhase();

        // Cycle through (probably only once) to keep the game moving.
        while (phase != null && !phase.isEntered()) {
            // Modifying phases to proxyless versions
            if (phase.getClass().equals(CreateGamePhase.class))
                phase = game.getPhases().get(ProxylessCreateGamePhase.class);
            if (phase.getClass().equals(DrawPhase.class))
                phase = game.getPhases().get(ProxylessDrawPhase.class);

            phase.setEntered(true);
            phase.enter();
            phase = game.getPhase();

            if (game.getTurnPlayer().getNick().equals("RANDOM0"))
                firstPlayer = game.getTurnPlayer();
        }
        int score = firstPlayer.getPoints();
        scores[r] = score;
        System.out.println(score);
    }

    Mean m = new Mean();
    StandardDeviation sd = new StandardDeviation();
    System.out.println("Mean: " + m.evaluate(scores) + ", SD: " + sd.evaluate(scores));
}