Example usage for java.io.FileReader FileReader(FileDescriptor fd)

Introduction

On this page you can find usage examples for the java.io.FileReader constructor FileReader(FileDescriptor fd), drawn from open-source projects.

Prototype

public FileReader(FileDescriptor fd) 

Document

Creates a new FileReader, given the FileDescriptor to read from, using the platform's default charset (java.nio.charset.Charset#defaultCharset()).
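
The FileDescriptor overload is useful when a descriptor is already open, such as the process's standard input (FileDescriptor.in) or the descriptor behind a FileInputStream. Note that most of the usage examples below actually call the FileReader(File) and FileReader(String) overloads, usually wrapped in a BufferedReader. Here is a minimal sketch of the FileDescriptor variant; the class name and the path "example.txt" are placeholders, not part of any example below:

import java.io.BufferedReader;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;

public class FileDescriptorReaderExample {
    public static void main(String[] args) throws IOException {
        // Reuse the descriptor of an already-open stream; "example.txt" is a placeholder path.
        try (FileInputStream fis = new FileInputStream("example.txt");
                BufferedReader br = new BufferedReader(new FileReader(fis.getFD()))) {
            String line;
            while ((line = br.readLine()) != null) {
                System.out.println(line);
            }
        }

        // The constructor also accepts the standard descriptors, e.g.:
        // Reader stdin = new FileReader(FileDescriptor.in);
    }
}

Because this constructor always uses the platform default charset, prefer an InputStreamReader with an explicit Charset when the encoding matters.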

Usage

From source file:edu.illinois.cs.cogcomp.datalessclassification.ta.W2VDatalessAnnotator.java

/**
 * @param args config: config file path; testFile: test file path
 */
public static void main(String[] args) {
    CommandLine cmd = ESADatalessAnnotator.getCMDOpts(args);

    ResourceManager rm;

    try {
        String configFile = cmd.getOptionValue("config", "config/project.properties");
        ResourceManager nonDefaultRm = new ResourceManager(configFile);

        rm = new W2VDatalessConfigurator().getConfig(nonDefaultRm);
    } catch (IOException e) {
        rm = new W2VDatalessConfigurator().getDefaultConfig();
    }

    String testFile = cmd.getOptionValue("testFile", "data/graphicsTestDocument.txt");

    StringBuilder sb = new StringBuilder();

    String line;

    try (BufferedReader br = new BufferedReader(new FileReader(new File(testFile)))) {
        while ((line = br.readLine()) != null) {
            sb.append(line);
            sb.append(" ");
        }

        String text = sb.toString().trim();

        TokenizerTextAnnotationBuilder taBuilder = new TokenizerTextAnnotationBuilder(new StatefulTokenizer());
        TextAnnotation ta = taBuilder.createTextAnnotation(text);

        W2VDatalessAnnotator datalessAnnotator = new W2VDatalessAnnotator(rm);
        datalessAnnotator.addView(ta);

        List<Constituent> annots = ta.getView(ViewNames.DATALESS_W2V).getConstituents();

        System.out.println("Predicted LabelIDs:");

        for (Constituent annot : annots) {
            System.out.println(annot.getLabel());
        }

        Map<String, String> labelNameMap = DatalessAnnotatorUtils
                .getLabelNameMap(rm.getString(DatalessConfigurator.LabelName_Path.key));

        System.out.println("Predicted Labels:");

        for (Constituent annot : annots) {
            System.out.println(labelNameMap.get(annot.getLabel()));
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
        logger.error("Test File not found at " + testFile + " ... exiting");
        System.exit(-1);
    } catch (AnnotatorException e) {
        e.printStackTrace();
        logger.error("Error Annotating the Test Document with the Dataless View ... exiting");
        System.exit(-1);
    } catch (IOException e) {
        e.printStackTrace();
        logger.error("IO Error while reading the test file ... exiting");
        System.exit(-1);
    }
}

From source file:DOMTreeWalkerTreeModel.java

/**
 * This main() method demonstrates the use of this class, the use of the
 * Xerces DOM parser, and the creation of a DOM Level 2 TreeWalker object.
 */
public static void main(String[] args) throws IOException, SAXException {
    // Obtain an instance of a Xerces parser to build a DOM tree.
    // Note that we are not using the JAXP API here, so this
    // code uses Apache Xerces APIs that are not standard
    DOMParser parser = new org.apache.xerces.parsers.DOMParser();

    // Get a java.io.Reader for the input XML file and
    // wrap the input file in a SAX input source
    Reader in = new BufferedReader(new FileReader(args[0]));
    InputSource input = new org.xml.sax.InputSource(in);

    // Tell the Xerces parser to parse the input source
    parser.parse(input);

    // Ask the parser to give us our DOM Document. Once we've got the DOM
    // tree, we don't have to use the Apache Xerces APIs any more; from
    // here on, we use the standard DOM APIs
    Document document = parser.getDocument();

    // If we're using a DOM Level 2 implementation, then our Document
    // object ought to implement DocumentTraversal
    DocumentTraversal traversal = (DocumentTraversal) document;

    // For this demonstration, we create a NodeFilter that filters out
    // Text nodes containing only space; these just clutter up the tree
    NodeFilter filter = new NodeFilter() {
        public short acceptNode(Node n) {
            if (n.getNodeType() == Node.TEXT_NODE) {
                // Use trim() to strip off leading and trailing space.
                // If nothing is left, then reject the node
                if (((Text) n).getData().trim().length() == 0)
                    return NodeFilter.FILTER_REJECT;
            }
            return NodeFilter.FILTER_ACCEPT;
        }
    };

    // This set of flags says to "show" all node types except comments
    int whatToShow = NodeFilter.SHOW_ALL & ~NodeFilter.SHOW_COMMENT;

    // Create a TreeWalker using the filter and the flags
    TreeWalker walker = traversal.createTreeWalker(document, whatToShow, filter, false);

    // Instantiate a TreeModel and a JTree to display it
    JTree tree = new JTree(new DOMTreeWalkerTreeModel(walker));

    // Create a frame and a scrollpane to display the tree, and pop them up
    JFrame frame = new JFrame("DOMTreeWalkerTreeModel Demo");
    frame.getContentPane().add(new JScrollPane(tree));
    frame.addWindowListener(new WindowAdapter() {
        public void windowClosing(WindowEvent e) {
            System.exit(0);
        }
    });

    frame.setSize(500, 250);
    frame.setVisible(true);
}

From source file:graticules2wld.Main.java

/**
 * @param args command line arguments
 * @throws Exception
 */
public static void main(String[] args) throws Exception {

    /* parse the command line arguments */
    // create the command line parser
    CommandLineParser parser = new PosixParser();

    // create the Options
    Options options = new Options();
    options.addOption("x", "originx", true, "x component of projected coordinates of upper left pixel");
    options.addOption("y", "originy", true, "y component of projected coordinates of upper left pixel");
    options.addOption("u", "tometers", true, "multiplication factor to get source units into meters");
    options.addOption("h", "help", false, "prints this usage page");
    options.addOption("d", "debug", false, "prints debugging information to stdout");

    double originNorthing = 0;
    double originEasting = 0;

    String inputFileName = null;
    String outputFileName = null;

    try {
        // parse the command line arguments
        CommandLine line = parser.parse(options, args);

        if (line.hasOption("help"))
            printUsage(0); // print usage then exit using a non-error exit status

        if (line.hasOption("debug"))
            debug = true;

        // these arguments are required
        if (!line.hasOption("originy") || !line.hasOption("originx"))
            printUsage(1);

        originNorthing = Double.parseDouble(line.getOptionValue("originy"));
        originEasting = Double.parseDouble(line.getOptionValue("originx"));

        if (line.hasOption("tometers"))
            unitsToMeters = Double.parseDouble(line.getOptionValue("tometers"));

        // Two args should be left: the input CSV file name and the output WLD file name.
        String[] iofiles = line.getArgs();
        if (iofiles.length < 2) {
            printUsage(1);
        }

        inputFileName = iofiles[0];
        outputFileName = iofiles[1];
    } catch (ParseException exp) {
        System.err.println("Unexpected exception:" + exp.getMessage());
        System.exit(1);
    }

    // try to open the input file for reading and the output file for writing
    File graticulesCsvFile;
    BufferedReader csvReader = null;

    File wldFile;
    BufferedWriter wldWriter = null;

    try {
        graticulesCsvFile = new File(inputFileName);
        csvReader = new BufferedReader(new FileReader(graticulesCsvFile));
    } catch (IOException exp) {
        System.err.println("Could not open input file for reading: " + inputFileName);
        System.exit(1);
    }

    try {
        wldFile = new File(outputFileName);
        wldWriter = new BufferedWriter(new FileWriter(wldFile));
    } catch (IOException exp) {
        System.err.println("Could not open output file for writing: " + outputFileName);
        System.exit(1);
    }

    // list of lon graticules and lat graticules
    ArrayList<Graticule> lonGrats = new ArrayList<Graticule>();
    ArrayList<Graticule> latGrats = new ArrayList<Graticule>();

    // read the source CSV and convert its information into the two ArrayList<Graticule> data structures
    readCSV(csvReader, lonGrats, latGrats);

    // we now need to start finding the world file parameters
    DescriptiveStatistics stats = new DescriptiveStatistics();

    // find theta and phi
    for (Graticule g : latGrats) {
        stats.addValue(g.angle());
    }

    double theta = stats.getMean(); // we use the mean of the lat angles as theta
    if (debug)
        System.out.println("theta range = " + Math.toDegrees(stats.getMax() - stats.getMin()));
    stats.clear();

    for (Graticule g : lonGrats) {
        stats.addValue(g.angle());
    }

    double phi = stats.getMean(); // ... and the mean of the lon angles for phi
    if (debug)
        System.out.println("phi range = " + Math.toDegrees(stats.getMax() - stats.getMin()));
    stats.clear();

    // print these if in debug mode
    if (debug) {
        System.out.println("theta = " + Math.toDegrees(theta) + "deg");
        System.out.println("phi = " + Math.toDegrees(phi) + "deg");
    }

    // find x and y (distance between pixels in map units)
    Collections.sort(latGrats);
    Collections.sort(lonGrats);
    int prevMapValue = 0; // fixme: how to stop warning about not being initialised?
    Line2D prevGratPixelSys = new Line2D.Double();

    boolean first = true;
    for (Graticule g : latGrats) {
        if (!first) {
            int deltaMapValue = Math.abs(g.realValue() - prevMapValue);
            double deltaPixelValue = (g.l.ptLineDist(prevGratPixelSys.getP1())
                    + (g.l.ptLineDist(prevGratPixelSys.getP2()))) / 2;

            double delta = deltaMapValue / deltaPixelValue;
            stats.addValue(delta);
        } else {
            first = false;
            prevMapValue = g.realValue();
            prevGratPixelSys = (Line2D) g.l.clone();
        }
    }

    double y = stats.getMean();
    if (debug)
        System.out.println("y range = " + (stats.getMax() - stats.getMin()));
    stats.clear();

    first = true;
    for (Graticule g : lonGrats) {
        if (!first) {
            int deltaMapValue = g.realValue() - prevMapValue;
            double deltaPixelValue = (g.l.ptLineDist(prevGratPixelSys.getP1())
                    + (g.l.ptLineDist(prevGratPixelSys.getP2()))) / 2;

            double delta = deltaMapValue / deltaPixelValue;
            stats.addValue(delta);
        } else {
            first = false;
            prevMapValue = g.realValue();
            prevGratPixelSys = (Line2D) g.l.clone();
        }
    }

    double x = stats.getMean();
    if (debug)
        System.out.println("x range = " + (stats.getMax() - stats.getMin()));
    stats.clear();

    if (debug) {
        System.out.println("x = " + x);
        System.out.println("y = " + y);
    }

    SimpleRegression regression = new SimpleRegression();

    // C, F are translation terms: x, y map coordinates of the center of the upper-left pixel
    for (Graticule g : latGrats) {
        // find perp dist to pixel space 0,0
        Double perpPixelDist = g.l.ptLineDist(new Point2D.Double(0, 0));

        // find the map space distance from this graticule to the center of the 0,0 pixel
        Double perpMapDist = perpPixelDist * y; // perpMapDist / perpPixelDist = y

        regression.addData(perpMapDist, g.realValue());
    }

    double F = regression.getIntercept();
    regression.clear();

    for (Graticule g : lonGrats) {
        // find perp dist to pixel space 0,0
        Double perpPixelDist = g.l.ptLineDist(new Point2D.Double(0, 0));

        // find the map space distance from this graticule to the center of the 0,0 pixel
        Double perpMapDist = perpPixelDist * x; // perpMapDist / perpPixelDist = x

        regression.addData(perpMapDist, g.realValue());
    }

    double C = regression.getIntercept();
    regression.clear();

    if (debug) {
        System.out.println("Upper Left pixel has coordinates " + C + ", " + F);
    }

    // convert to meters
    C *= unitsToMeters;
    F *= unitsToMeters;

    // C,F store the projected (in map units) coordinates of the upper left pixel.
    // originNorthing,originEasting is the offset we need to apply to 0,0 to push the offsets into our global coordinate system 
    C = originEasting + C;
    F = originNorthing + F;

    // calculate the affine transformation matrix elements
    double D = -1 * x * unitsToMeters * Math.sin(theta);
    double A = x * unitsToMeters * Math.cos(theta);
    double B = y * unitsToMeters * Math.sin(phi); // if B should be negative, the negative sine yields it
    double E = -1 * y * unitsToMeters * Math.cos(phi);

    /*
     * Line 1: A: pixel size in the x-direction in map units/pixel
     * Line 2: D: rotation about y-axis
     * Line 3: B: rotation about x-axis
     * Line 4: E: pixel size in the y-direction in map units, almost always negative
     * Line 5: C: x-coordinate of the center of the upper left pixel
     * Line 6: F: y-coordinate of the center of the upper left pixel
     */
    if (debug) {
        System.out.println("A = " + A);
        System.out.println("D = " + D);
        System.out.println("B = " + B);
        System.out.println("E = " + E);
        System.out.println("C = " + C);
        System.out.println("F = " + F);

        // write the world file
        System.out.println();
        System.out.println("World File:");
        System.out.println(A);
        System.out.println(D);
        System.out.println(B);
        System.out.println(E);
        System.out.println(C);
        System.out.println(F);
    }

    // write to the .wld file
    wldWriter.write(A + "\n");
    wldWriter.write(D + "\n");
    wldWriter.write(B + "\n");
    wldWriter.write(E + "\n");
    wldWriter.write(C + "\n");
    wldWriter.write(F + "\n");

    wldWriter.close();
}

From source file:com.act.lcms.v2.MZCollisionCounter.java

public static void main(String[] args) throws Exception {
    CLIUtil cliUtil = new CLIUtil(MassChargeCalculator.class, HELP_MESSAGE, OPTION_BUILDERS);
    CommandLine cl = cliUtil.parseCommandLine(args);

    File inputFile = new File(cl.getOptionValue(OPTION_INPUT_INCHI_LIST));
    if (!inputFile.exists()) {
        cliUtil.failWithMessage("Input file at does not exist at %s", inputFile.getAbsolutePath());
    }

    List<MassChargeCalculator.MZSource> sources = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            sources.add(new MassChargeCalculator.MZSource(line));
            if (sources.size() % 1000 == 0) {
                LOGGER.info("Loaded %d sources from input file", sources.size());
            }
        }
    }

    Set<String> considerIons = Collections.emptySet();
    if (cl.hasOption(OPTION_ONLY_CONSIDER_IONS)) {
        List<String> ions = Arrays.asList(cl.getOptionValues(OPTION_ONLY_CONSIDER_IONS));
        LOGGER.info("Only considering ions for m/z calculation: %s", StringUtils.join(ions, ", "));
        considerIons = new HashSet<>(ions);
    }

    TSVWriter<String, Long> tsvWriter = new TSVWriter<>(Arrays.asList("collisions", "count"));
    tsvWriter.open(new File(cl.getOptionValue(OPTION_OUTPUT_FILE)));

    try {
        LOGGER.info("Loaded %d sources in total from input file", sources.size());

        MassChargeCalculator.MassChargeMap mzMap = MassChargeCalculator.makeMassChargeMap(sources,
                considerIons);

        if (!cl.hasOption(OPTION_COUNT_WINDOW_INTERSECTIONS)) {
            // Do an exact analysis of the m/z collisions if windowing is not specified.

            LOGGER.info("Computing precise collision histogram.");
            Iterable<Double> mzs = mzMap.ionMZIter();
            Map<Integer, Long> collisionHistogram = histogram(
                    StreamSupport.stream(mzs.spliterator(), false).map(mz -> { // See comment about Iterable below.
                        try {
                            return mzMap.ionMZToMZSources(mz).size();
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }));
            List<Integer> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Integer collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision.longValue());
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        } else {
            /* After some deliberation (thanks Gil!), the windowed variant of this calculation counts the number of
             * structures whose 0.01 Da m/z windows (for some set of ions) overlap with each other.
             *
             * For example, let's assume we have five total input structures, and are only searching for one ion.  Let's
             * also assume that three of those structures have m/z A and the remaining two have m/z B.  The windows might
             * look like this in the m/z domain:
             * |----A----|
             *        |----B----|
             * Because A represents three structures and overlaps with B, which represents two, we assign A a count of 5--
             * this is the number of structures we believe could fall into the range of A given our current peak calling
             * approach.  Similarly, B is assigned a count of 5, as the possibility for collision/confusion is symmetric.
             *
             * Note that this is an over-approximation of collisions, as we could more precisely only consider intersections
             * when the exact m/z of B falls within the window around A and vice versa.  However, because we have observed
             * cases where the MS sensor doesn't report structures at exactly the m/z we predict, we employ this weaker
             * definition of intersection to give a slightly pessimistic view of what confusions might be possible. */
            // Compute windows for every m/z.  We don't care about the original mz values since we just want the count.
            List<Double> mzs = mzMap.ionMZsSorted();

            final Double windowHalfWidth;
            if (cl.hasOption(OPTION_WINDOW_HALFWIDTH)) {
                // Don't use get with default for this option, as we want the exact FP value of the default tolerance.
                windowHalfWidth = Double.valueOf(cl.getOptionValue(OPTION_WINDOW_HALFWIDTH));
            } else {
                windowHalfWidth = DEFAULT_WINDOW_TOLERANCE;
            }

            /* Window = (lower bound, upper bound), counter of represented m/z's that collide with this window, and number
             * of representative structures (which will be used in counting collisions). */
            LinkedList<CollisionWindow> allWindows = new LinkedList<CollisionWindow>() {
                {
                    for (Double mz : mzs) {
                        // CPU for memory trade-off: don't re-compute the window bounds over and over and over and over and over.
                        try {
                            add(new CollisionWindow(mz, windowHalfWidth, mzMap.ionMZToMZSources(mz).size()));
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }
                }
            };

            // Sweep line time!  The window ranges are the interesting points.  We just accumulate overlap counts as we go.
            LinkedList<CollisionWindow> workingSet = new LinkedList<>();
            List<CollisionWindow> finished = new LinkedList<>();

            while (allWindows.size() > 0) {
                CollisionWindow thisWindow = allWindows.pop();
                // Remove any windows from the working set that don't overlap with the next window.
                while (workingSet.size() > 0 && workingSet.peekFirst().getMaxMZ() < thisWindow.getMinMZ()) {
                    finished.add(workingSet.pop());
                }

                for (CollisionWindow w : workingSet) {
                    /* Add the size of the new overlapping window's structure count to each of the windows in the working set,
                     * which represents the number of possible confused structures that fall within the overlapping region.
                     * We exclude the window itself as it should already have counted the colliding structures it represents. */
                    w.getAccumulator().add(thisWindow.getStructureCount());

                    /* Reciprocally, add the structure counts of all windows with which the current window overlaps to it. */
                    thisWindow.getAccumulator().add(w.getStructureCount());
                }

                // Now that accumulation is complete, we can safely add the current window.
                workingSet.add(thisWindow);
            }

            // All the interesting events are done, so drop the remaining windows into the finished set.
            finished.addAll(workingSet);

            Map<Long, Long> collisionHistogram = histogram(
                    finished.stream().map(w -> w.getAccumulator().longValue()));
            List<Long> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Long collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision);
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        }
    } finally {
        if (tsvWriter != null) {
            tsvWriter.close();
        }
    }
}

From source file:net.orzo.App.java

/**
 * Application entry point.
 */
public static void main(final String[] args) {
    final App app = new App();
    Logger log = null;
    CommandLine cmd;

    try {
        cmd = app.init(args);

        if (cmd.hasOption("h")) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp(
                    "orzo [options] user_script [user_arg1 [user_arg2 [...]]]\n(to generate a template: orzo -t [file path])",
                    app.cliOptions);

        } else if (cmd.hasOption("v")) {
            System.out.printf("Orzo.js version %s\n", app.props.get("orzo.version"));

        } else if (cmd.hasOption("t")) {
            String templateSrc = new ResourceLoader().getResourceAsString("net/orzo/template1.js");

            File tplFile = new File(cmd.getOptionValue("t"));
            FileWriter tplWriter = new FileWriter(tplFile);
            tplWriter.write(templateSrc);
            tplWriter.close();

            File dtsFile = new File(
                    String.format("%s/orzojs.d.ts", new File(tplFile.getAbsolutePath()).getParent()));
            FileWriter dtsWriter = new FileWriter(dtsFile);
            String dtsSrc = new ResourceLoader().getResourceAsString("net/orzo/orzojs.d.ts");
            dtsWriter.write(dtsSrc);
            dtsWriter.close();

        } else if (cmd.hasOption("T")) {
            String templateSrc = new ResourceLoader().getResourceAsString("net/orzo/template1.js");
            System.out.println(templateSrc);

        } else {

            // Logger initialization
            if (cmd.hasOption("g")) {
                System.setProperty("logback.configurationFile", cmd.getOptionValue("g"));

            } else {
                System.setProperty("logback.configurationFile", "./logback.xml");
            }
            log = LoggerFactory.getLogger(App.class);
            if (cmd.hasOption("s")) { // Orzo.js as a REST and AMQP service
                FullServiceConfig conf = new Gson().fromJson(new FileReader(cmd.getOptionValue("s")),
                        FullServiceConfig.class);
                Injector injector = Guice.createInjector(new CoreModule(conf), new RestServletModule());
                HttpServer httpServer = new HttpServer(conf, new JerseyGuiceServletConfig(injector));
                app.services.add(httpServer);

                if (conf.getAmqpResponseConfig() != null) {
                    // response AMQP service must be initialized before receiving one
                    app.services.add(injector.getInstance(AmqpResponseConnection.class));
                }

                if (conf.getAmqpConfig() != null) {
                    app.services.add(injector.getInstance(AmqpConnection.class));
                    app.services.add(injector.getInstance(AmqpService.class));
                }

                if (conf.getRedisConf() != null) {
                    app.services.add(injector.getInstance(RedisStorage.class));
                }

                Runtime.getRuntime().addShutdownHook(new ShutdownHook(app));
                app.startServices();

            } else if (cmd.hasOption("d")) { // Demo mode
                final String scriptId = "demo";
                final SourceCode demoScript = SourceCode.fromResource(DEMO_SCRIPT);
                System.err.printf("Running demo script %s.", demoScript.getName());
                CmdConfig conf = new CmdConfig(scriptId, demoScript, null, cmd.getOptionValue("p", null));
                TaskManager tm = new TaskManager(conf);
                tm.startTaskSync(tm.registerTask(scriptId, new String[0]));

            } else if (cmd.getArgs().length > 0) { // Command line mode
                File userScriptFile = new File(cmd.getArgs()[0]);
                String optionalModulesPath = null;
                String[] inputValues;
                SourceCode userScript;

                // custom CommonJS modules path
                if (cmd.hasOption("m")) {
                    optionalModulesPath = cmd.getOptionValue("m");
                }

                if (cmd.getArgs().length > 0) {
                    inputValues = Arrays.copyOfRange(cmd.getArgs(), 1, cmd.getArgs().length);
                } else {
                    inputValues = new String[0];
                }

                userScript = SourceCode.fromFile(userScriptFile);
                CmdConfig conf = new CmdConfig(userScript.getName(), userScript, optionalModulesPath,
                        cmd.getOptionValue("p", null));
                TaskManager tm = new TaskManager(conf);
                String taskId = tm.registerTask(userScript.getName(), inputValues);
                tm.startTaskSync(taskId);
                if (tm.getTask(taskId).getStatus() == TaskStatus.ERROR) {
                    tm.getTask(taskId).getFirstError().getErrors().stream().forEach(System.err::println);
                }

            } else {
                System.err.println("Invalid parameters. Try -h for more information.");
                System.exit(1);
            }
        }

    } catch (Exception ex) {
        System.err.printf("Orzo.js crashed with error: %s\nSee the log for details.\n", ex.getMessage());
        if (log != null) {
            log.error(ex.getMessage(), ex);

        } else {
            ex.printStackTrace();
        }
    }
}

From source file:biomine.nodeimportancecompression.ImportanceCompressionReport.java

public static void main(String[] args) throws IOException, java.text.ParseException {
    opts.addOption("algorithm", true,
            "Used algorithm for compression. Possible values are 'brute-force', "
                    + "'brute-force-edges','brute-force-merges','randomized','randomized-merges',"
                    + "'randomized-edges'," + "'fast-brute-force',"
                    + "'fast-brute-force-merges','fast-brute-force-merge-edges'. Default is 'brute-force'.");
    opts.addOption("query", true, "Query nodes ids, separated by comma.");
    opts.addOption("queryfile", true, "Read query nodes from file.");
    opts.addOption("ratio", true, "Goal ratio");
    opts.addOption("importancefile", true, "Read importances straight from file");
    opts.addOption("keepedges", false, "Don't remove edges during merges");
    opts.addOption("connectivity", false, "Compute and output connectivities in edge oriented case");
    opts.addOption("paths", false, "Do path oriented compression");
    opts.addOption("edges", false, "Do edge oriented compression");
    // opts.addOption( "a",

    double sigma = 1.0;
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(opts, args);
    } catch (ParseException e) {
        e.printStackTrace();
        System.exit(0);
    }

    String queryStr = cmd.getOptionValue("query");
    String[] queryNodeIDs = {};
    double[] queryNodeIMP = {};
    if (queryStr != null) {
        queryNodeIDs = queryStr.split(",");
        queryNodeIMP = new double[queryNodeIDs.length];
        for (int i = 0; i < queryNodeIDs.length; i++) {
            String s = queryNodeIDs[i];
            String[] es = s.split("=");
            queryNodeIMP[i] = 1;
            if (es.length == 2) {
                queryNodeIDs[i] = es[0];
                queryNodeIMP[i] = Double.parseDouble(es[1]);
            } else if (es.length > 2) {
                System.out.println("Too many '=' in querynode specification: " + s);
            }
        }
    }

    String queryFile = cmd.getOptionValue("queryfile");
    Map<String, Double> queryNodes = Collections.EMPTY_MAP;
    if (queryFile != null) {
        File in = new File(queryFile);
        BufferedReader read = new BufferedReader(new FileReader(in));

        queryNodes = readMap(read);
        read.close();
    }

    String impfile = cmd.getOptionValue("importancefile");
    Map<String, Double> importances = null;
    if (impfile != null) {
        File in = new File(impfile);
        BufferedReader read = new BufferedReader(new FileReader(in));

        importances = readMap(read);
        read.close();
    }

    String algoStr = cmd.getOptionValue("algorithm");
    CompressionAlgorithm algo = null;

    if (algoStr == null || algoStr.equals("brute-force")) {
        algo = new BruteForceCompression();
    } else if (algoStr.equals("brute-force-edges")) {
        algo = new BruteForceCompressionOnlyEdges();
    } else if (algoStr.equals("brute-force-merges")) {
        algo = new BruteForceCompressionOnlyMerges();
    } else if (algoStr.equals("fast-brute-force-merges")) {
        //algo = new FastBruteForceCompressionOnlyMerges();
        algo = new FastBruteForceCompression(true, false);
    } else if (algoStr.equals("fast-brute-force-edges")) {
        algo = new FastBruteForceCompression(false, true);
        //algo = new FastBruteForceCompressionOnlyEdges();
    } else if (algoStr.equals("fast-brute-force")) {
        algo = new FastBruteForceCompression(true, true);
    } else if (algoStr.equals("randomized-edges")) {
        algo = new RandomizedCompressionOnlyEdges(); //modified
    } else if (algoStr.equals("randomized")) {
        algo = new RandomizedCompression();
    } else if (algoStr.equals("randomized-merges")) {
        algo = new RandomizedCompressionOnlyMerges();
    } else {
        System.out.println("Unsupported algorithm: " + algoStr);
        printHelp();
    }

    String ratioStr = cmd.getOptionValue("ratio");
    double ratio = 0;
    if (ratioStr != null) {
        ratio = Double.parseDouble(ratioStr);
    } else {
        System.out.println("Goal ratio not specified");
        printHelp();
    }

    String infile = null;
    if (cmd.getArgs().length != 0) {
        infile = cmd.getArgs()[0];
    } else {
        printHelp();
    }

    BMGraph bmg = BMGraphUtils.readBMGraph(new File(infile));
    HashMap<BMNode, Double> queryBMNodes = new HashMap<BMNode, Double>();
    for (String id : queryNodes.keySet()) {
        queryBMNodes.put(bmg.getNode(id), queryNodes.get(id));
    }

    long startMillis = System.currentTimeMillis();
    ImportanceGraphWrapper wrap = QueryImportance.queryImportanceGraph(bmg, queryBMNodes);

    if (importances != null) {
        for (String id : importances.keySet()) {
            wrap.setImportance(bmg.getNode(id), importances.get(id));
        }
    }

    ImportanceMerger merger = null;
    if (cmd.hasOption("edges")) {
        merger = new ImportanceMergerEdges(wrap.getImportanceGraph());
    } else if (cmd.hasOption("paths")) {
        merger = new ImportanceMergerPaths(wrap.getImportanceGraph());
    } else {
        System.out.println("Specify either 'paths' or 'edges'.");
        System.exit(1);
    }

    if (cmd.hasOption("keepedges")) {
        merger.setKeepEdges(true);
    }

    algo.compress(merger, ratio);
    long endMillis = System.currentTimeMillis();

    // write importance

    {
        BufferedWriter wr = new BufferedWriter(new FileWriter("importance.txt", false));
        for (BMNode nod : bmg.getNodes()) {
            wr.write(nod + " " + wrap.getImportance(nod) + "\n");
        }
        wr.close();
    }

    // write sum of all pairs of node importance (added by Fang)
    /*
    {
        BufferedWriter wr = new BufferedWriter(new FileWriter("sum_of_all_pairs_importance.txt", true));
        ImportanceGraph orig = wrap.getImportanceGraph();
        double sum = 0;

        for (int i = 0; i <= orig.getMaxNodeId(); i++) {
            for (int j = i + 1; j <= orig.getMaxNodeId(); j++) {
                sum = sum + wrap.getImportance(i) * wrap.getImportance(j);
            }
        }

        wr.write("" + sum);
        wr.write("\n");
        wr.close();
    }
    */

    // write uncompressed edges
    {
        BufferedWriter wr = new BufferedWriter(new FileWriter("edges.txt", false));
        ImportanceGraph orig = wrap.getImportanceGraph();
        ImportanceGraph ucom = merger.getUncompressedGraph();
        for (int i = 0; i <= orig.getMaxNodeId(); i++) {
            String iname = wrap.intToNode(i).toString();
            HashSet<Integer> ne = new HashSet<Integer>();
            ne.addAll(orig.getNeighbors(i));
            ne.addAll(ucom.getNeighbors(i));
            for (int j : ne) {
                if (i < j)
                    continue;
                String jname = wrap.intToNode(j).toString();
                double a = orig.getEdgeWeight(i, j);
                double b = ucom.getEdgeWeight(i, j);
                wr.write(iname + " " + jname + " " + a + " " + b + " " + Math.abs(a - b));
                wr.write("\n");
            }
        }
        wr.close();
    }
    // write distance
    {
        // BufferedWriter wr = new BufferedWriter(new
        // FileWriter("distance.txt",false));
        BufferedWriter wr = new BufferedWriter(new FileWriter("distance.txt", true)); //modified by Fang

        ImportanceGraph orig = wrap.getImportanceGraph();
        ImportanceGraph ucom = merger.getUncompressedGraph();
        double error = 0;
        for (int i = 0; i <= orig.getMaxNodeId(); i++) {
            HashSet<Integer> ne = new HashSet<Integer>();
            ne.addAll(orig.getNeighbors(i));
            ne.addAll(ucom.getNeighbors(i));
            for (int j : ne) {
                if (i <= j)
                    continue;
                double a = orig.getEdgeWeight(i, j);
                double b = ucom.getEdgeWeight(i, j);
                error += (a - b) * (a - b) * wrap.getImportance(i) * wrap.getImportance(j);
                // modify by Fang: multiply imp(u)imp(v)

            }
        }
        error = Math.sqrt(error);
        // error = Math.sqrt(error / 2); // modified by Fang: the error of each edge is counted twice
        wr.write("" + error);
        wr.write("\n");
        wr.close();
    }
    // write sizes
    {
        ImportanceGraph orig = wrap.getImportanceGraph();
        ImportanceGraph comp = merger.getCurrentGraph();
        // BufferedWriter wr = new BufferedWriter(new
        // FileWriter("sizes.txt",false));
        BufferedWriter wr = new BufferedWriter(new FileWriter("sizes.txt", true)); //modified by Fang

        wr.write(orig.getNodeCount() + " " + orig.getEdgeCount() + " " + comp.getNodeCount() + " "
                + comp.getEdgeCount());
        wr.write("\n");
        wr.close();
    }
    //write time
    {
        System.out.println("writing time");
        BufferedWriter wr = new BufferedWriter(new FileWriter("time.txt", true)); //modified by Fang
        double secs = (endMillis - startMillis) * 0.001;
        wr.write("" + secs + "\n");
        wr.close();
    }

    //write change of connectivity for edge-oriented case       // added by Fang
    {
        if (cmd.hasOption("connectivity")) {

            BufferedWriter wr = new BufferedWriter(new FileWriter("connectivity.txt", true));
            ImportanceGraph orig = wrap.getImportanceGraph();
            ImportanceGraph ucom = merger.getUncompressedGraph();

            double diff = 0;

            for (int i = 0; i <= orig.getMaxNodeId(); i++) {
                ProbDijkstra pdori = new ProbDijkstra(orig, i);
                ProbDijkstra pducom = new ProbDijkstra(ucom, i);

                for (int j = i + 1; j <= orig.getMaxNodeId(); j++) {
                    double oriconn = pdori.getProbTo(j);
                    double ucomconn = pducom.getProbTo(j);

                    diff = diff + (oriconn - ucomconn) * (oriconn - ucomconn) * wrap.getImportance(i)
                            * wrap.getImportance(j);

                }
            }

            diff = Math.sqrt(diff);
            wr.write("" + diff);
            wr.write("\n");
            wr.close();

        }
    }

    //write output graph
    {
        BMGraph output = bmg; // new BMGraph(bmg);

        int no = 0;
        BMNode[] nodes = new BMNode[merger.getGroups().size()];
        for (ArrayList<Integer> gr : merger.getGroups()) {
            BMNode bmgroup = new BMNode("Group", "" + (no + 1));
            bmgroup.setAttributes(new HashMap<String, String>());
            bmgroup.put("autoedges", "0");
            nodes[no] = bmgroup;
            no++;
            if (gr.size() == 0)
                continue;
            for (int x : gr) {
                BMNode nod = output.getNode(wrap.intToNode(x).toString());
                BMEdge belongs = new BMEdge(nod, bmgroup, "belongs_to");
                output.ensureHasEdge(belongs);
            }
            output.ensureHasNode(bmgroup);
        }
        for (int i = 0; i < nodes.length; i++) {
            for (int x : merger.getCurrentGraph().getNeighbors(i)) {
                if (x == i) {
                    nodes[x].put("selfedge", "" + merger.getCurrentGraph().getEdgeWeight(i, x));
                    //ge.put("goodness", ""+merger.getCurrentGraph().getEdgeWeight(i, x));
                    continue;
                }
                BMEdge ge = new BMEdge(nodes[x], nodes[i], "groupedge");
                ge.setAttributes(new HashMap<String, String>());
                ge.put("goodness", "" + merger.getCurrentGraph().getEdgeWeight(i, x));
                output.ensureHasEdge(ge);
            }
        }
        System.out.println(output.getGroupNodes());

        BMGraphUtils.writeBMGraph(output, "output.bmg");
    }
}

From source file:com.soulgalore.velocity.MergeXMLWithVelocity.java

/**
 * Merges an XML file with a Velocity template. The third parameter is the properties file, whose
 * key/value pairs are added to the Velocity context. The fourth parameter is the output file. If not
 * included, the result is printed to System.out.
 *
 * @param args are file.xml template.vm properties.prop [output.file]
 * @throws JDOMException if the xml file couldn't be parsed
 * @throws IOException couldn't find one of the files
 */
public static void main(String[] args) throws JDOMException, IOException {

    if (args.length < 3) {
        System.out.println(
                "Missing input files. XMLToVelocity file.xml template.vm prop.properties [output.file]");
        return;
    }

    final Properties properties = new Properties();
    final File prop = new File(args[2]);

    if (prop.exists())
        properties.load(new FileReader(prop));
    else
        throw new IOException("Couldn't find the property file:" + prop.getAbsolutePath());

    final MergeXMLWithVelocity xtv = new MergeXMLWithVelocity(properties);
    xtv.create(args);
}

From source file:com.twentyn.patentScorer.ScoreMerger.java

public static void main(String[] args) throws Exception {
    System.out.println("Starting up...");
    System.out.flush();
    Options opts = new Options();
    opts.addOption(Option.builder("h").longOpt("help").desc("Print this help message and exit").build());

    opts.addOption(Option.builder("r").longOpt("results").required().hasArg()
            .desc("A directory of search results to read").build());
    opts.addOption(Option.builder("s").longOpt("scores").required().hasArg()
            .desc("A directory of patent classification scores to read").build());
    opts.addOption(Option.builder("o").longOpt("output").required().hasArg()
            .desc("The output file where results will be written.").build());

    HelpFormatter helpFormatter = new HelpFormatter();
    CommandLineParser cmdLineParser = new DefaultParser();
    CommandLine cmdLine = null;
    try {
        cmdLine = cmdLineParser.parse(opts, args);
    } catch (ParseException e) {
        System.out.println("Caught exception when parsing command line: " + e.getMessage());
        helpFormatter.printHelp("DocumentIndexer", opts);
        System.exit(1);
    }

    if (cmdLine.hasOption("help")) {
        helpFormatter.printHelp("DocumentIndexer", opts);
        System.exit(0);
    }
    File scoresDirectory = new File(cmdLine.getOptionValue("scores"));
    if (cmdLine.getOptionValue("scores") == null || !scoresDirectory.isDirectory()) {
        LOGGER.error("Not a directory of score files: " + cmdLine.getOptionValue("scores"));
    }

    File resultsDirectory = new File(cmdLine.getOptionValue("results"));
    if (cmdLine.getOptionValue("results") == null || !resultsDirectory.isDirectory()) {
        LOGGER.error("Not a directory of results files: " + cmdLine.getOptionValue("results"));
    }

    FileWriter outputWriter = new FileWriter(cmdLine.getOptionValue("output"));

    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
    objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);

    FilenameFilter jsonFilter = new FilenameFilter() {
        public final Pattern JSON_PATTERN = Pattern.compile("\\.json$");

        public boolean accept(File dir, String name) {
            return JSON_PATTERN.matcher(name).find();
        }
    };

    Map<String, PatentScorer.ClassificationResult> scores = new HashMap<>();
    LOGGER.info("Reading scores from directory at " + scoresDirectory.getAbsolutePath());
    for (File scoreFile : scoresDirectory.listFiles(jsonFilter)) {
        BufferedReader reader = new BufferedReader(new FileReader(scoreFile));
        int count = 0;
        String line;
        while ((line = reader.readLine()) != null) {
            PatentScorer.ClassificationResult res = objectMapper.readValue(line,
                    PatentScorer.ClassificationResult.class);
            scores.put(res.docId, res);
            count++;
        }
        LOGGER.info("Read " + count + " scores from " + scoreFile.getAbsolutePath());
    }

    Map<String, List<DocumentSearch.SearchResult>> synonymsToResults = new HashMap<>();
    Map<String, List<DocumentSearch.SearchResult>> inchisToResults = new HashMap<>();
    LOGGER.info("Reading results from directory at " + resultsDirectory);
    // With help from http://stackoverflow.com/questions/6846244/jackson-and-generic-type-reference.
    JavaType resultsType = objectMapper.getTypeFactory().constructCollectionType(List.class,
            DocumentSearch.SearchResult.class);

    List<File> resultsFiles = Arrays.asList(resultsDirectory.listFiles(jsonFilter));
    Collections.sort(resultsFiles, new Comparator<File>() {
        @Override
        public int compare(File o1, File o2) {
            return o1.getName().compareTo(o2.getName());
        }
    });
    for (File resultsFile : resultsFiles) {
        BufferedReader reader = new BufferedReader(new FileReader(resultsFile));
        CharBuffer buffer = CharBuffer.allocate(Long.valueOf(resultsFile.length()).intValue());
        int bytesRead = reader.read(buffer);
        LOGGER.info("Read " + bytesRead + " bytes from " + resultsFile.getName() + " (length is "
                + resultsFile.length() + ")");
        List<DocumentSearch.SearchResult> results = objectMapper.readValue(new CharArrayReader(buffer.array()),
                resultsType);

        LOGGER.info("Read " + results.size() + " results from " + resultsFile.getAbsolutePath());

        int count = 0;
        for (DocumentSearch.SearchResult sres : results) {
            for (DocumentSearch.ResultDocument resDoc : sres.getResults()) {
                String docId = resDoc.getDocId();
                PatentScorer.ClassificationResult classificationResult = scores.get(docId);
                if (classificationResult == null) {
                    LOGGER.warn("No classification result found for " + docId);
                } else {
                    resDoc.setClassifierScore(classificationResult.getScore());
                }
            }
            if (!synonymsToResults.containsKey(sres.getSynonym())) {
                synonymsToResults.put(sres.getSynonym(), new ArrayList<DocumentSearch.SearchResult>());
            }
            synonymsToResults.get(sres.getSynonym()).add(sres);
            count++;
            if (count % 1000 == 0) {
                LOGGER.info("Processed " + count + " search result documents");
            }
        }
    }

    Comparator<DocumentSearch.ResultDocument> resultDocumentComparator = new Comparator<DocumentSearch.ResultDocument>() {
        @Override
        public int compare(DocumentSearch.ResultDocument o1, DocumentSearch.ResultDocument o2) {
            int cmp = o2.getClassifierScore().compareTo(o1.getClassifierScore());
            if (cmp != 0) {
                return cmp;
            }
            cmp = o2.getScore().compareTo(o1.getScore());
            return cmp;
        }
    };

    for (Map.Entry<String, List<DocumentSearch.SearchResult>> entry : synonymsToResults.entrySet()) {
        DocumentSearch.SearchResult newSearchRes = null;
        // Merge all result documents into a single search result.
        for (DocumentSearch.SearchResult sr : entry.getValue()) {
            if (newSearchRes == null) {
                newSearchRes = sr;
            } else {
                newSearchRes.getResults().addAll(sr.getResults());
            }
        }
        if (newSearchRes == null || newSearchRes.getResults() == null) {
            LOGGER.error("Search results for " + entry.getKey() + " are null.");
            continue;
        }
        Collections.sort(newSearchRes.getResults(), resultDocumentComparator);
        if (!inchisToResults.containsKey(newSearchRes.getInchi())) {
            inchisToResults.put(newSearchRes.getInchi(), new ArrayList<DocumentSearch.SearchResult>());
        }
        inchisToResults.get(newSearchRes.getInchi()).add(newSearchRes);
    }

    List<String> sortedKeys = new ArrayList<String>(inchisToResults.keySet());
    Collections.sort(sortedKeys);
    List<GroupedInchiResults> orderedResults = new ArrayList<>(sortedKeys.size());
    Comparator<DocumentSearch.SearchResult> synonymSorter = new Comparator<DocumentSearch.SearchResult>() {
        @Override
        public int compare(DocumentSearch.SearchResult o1, DocumentSearch.SearchResult o2) {
            return o1.getSynonym().compareTo(o2.getSynonym());
        }
    };
    for (String inchi : sortedKeys) {
        List<DocumentSearch.SearchResult> res = inchisToResults.get(inchi);
        Collections.sort(res, synonymSorter);
        orderedResults.add(new GroupedInchiResults(inchi, res));
    }

    objectMapper.writerWithView(Object.class).writeValue(outputWriter, orderedResults);
    outputWriter.close();
}

From source file:TestBufferStreamGenomicsDBImporter.java

/**
 * Sample driver code for testing Java VariantContext write API for GenomicsDB
 * The code shows two ways of using the API
 *   (a) Iterator<VariantContext>//from   ww  w .  ja v a 2  s.  c  o  m
 *   (b) Directly adding VariantContext objects
 * If "-iterators" is passed as the second argument, method (a) is used.
 */
public static void main(final String[] args) throws IOException, GenomicsDBException, ParseException {
    if (args.length < 2) {
        System.err.println("For loading: [-iterators] <loader.json> "
                + "<stream_name_to_file.json> [bufferCapacity rank lbRowIdx ubRowIdx useMultiChromosomeIterator]");
        System.exit(-1);
    }
    int argsLoaderFileIdx = 0;
    if (args[0].equals("-iterators"))
        argsLoaderFileIdx = 1;
    //Buffer capacity
    long bufferCapacity = (args.length >= argsLoaderFileIdx + 3) ? Integer.parseInt(args[argsLoaderFileIdx + 2])
            : 1024;
    //Specify rank (or partition idx) of this process
    int rank = (args.length >= argsLoaderFileIdx + 4) ? Integer.parseInt(args[argsLoaderFileIdx + 3]) : 0;
    //Specify smallest row idx from which to start loading.
    // This is useful for incremental loading into existing array
    long lbRowIdx = (args.length >= argsLoaderFileIdx + 5) ? Long.parseLong(args[argsLoaderFileIdx + 4]) : 0;
    //Specify largest row idx up to which loading should be performed - for completeness
    long ubRowIdx = (args.length >= argsLoaderFileIdx + 6) ? Long.parseLong(args[argsLoaderFileIdx + 5])
            : Long.MAX_VALUE - 1;
    //Boolean to use MultipleChromosomeIterator
    boolean useMultiChromosomeIterator = (args.length >= argsLoaderFileIdx + 7)
            ? Boolean.parseBoolean(args[argsLoaderFileIdx + 6])
            : false;
    //<loader.json> first arg
    String loaderJSONFile = args[argsLoaderFileIdx];
    GenomicsDBImporter loader = new GenomicsDBImporter(loaderJSONFile, rank, lbRowIdx, ubRowIdx);
    //<stream_name_to_file.json> - useful for the driver only
    //JSON file that contains "stream_name": "vcf_file_path" entries
    FileReader mappingReader = new FileReader(args[argsLoaderFileIdx + 1]);
    JSONParser parser = new JSONParser();
    LinkedHashMap streamNameToFileName = (LinkedHashMap) parser.parse(mappingReader, new LinkedHashFactory());
    ArrayList<VCFFileStreamInfo> streamInfoVec = new ArrayList<VCFFileStreamInfo>();
    long rowIdx = 0;
    for (Object currObj : streamNameToFileName.entrySet()) {
        Map.Entry<String, String> entry = (Map.Entry<String, String>) currObj;
        VCFFileStreamInfo currInfo = new VCFFileStreamInfo(entry.getValue(), loaderJSONFile, rank,
                useMultiChromosomeIterator);

        /** The following 2 lines are not mandatory - use initializeSampleInfoMapFromHeader()
         * iff you know for sure that sample names in the VCF header are globally unique
         * across all streams/files. If not, you have 2 options:
         *   (a) specify your own mapping from sample index in the header to SampleInfo object
         *       (unique_name, rowIdx) OR
         *   (b) specify the mapping in the callset_mapping_file (JSON) and pass null to
         *       addSortedVariantContextIterator()
         */
        LinkedHashMap<Integer, GenomicsDBImporter.SampleInfo> sampleIndexToInfo = new LinkedHashMap<Integer, GenomicsDBImporter.SampleInfo>();
        rowIdx = GenomicsDBImporter.initializeSampleInfoMapFromHeader(sampleIndexToInfo, currInfo.mVCFHeader,
                rowIdx);
        int streamIdx = -1;
        if (args[0].equals("-iterators"))
            streamIdx = loader.addSortedVariantContextIterator(entry.getKey(), currInfo.mVCFHeader,
                    currInfo.mIterator, bufferCapacity, VariantContextWriterBuilder.OutputType.BCF_STREAM,
                    sampleIndexToInfo); //pass sorted VC iterators
        else
            //use buffers - VCs will be provided by caller
            streamIdx = loader.addBufferStream(entry.getKey(), currInfo.mVCFHeader, bufferCapacity,
                    VariantContextWriterBuilder.OutputType.BCF_STREAM, sampleIndexToInfo);
        currInfo.mStreamIdx = streamIdx;
        streamInfoVec.add(currInfo);
    }
    if (args[0].equals("-iterators")) {
        //Much simpler interface if using Iterator<VariantContext>
        loader.importBatch();
        assert loader.isDone();
    } else {
        //Must be called after all iterators/streams added - no more iterators/streams
        // can be added once this function is called
        loader.setupGenomicsDBImporter();
        //Counts and tracks buffer streams for which new data must be supplied
        //Initialized to all the buffer streams
        int numExhaustedBufferStreams = streamInfoVec.size();
        int[] exhaustedBufferStreamIdxs = new int[numExhaustedBufferStreams];
        for (int i = 0; i < numExhaustedBufferStreams; ++i)
            exhaustedBufferStreamIdxs[i] = i;
        while (!loader.isDone()) {
            //Add data for streams that were exhausted in the previous round
            for (int i = 0; i < numExhaustedBufferStreams; ++i) {
                VCFFileStreamInfo currInfo = streamInfoVec.get(exhaustedBufferStreamIdxs[i]);
                boolean added = true;
                while (added && (currInfo.mIterator.hasNext() || currInfo.mNextVC != null)) {
                    if (currInfo.mNextVC != null)
                        added = loader.add(currInfo.mNextVC, currInfo.mStreamIdx);
                    if (added)
                        if (currInfo.mIterator.hasNext())
                            currInfo.mNextVC = currInfo.mIterator.next();
                        else
                            currInfo.mNextVC = null;
                }
            }
            loader.importBatch();
            numExhaustedBufferStreams = (int) loader.getNumExhaustedBufferStreams();
            for (int i = 0; i < numExhaustedBufferStreams; ++i)
                exhaustedBufferStreamIdxs[i] = loader.getExhaustedBufferStreamIndex(i);
        }
    }
}

From source file:com.chargebee.JDBC.PhoneBook.Phnbk.java

public static void main(String[] args) throws Exception {
    String source = System.getProperty("user.home") + "/input.csv";
    com.chargebee.JDBC.PhoneBook.Phnbk pb = new com.chargebee.JDBC.PhoneBook.Phnbk();
    CSVParser parser = new CSVParser(new FileReader(source), CSVFormat.EXCEL.withHeader());
    pb.directory(parser);
    parser.close();

}