Example usage for java.io PrintStream close

List of usage examples for java.io PrintStream close

Introduction

On this page you can find usage examples for java.io.PrintStream.close().

Prototype

public void close() 

Document

Closes the stream. This is done by flushing the stream and then closing the underlying output stream.
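
As a quick orientation before the real-world examples below, here is a minimal, hypothetical sketch (the file name example.txt is only an illustration). Since Java 7, PrintStream implements AutoCloseable, so try-with-resources calls close() automatically; the manual finally-block form is shown for comparison.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources: close() runs automatically, even if println throws
        try (PrintStream ps = new PrintStream(new FileOutputStream("example.txt"))) {
            ps.println("hello");
        }

        // manual form: close() in a finally block
        PrintStream ps2 = new PrintStream(new FileOutputStream("example.txt"));
        try {
            ps2.println("hello again");
        } finally {
            ps2.close();
        }
    }
}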

Usage

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.Analyzer.java

/**
 * Saves the analysis of all data recorded in this analyzer to the
 * specified file.
 * 
 * @param file the file to which the analysis is saved
 * @return a reference to this analyzer
 * @throws IOException if an I/O error occurred
 */
public Analyzer saveAnalysis(File file) throws IOException {
    PrintStream ps = null;

    try {
        ps = new PrintStream(new BufferedOutputStream(new FileOutputStream(file)));

        printAnalysis(ps);
    } finally {
        if (ps != null) {
            ps.close();
        }
    }

    return this;
}
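
The null-check-in-finally idiom above predates Java 7. A sketch of the same method using try-with-resources (assuming the surrounding Analyzer class and its printAnalysis method) could look like this:

public Analyzer saveAnalysis(File file) throws IOException {
    // the try-with-resources statement closes ps, and with it the wrapped streams
    try (PrintStream ps = new PrintStream(new BufferedOutputStream(new FileOutputStream(file)))) {
        printAnalysis(ps);
    }
    return this;
}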

From source file:fr.inrialpes.exmo.align.cli.GroupEval.java

/**
 * This not only prints the results but also computes the averages.
 *
 * @param result the vector of result vectors to be printed
 */
public void print(Vector<Vector<Object>> result) {
    PrintStream writer = null;
    if (outputfilename == null) {
        writer = System.out;
    } else {
        try {
            writer = new PrintStream(new FileOutputStream(outputfilename));
        } catch (FileNotFoundException fnfex) {
            logger.error("Cannot open file", fnfex);
            return;
        }
    }
    try {
        if (type.equals("html"))
            printHTML(result, writer);
        else if (type.equals("tex"))
            printLATEX(result, writer);
        else if (type.equals("triangle"))
            printTRIANGLE(result, writer);
    } finally {
        if (writer != System.out) {
            // closing System.out here would silence all subsequent console output
            writer.close();
        }
    }
}

From source file:com.crawljax.plugins.aji.executiontracer.JSExecutionTracer.java

@Override
public void postCrawling(CrawlSession session) {
    try {
        PrintStream output = new PrintStream(getOutputFolder() + getAssertionFilename());

        /* save the current System.out for later usage */
        PrintStream oldOut = System.out;
        /* redirect it to the file */
        System.setOut(output);

        /* don't print all the useless stuff */
        Daikon.dkconfig_quiet = true;
        Daikon.noversion_output = true;

        List<String> arguments = allTraceFiles();

        /*
         * TODO: Frank, fix this hack (it is done because of Daikon calling cleanup before init)
         */
        arguments.add("-o");
        arguments.add(getOutputFolder() + "daikon.inv.gz");
        arguments.add("--format");
        arguments.add("javascript");
        arguments.add("--config_option");
        arguments.add("daikon.FileIO.unmatched_procedure_entries_quiet=true");
        arguments.add("--config_option");
        arguments.add("daikon.FileIO.ignore_missing_enter=true");

        /* start daikon */
        Daikon.mainHelper(arguments.toArray(new String[0]));

        /* Restore the old system.out */
        System.setOut(oldOut);

        /* close the output file */
        output.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
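
One caveat in the example above: if Daikon.mainHelper throws, System.out is never restored and the output file is never closed. A more defensive shape (a sketch, not the project's actual code) keeps both steps in a finally block:

PrintStream output = new PrintStream(getOutputFolder() + getAssertionFilename());
PrintStream oldOut = System.out;
System.setOut(output);
try {
    // ... configure and run Daikon as above ...
} finally {
    System.setOut(oldOut); // always restore the original stdout
    output.close();        // always release the file handle
}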

From source file:edu.msu.cme.rdp.classifier.train.validation.distance.PairwiseSeqDistance.java

public void printSummary(PrintStream outStream) {
    HashMap<String, ArrayList<Double>> rankDistanceMap = new HashMap<String, ArrayList<Double>>();
    outStream.println("\nrank\ttaxonname\ttotalcount\tmean_distance\tstdev");

    for (Taxonomy taxon : distanceMap.keySet()) {
        StdevCal.Std result = StdevCal.calStd(distanceMap.get(taxon));
        outStream.println(taxon.getHierLevel() + "\t" + taxon.getName() + "\t" + result.getTotalCount() + "\t"
                + String.format("%.3f", result.getMean()) + "\t" + String.format("%.3f", result.getStdev()));
        ArrayList<Double> distList = rankDistanceMap.get(taxon.getHierLevel());
        if (distList == null) {
            distList = new ArrayList<Double>();
            distList.addAll(distanceMap.get(taxon));
            rankDistanceMap.put(taxon.getHierLevel(), distList);
        } else {
            distList.addAll(distanceMap.get(taxon));
        }
    }

    outStream.println("\nrank\ttotalcount\tmean_distance\tstdev");
    for (String rank : rankDistanceMap.keySet()) {
        StdevCal.Std result = StdevCal.calStd(rankDistanceMap.get(rank));
        outStream.println(rank + "\t" + result.getTotalCount() + "\t" + String.format("%.3f", result.getMean())
                + "\t" + String.format("%.3f", result.getStdev()));

    }
    outStream.close();
}
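
Note that printSummary closes a stream it did not open; if a caller passes System.out, standard output is shut down for the rest of the process. A hedged alternative is to flush instead and leave the stream's lifetime to the caller:

public void printSummary(PrintStream outStream) {
    // ... print both tables as above ...
    outStream.flush(); // make the output visible, but let the caller decide when to close
}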

From source file:org.apache.hadoop.tools.rumen.TestRumenJobTraces.java

@Test
public void testHadoop20JHParser() throws Exception {
    // Disabled
    if (true)
        return;

    final Configuration conf = new Configuration();
    final FileSystem lfs = FileSystem.getLocal(conf);

    boolean success = false;

    final Path rootInputDir = new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
    final Path rootTempDir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(lfs);

    final Path rootInputPath = new Path(rootInputDir, "rumen/small-trace-test");
    final Path tempDir = new Path(rootTempDir, "TestHadoop20JHParser");
    lfs.delete(tempDir, true);

    final Path inputPath = new Path(rootInputPath, "v20-single-input-log.gz");
    final Path goldPath = new Path(rootInputPath, "v20-single-input-log-event-classes.text.gz");

    InputStream inputLogStream = new PossiblyDecompressedInputStream(inputPath, conf);

    InputStream inputGoldStream = new PossiblyDecompressedInputStream(goldPath, conf);

    BufferedInputStream bis = new BufferedInputStream(inputLogStream);
    bis.mark(10000);
    Hadoop20JHParser parser = new Hadoop20JHParser(bis);

    final Path resultPath = new Path(tempDir, "result.text");

    System.out.println("testHadoop20JHParser sent its output to " + resultPath);

    Compressor compressor;

    FileSystem fs = resultPath.getFileSystem(conf);
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(resultPath);
    OutputStream output;
    if (codec != null) {
        compressor = CodecPool.getCompressor(codec);
        output = codec.createOutputStream(fs.create(resultPath), compressor);
    } else {
        output = fs.create(resultPath);
    }

    PrintStream printStream = new PrintStream(output);

    try {
        assertEquals("Hadoop20JHParser can't parse the test file", true,
                Hadoop20JHParser.canParse(inputLogStream));

        bis.reset();

        HistoryEvent event = parser.nextEvent();

        while (event != null) {
            printStream.println(event.getClass().getCanonicalName());
            event = parser.nextEvent();
        }

        printStream.close();

        LineReader goldLines = new LineReader(inputGoldStream);
        LineReader resultLines = new LineReader(new PossiblyDecompressedInputStream(resultPath, conf));

        int lineNumber = 1;

        try {
            Text goldLine = new Text();
            Text resultLine = new Text();

            int goldRead = goldLines.readLine(goldLine);
            int resultRead = resultLines.readLine(resultLine);

            while (goldRead * resultRead != 0) {
                if (!goldLine.equals(resultLine)) {
                    assertEquals("Type mismatch detected", goldLine, resultLine);
                    break;
                }

                goldRead = goldLines.readLine(goldLine);
                resultRead = resultLines.readLine(resultLine);

                ++lineNumber;
            }

            if (goldRead != resultRead) {
                assertEquals("the " + (goldRead > resultRead ? "gold" : resultRead)
                        + " file contains more text at line " + lineNumber, goldRead, resultRead);
            }

            success = true;
        } finally {
            goldLines.close();
            resultLines.close();

            if (success) {
                lfs.delete(resultPath, false);
            }
        }

    } finally {
        if (parser != null) {
            parser.close();
        } else {
            inputLogStream.close();
        }

        if (inputGoldStream != null) {
            inputGoldStream.close();
        }

        // it's okay to do this twice [if we get an error on input]
        printStream.close();
    }
}
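
The closing comment above is backed by the Closeable contract: invoking close() on an already-closed stream has no effect, so calling printStream.close() both inside the try block and again in the finally block is safe.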

From source file:org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample.java

public int run(String[] args) {
    if (args.length != 7) {
        System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 7.");
        return printUsage();
    }

    Configuration conf = getConf();
    PrintStream out = null;
    try {
        Job job = new Job(conf, "bulk ingest example");
        job.setJarByClass(this.getClass());

        job.setInputFormatClass(TextInputFormat.class);

        job.setMapperClass(MapClass.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(ReduceClass.class);
        job.setOutputFormatClass(AccumuloFileOutputFormat.class);

        Instance instance = new ZooKeeperInstance(args[0], args[1]);
        String user = args[2];
        byte[] pass = args[3].getBytes();
        String tableName = args[4];
        String inputDir = args[5];
        String workDir = args[6];

        Connector connector = instance.getConnector(user, pass);

        TextInputFormat.setInputPaths(job, new Path(inputDir));
        AccumuloFileOutputFormat.setOutputPath(job, new Path(workDir + "/files"));

        FileSystem fs = FileSystem.get(conf);
        out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))));

        Collection<Text> splits = connector.tableOperations().getSplits(tableName, 100);
        for (Text split : splits)
            out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));

        job.setNumReduceTasks(splits.size() + 1);
        out.close();

        job.setPartitionerClass(RangePartitioner.class);
        RangePartitioner.setSplitFile(job, workDir + "/splits.txt");

        job.waitForCompletion(true);
        Path failures = new Path(workDir, "failures");
        fs.delete(failures, true);
        fs.mkdirs(new Path(workDir, "failures"));
        connector.tableOperations().importDirectory(tableName, workDir + "/files", workDir + "/failures",
                false);

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (out != null)
            out.close();
    }

    return 0;
}
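
Two details are worth noting here: the mid-method out.close() guarantees that splits.txt is fully flushed to the file system before the job's RangePartitioner reads it, and the second close() in the finally block is a harmless no-op because closing an already-closed stream has no effect.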

From source file:iDynoOptimizer.MOEAFramework26.src.org.moeaframework.analysis.sensitivity.ResultFileInfo.java

@Override
public void run(CommandLine commandLine) throws Exception {
    Problem problem = null;
    PrintStream output = null;
    ResultFileReader reader = null;

    try {
        // setup the problem
        if (commandLine.hasOption("problem")) {
            problem = ProblemFactory.getInstance().getProblem(commandLine.getOptionValue("problem"));
        } else {
            problem = new ProblemStub(Integer.parseInt(commandLine.getOptionValue("dimension")));
        }

        try {
            // setup the output stream
            if (commandLine.hasOption("output")) {
                output = new PrintStream(new File(commandLine.getOptionValue("output")));
            } else {
                output = System.out;
            }

            // display info for all result files
            for (String filename : commandLine.getArgs()) {
                try {
                    int count = 0;
                    reader = new ResultFileReader(problem, new File(filename));

                    while (reader.hasNext()) {
                        reader.next();
                        count++;
                    }

                    output.println(filename + " " + count);
                } finally {
                    if (reader != null) {
                        reader.close();
                    }
                }
            }
        } finally {
            if ((output != null) && (output != System.out)) {
                output.close();
            }
        }
    } finally {
        if (problem != null) {
            problem.close();
        }
    }
}
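
Note the (output != System.out) guard before close(): when the destination can be either a file or standard output, this guard is the standard way to avoid shutting down System.out, the same pitfall flagged in the GroupEval example above.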

From source file:com.lewa.crazychapter11.MainActivity.java

private void write(String content) {
    try {
        /* openFileOutput mode: MODE_PRIVATE overwrites the file; MODE_APPEND would append to it */
        FileOutputStream fos = openFileOutput(FILE_NAME, MODE_PRIVATE);
        PrintStream ps = new PrintStream(fos);
        ps.println(content);
        ps.close();

    } catch (Exception e) {
        e.printStackTrace();
    }
}
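
PrintStream never throws IOException from its print methods, so the catch block above will not see write failures. If detecting them matters, checkError() is the documented mechanism; a sketch reusing the example's FILE_NAME constant:

FileOutputStream fos = openFileOutput(FILE_NAME, MODE_PRIVATE);
PrintStream ps = new PrintStream(fos);
ps.println(content);
ps.close();
if (ps.checkError()) {
    // a write or flush failed; PrintStream suppressed the underlying IOException
    Log.w("MainActivity", "write to " + FILE_NAME + " failed");
}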

From source file:com.yahoo.ycsb.bulk.hbase.BulkDataGeneratorJob.java

int createSplitsFile(Configuration conf, String splitsFile) throws IOException, InvalidInputException {
    int splitCount = conf.getInt(ARG_KEY_SPLIT_COUNT, 0);

    if (splitCount <= 0) {
        throw new InvalidInputException(
                "Invalid or unspecified split count:" + splitCount + "\nSpecify it in: " + ARG_KEY_SPLIT_COUNT);
    }

    String rowPrefix = conf.get(ARG_KEY_ROW_PREFIX, "row");
    String rowFormat = DataGenerator.getKeyFormat(rowPrefix);
    boolean hashKeys = conf.getBoolean(ARG_KEY_HASH_KEYS, false);
    long start = conf.getInt(ARG_KEY_RANGE_START, 0);
    long end = conf.getInt(ARG_KEY_RANGE_END, 0);

    FileSystem fs = FileSystem.get(conf);
    Path splitsPath = new Path(splitsFile);
    Path plainPath = new Path(splitsFile + "-debug");
    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(splitsPath)));
    PrintStream plain = new PrintStream(new BufferedOutputStream(fs.create(plainPath)));

    if (hashKeys) {
        start = conf.getInt(ARG_KEY_HASHED_RANGE_START, 0);
        end = conf.getInt(ARG_KEY_HASHED_RANGE_END, Integer.MAX_VALUE);
    }

    long rangeSize = Math.max(1, (end - start + 1) / (splitCount + 1));
    long rangeStart = start + rangeSize;

    System.err.println("Generating splits file: " + splitsFile + "\nrangeStart:" + rangeStart + "\nrangeSize: "
            + rangeSize + "\nsplitCount: " + splitCount + "\nrangeEnd: " + end);

    int i = 0;
    try {
        while (rangeStart < end && splitCount > 0) {
            out.println(new String(Base64.encodeBase64(String.format(rowFormat, rangeStart).getBytes())));
            plain.println(String.format(rowFormat, rangeStart));
            rangeStart += rangeSize;
            splitCount--;
            i++;
        }
    } finally {
        out.close();
        plain.close();
    }
    System.err.println("Splits created: " + i);
    return i;
}

From source file:cz.zcu.kiv.eegdatabase.logic.csv.SimpleCSVFactory.java

/**
 * Generates a CSV file from the experiments.
 *
 * @return csv file with experiments
 * @throws IOException - error writing to stream
 */
@Transactional(readOnly = true)
public OutputStream generateExperimentsCsvFile() throws IOException {
    log.debug("Generating csv file from experiments");
    log.debug("Creating output stream");
    String usedHardware = "";
    OutputStream out = new ByteArrayOutputStream();
    PrintStream printStream = new PrintStream(out);
    log.debug("Loading all experiments");
    List<Experiment> experimentList = experimentDao.getAllRecords();
    log.debug("Creating table header");
    printStream.println(CSVUtils.EXPERIMENT_SUBJECT + CSVUtils.SEMICOLON + CSVUtils.EXPERIMENT_GENDER
            + CSVUtils.SEMICOLON + CSVUtils.EXPERIMENT_YEAR_OF_BIRTH + CSVUtils.SEMICOLON
            + CSVUtils.SCENARIO_TITLE + CSVUtils.SEMICOLON + CSVUtils.EXPERIMENT_USED_HARDWARE
            + CSVUtils.SEMICOLON + CSVUtils.EXPERIMENT_DETAILS);
    log.debug("Printing experiments to outputstream");
    for (int i = 0; i < experimentList.size(); i++) {
        usedHardware = getHardware(experimentList.get(i).getHardwares());
        printStream.println(CSVUtils.EEGBASE_SUBJECT_PERSON
                + experimentList.get(i).getPersonBySubjectPersonId().getPersonId() + CSVUtils.SEMICOLON
                + experimentList.get(i).getPersonBySubjectPersonId().getGender() + CSVUtils.SEMICOLON
                + getDateOfBirth(experimentList.get(i).getPersonBySubjectPersonId().getDateOfBirth())
                + CSVUtils.SEMICOLON + experimentList.get(i).getScenario().getTitle() + CSVUtils.SEMICOLON
                + usedHardware + CSVUtils.SEMICOLON + CSVUtils.PROTOCOL_HTTP + domain + CSVUtils.EXPERIMENT_URL
                + experimentList.get(i).getExperimentId());
    }
    log.debug("Close printing stream");
    printStream.close();

    return out;
}
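
Closing the PrintStream here also closes the underlying ByteArrayOutputStream, whose close() is specified to have no effect, so the returned OutputStream remains usable; the close() chiefly ensures that any buffered characters are flushed into the byte array before it is returned.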