Example usage for java.io PrintStream close

Introduction

This page collects usage examples for java.io.PrintStream.close(), drawn from the open source projects listed below.

Prototype

public void close() 

Document

Closes the stream. This is done by flushing the stream and then closing the underlying output stream.
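
Since Java 7, PrintStream implements AutoCloseable, so the close() call can also be left to a try-with-resources statement. A minimal sketch (the file name is illustrative):

    try (PrintStream out = new PrintStream("example.txt")) {
        out.println("hello");
    } // close() runs automatically, flushing and closing the underlying stream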

Usage

From source file: com.mtnfog.idyl.e3.sdk.IdylE3StreamingClient.java

@Override
public EntityExtractionResponse stream(final String text) throws IOException {

    if (socket.isClosed()) {
        throw new IllegalStateException("The socket is closed.");
    }

    PrintStream out = new PrintStream(socket.getOutputStream());
    BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()));

    out.print(text);

    String json = in.readLine();

    System.out.println(json);

    in.close();
    out.close();

    return gson.fromJson(json, EntityExtractionResponse.class);

}
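
Two details of this example are easy to miss. PrintStream never throws IOException from its print methods; write failures are recorded internally and must be queried with checkError(). And because out wraps socket.getOutputStream(), out.close() also closes the socket itself. A hedged variant of the write path that surfaces write failures:

    PrintStream out = new PrintStream(socket.getOutputStream(), true); // autoflush on println
    out.print(text);
    if (out.checkError()) {
        // checkError() flushes and reports any IOException the PrintStream swallowed
        throw new IOException("Write to socket failed.");
    }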

From source file: fr.cs.examples.propagation.VisibilityCircle.java

private void run(final File input, final File output, final String separator)
        throws IOException, IllegalArgumentException, OrekitException {

    // read input parameters
    KeyValueFileParser<ParameterKey> parser = new KeyValueFileParser<ParameterKey>(ParameterKey.class);
    parser.parseInput(new FileInputStream(input));

    double minElevation = parser.getAngle(ParameterKey.MIN_ELEVATION);
    double radius = Constants.WGS84_EARTH_EQUATORIAL_RADIUS
            + parser.getDouble(ParameterKey.SPACECRAFT_ALTITUDE);
    int points = parser.getInt(ParameterKey.POINTS_NUMBER);

    // station properties
    double latitude = parser.getAngle(ParameterKey.STATION_LATITUDE);
    double longitude = parser.getAngle(ParameterKey.STATION_LONGITUDE);
    double altitude = parser.getDouble(ParameterKey.STATION_ALTITUDE);
    String name = parser.getString(ParameterKey.STATION_NAME);

    // compute visibility circle
    List<GeodeticPoint> circle = computeCircle(latitude, longitude, altitude, name, minElevation, radius,
            points);

    // create a 2 columns csv file representing the visibility circle
    // in the user home directory, with latitude in column 1 and longitude in column 2
    DecimalFormat format = new DecimalFormat("#00.00000", new DecimalFormatSymbols(Locale.US));
    PrintStream csvFile = new PrintStream(output);
    for (GeodeticPoint p : circle) {
        csvFile.println(format.format(FastMath.toDegrees(p.getLatitude())) + ","
                + format.format(FastMath.toDegrees(p.getLongitude())));
    }
    csvFile.close();

}

From source file: net.spfbl.core.Server.java

private static void log(long time, Core.Level level, String type, Throwable ex) {
    if (ex != null) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream printStream = new PrintStream(baos);
        ex.printStackTrace(printStream);
        printStream.close();
        log(time, level, type, baos.toString(), (String) null);
    }
}
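
Here close() is what guarantees the stack trace is fully flushed into the byte buffer before baos.toString() runs. Both the PrintStream and toString() use the platform default charset above; a sketch of the same idea with an explicit charset (helper name is illustrative):

    static String stackTraceToString(Throwable ex) throws UnsupportedEncodingException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream ps = new PrintStream(baos, true, "UTF-8");
        ex.printStackTrace(ps);
        ps.close(); // flushes the remaining bytes into baos
        return baos.toString("UTF-8");
    }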

From source file: edu.oregonstate.eecs.mcplan.experiments.PolicyComparison.java

@Override
public void finish() {
    try {
        final PrintStream rout = new PrintStream(new File(env_.root_directory, "result.csv"));
        rout.println("key,value");
        rout.println("winner," + end_state.win);
        rout.println("score," + end_state.score);
        rout.close();
    } catch (final FileNotFoundException ex) {
        throw new RuntimeException(ex);
    }
}

From source file: com.meetingninja.csse.database.TaskDatabaseAdapter.java

public static Task createTask(Task t) throws IOException {
    String _url = getBaseUri().build().toString();
    URL url = new URL(_url);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();

    // add request header
    conn.setRequestMethod(IRequest.POST);
    addRequestHeader(conn, false);
    ByteArrayOutputStream json = new ByteArrayOutputStream();
    // this type of print stream allows us to get a string easily
    PrintStream ps = new PrintStream(json);
    // Create a generator to build the JSON string
    JsonGenerator jgen = JFACTORY.createGenerator(ps, JsonEncoding.UTF8);
    // Build JSON Object for Title
    jgen.writeStartObject();
    jgen.writeStringField(Keys.Task.TITLE, t.getTitle());
    jgen.writeStringField(Keys.Task.COMPLETED, Boolean.toString(t.getIsCompleted()));
    jgen.writeStringField(Keys.Task.DESC, t.getDescription());
    jgen.writeStringField(Keys.Task.DEADLINE, Long.toString(t.getEndTimeInMillis()));
    jgen.writeStringField(Keys.Task.DATE_CREATED, t.getDateCreated());
    jgen.writeStringField(Keys.Task.DATE_ASSIGNED, t.getDateAssigned());
    jgen.writeStringField(Keys.Task.CRITERIA, t.getCompletionCriteria());
    jgen.writeStringField(Keys.Task.ASSIGNED_TO, t.getAssignedTo());
    jgen.writeStringField(Keys.Task.ASSIGNED_FROM, t.getAssignedFrom());
    jgen.writeStringField(Keys.Task.CREATED_BY, t.getCreatedBy());
    jgen.writeEndObject();
    jgen.close();

    String payload = json.toString("UTF-8");
    ps.close();
    // Get server response
    sendPostPayload(conn, payload);
    String response = getServerResponse(conn);
    // Parse the response so the generated ID can be copied back. The original
    // snippet left this map empty, so the lookup below could never succeed;
    // ObjectMapper is an assumption about the intended parsing.
    Map<String, String> responseMap = new ObjectMapper().readValue(response,
            new TypeReference<Map<String, String>>() {
            });
    if (responseMap.containsKey(Keys.Task.ID)) {
        t.setID(responseMap.get(Keys.Task.ID));
    }
    return t;
}
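
A subtlety in this example: jgen.close() already closes the underlying PrintStream, because Jackson generators close their target by default (JsonGenerator.Feature.AUTO_CLOSE_TARGET), so the later ps.close() is a harmless second close of an already-closed stream. If the PrintStream had to stay usable after the generator is closed, the feature could be disabled; a sketch:

    JsonGenerator jgen = JFACTORY.createGenerator(ps, JsonEncoding.UTF8);
    jgen.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); // keep ps open past jgen.close()
    // ... write fields as above ...
    jgen.close(); // flushes the generator, leaves ps open
    ps.close();   // now this call actually closes the stream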

From source file: com.adaptris.util.stream.Slf4jLoggingOutputStreamTest.java

@Test
public void testLogGreaterThanBuffer() throws Exception {
    PrintStream out = new PrintStream(new Slf4jLoggingOutputStream(LogLevel.INFO));
    StringBuffer sb = new StringBuffer();
    while (sb.length() < 2048) {
        sb.append(TEXT);
    }
    out.println(sb.toString());
    out.flush();
    out.close();
}
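
The explicit flush() before close() is redundant here: PrintStream.close() flushes the stream before closing the underlying output stream. A separate flush() only matters when the stream must stay open afterwards, for example:

    PrintStream out = new PrintStream(new Slf4jLoggingOutputStream(LogLevel.INFO));
    out.println("first batch");
    out.flush(); // push buffered bytes to the logger now, keep writing afterwards
    out.println("second batch");
    out.close(); // implies a final flush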

From source file: com.facebook.stetho.dumpapp.RawDumpappHandler.java

@Override
protected HttpEntity getResponseEntity(HttpRequest request, InputStream bufferedInput, HttpResponse response)
        throws IOException {
    ByteArrayOutputStream stdoutBuffer = new ByteArrayOutputStream();

    try {
        PrintStream stdout = new PrintStream(stdoutBuffer);
        try {
            ByteArrayOutputStream stderrBuffer = new ByteArrayOutputStream();
            PrintStream stderr = new PrintStream(stderrBuffer);

            try {
                int exitCode = getDumper().dump(bufferedInput, stdout, stderr, getArgs(request));
                response.addHeader(RESPONSE_HEADER_EXIT_CODE, String.valueOf(exitCode));
            } finally {
                stderr.close();
                if (stderrBuffer.size() > 0) {
                    System.err.write(stderrBuffer.toByteArray());
                }
            }
        } finally {
            stdout.close();
        }
    } finally {
        bufferedInput.close();
    }

    return createResponseEntity(stdoutBuffer.toByteArray());
}
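
The nested try/finally blocks guarantee that both streams are closed, and the buffered stderr bytes forwarded, even when dump() throws. On Java 7+ the same guarantees read more compactly with try-with-resources; a sketch of the middle section:

    ByteArrayOutputStream stderrBuffer = new ByteArrayOutputStream();
    try (PrintStream stdout = new PrintStream(stdoutBuffer);
            PrintStream stderr = new PrintStream(stderrBuffer)) {
        int exitCode = getDumper().dump(bufferedInput, stdout, stderr, getArgs(request));
        response.addHeader(RESPONSE_HEADER_EXIT_CODE, String.valueOf(exitCode));
    } finally {
        // both streams are closed before this block runs, so the buffer is complete
        if (stderrBuffer.size() > 0) {
            System.err.write(stderrBuffer.toByteArray());
        }
    }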

From source file: edu.toronto.cs.ontools.taxonomy.AbstractTaxonomy.java

public void display(File out) {
    try {
        PrintStream p = new PrintStream(out);
        display(p);
        p.close();
    } catch (FileNotFoundException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        display(System.out);
    }
}
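
One rule this example gets right: close() is only called on the stream the method itself opened, never on System.out. Closing System.out would silently disable all later console output, since PrintStream suppresses the resulting IOExceptions. The same method with try-with-resources, keeping the fallback outside the managed block:

    public void display(File out) {
        try (PrintStream p = new PrintStream(out)) {
            display(p); // p is closed when the block exits
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            display(System.out); // do not close streams you did not open
        }
    }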

From source file: org.apache.hadoop.fs.dfsioe.TestDFSIOEnh.java

protected static void runAnalyse(FileSystem fs, Configuration fsConfig, int testType, long execTime,
        String resFileName, int nrFiles, long fileSize, long tStart, int plotInterval, long sampleUnit,
        int threshold, String tputResFileName, boolean tputReportEach, boolean tputReportTotal)
        throws IOException {
    long t1 = System.currentTimeMillis();
    Path reduceFile;
    if (testType == TEST_TYPE_WRITE)
        reduceFile = new Path(DfsioeConfig.getInstance().getWriteDir(fsConfig), "part-00000");
    else
        reduceFile = new Path(DfsioeConfig.getInstance().getReadDir(fsConfig), "part-00000");

    int maxslot = (int) (execTime / plotInterval) + 1;
    int[] concurrency = new int[maxslot + 1];
    double[] bytesTotal = new double[maxslot + 1];
    for (int i = 0; i < maxslot + 1; i++) {
        bytesTotal[i] = 0;
        concurrency[i] = 0;
    }

    BufferedReader rd = null;
    long tasks = 0;
    long size = 0;
    long time = 0;
    float rate = 0;
    float sqrate = 0;
    float loggingTime = 0;
    try {
        rd = new BufferedReader(new InputStreamReader(new DataInputStream(fs.open(reduceFile))));
        String s = null;
        while ((s = rd.readLine()) != null) {
            StringTokenizer tokens = new StringTokenizer(s, " \t\n\r\f%");
            String label = tokens.nextToken();
            if (label.endsWith(":tasks")) {
                tasks = Long.parseLong(tokens.nextToken());
            } else if (label.endsWith(":size")) {
                size = Long.parseLong(tokens.nextToken());
            } else if (label.endsWith(":time")) {
                time = Long.parseLong(tokens.nextToken());
            } else if (label.endsWith(":rate")) {
                rate = Float.parseFloat(tokens.nextToken());
            } else if (label.endsWith(":sqrate")) {
                sqrate = Float.parseFloat(tokens.nextToken());
            } else if (label.endsWith(":io_start_end")) {
                String[] t = tokens.nextToken().split(";");
                int start = (int) ((Long.parseLong(t[0]) - tStart) / plotInterval) + 1;
                int end = (int) ((Long.parseLong(t[1]) - tStart) / plotInterval) - 1;
                if (start < 0)
                    start = 0;
                for (int i = start; i <= end; i++) {
                    if (i > concurrency.length - 1)
                        break;
                    concurrency[i]++;
                }
            } else if (label.endsWith(":logging_time")) {
                loggingTime = Float.parseFloat(tokens.nextToken());
            } else if (label.endsWith(":tput_samples")) {
                break;
            }
        }
    } finally {
        rd.close();
    }
    double med = rate / 1000 / tasks;
    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med * med));
    String resultLines[] = {
            "----- TestDFSIO ----- : "
                    + ((testType == TEST_TYPE_WRITE) ? "write" : (testType == TEST_TYPE_READ) ? "read" : "unknown"),
            "           Date & time: " + new Date(System.currentTimeMillis()),
            "       Number of files: " + tasks,
            "Total MBytes processed: " + size / MEGA,
            "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
            "Average IO rate mb/sec: " + med,
            " IO rate std deviation: " + stdDev,
            "    Test exec time sec: " + (float) execTime / 1000,
            "" };
    String enhResultLines[] = {
            "-- Extended Metrics --   : "
                    + ((testType == TEST_TYPE_WRITE) ? "write" : (testType == TEST_TYPE_READ) ? "read" : "unknown"),
            "Result file name         : " + tputResFileName,
            "Sampling overhead        : " + (loggingTime / time) * 100 + "%",
            "Reference Start Time     : " + String.valueOf(tStart) };

    PrintStream res = new PrintStream(new FileOutputStream(new File(resFileName), true));
    for (int i = 0; i < resultLines.length; i++) {
        LOG.info(resultLines[i]);
        res.println(resultLines[i]);
    }
    for (int i = 0; i < enhResultLines.length; i++) {
        LOG.info(enhResultLines[i]);
        res.println(enhResultLines[i]);
    }

    try {
        fs.delete(DfsioeConfig.getInstance().getReportDir(fsConfig), true);
        //set up env
        Configuration conf2 = new Configuration(fsConfig);
        conf2.setLong("ana_tStart", tStart);
        conf2.setInt("ana_plotInterval", plotInterval);
        conf2.setLong("ana_sampleUnit", sampleUnit);
        conf2.setLong("ana_execTime", execTime);
        conf2.setLong("ana_fileSize", fileSize);

        Job job = new Job(conf2, "Result Analyzer");
        job.setJarByClass(Analyzer.class);
        job.setMapperClass(_Mapper.class);
        job.setReducerClass(_Reducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        //          job.setNumReduceTasks(1);
        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, reduceFile);
        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,
                DfsioeConfig.getInstance().getReportDir(fsConfig));
        job.waitForCompletion(true);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ClassNotFoundException e) {
        e.printStackTrace();
    } finally {
        fs.delete(DfsioeConfig.getInstance().getReportTmp(fsConfig), true);
        FileUtil.copyMerge(fs, DfsioeConfig.getInstance().getReportDir(fsConfig), fs,
                DfsioeConfig.getInstance().getReportTmp(fsConfig), false, fsConfig, null);
        LOG.info("remote report file " + DfsioeConfig.getInstance().getReportTmp(fsConfig) + " merged.");
        BufferedReader lines = new BufferedReader(new InputStreamReader(
                new DataInputStream(fs.open(DfsioeConfig.getInstance().getReportTmp(fsConfig)))));
        String line = null;
        while ((line = lines.readLine()) != null) {
            StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
            tokens.nextToken();
            String ss = tokens.nextToken();
            String[] str = ss.split(",");
            int idx = Integer.parseInt(str[0]);
            double val = Double.parseDouble(str[1]);
            assert idx <= maxslot;
            bytesTotal[idx] += val;
        }
        lines.close();
        if (tputReportEach) {
            FileUtil.copy(fs, DfsioeConfig.getInstance().getReportTmp(fsConfig),
                    new File(tputResFileName.split("\\.")[0] + "test_io_.csv"), false, fsConfig);
            LOG.info("*test_io_.csv fetched to local fs.");
        }
        //calculate the aggregated throughput
        double[] bytesChanged = new double[maxslot + 1];
        for (int i = 0; i <= maxslot - 1; i++)
            bytesChanged[i] = bytesTotal[i + 1] - bytesTotal[i];
        bytesChanged[maxslot] = 0;

        if (tputReportTotal) {
            PrintStream res2 = new PrintStream(new FileOutputStream(new File(tputResFileName), true));
            for (int ri = 0; ri <= maxslot; ri++)
                res2.println(ri + "," + bytesTotal[ri] + "," + bytesChanged[ri]);
            res2.close();
        }
        String unit = "";
        if (sampleUnit == KILO)
            unit = "kb";
        else if (sampleUnit == MEGA)
            unit = "mb";
        else if (sampleUnit == 1)
            unit = "b";
        else if (sampleUnit == GIGA)
            unit = "gb";

        String[] tputResultLines = calcSummary(bytesChanged, concurrency, threshold, unit);
        for (int j = 0; j < tputResultLines.length; j++) {
            LOG.info(tputResultLines[j]);
            res.println(tputResultLines[j]);
        }
    }
    res.println("\n-- Result Analyse -- : " + ((System.currentTimeMillis() - t1) / 1000) + "s");
    res.close();
}
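
Note the append flag: wrapping a FileOutputStream opened with append set to true lets the PrintStream add to an existing results file rather than truncating it. Since this method can exit through several paths, the close() call is safest when tied to a try block; a condensed sketch of that structure (analysis body elided):

    PrintStream res = new PrintStream(new FileOutputStream(new File(resFileName), true));
    try {
        // ... print result lines and run the analysis job as above ...
    } finally {
        res.close(); // flushes and closes even if the analysis throws
    }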

From source file: gov.llnl.ontology.mapreduce.stats.WordnetShortestPathMR.java

/**
 * {@inheritDoc}
 */
public int run(String[] args) throws Exception {
    // Set up and validate the arguments.
    ArgOptions options = new ArgOptions();
    options.addOption('w', "wordnetDir", "The directory path to the wordnet data files", true, "PATH",
            "Required");

    options.parseOptions(args);
    if (!options.hasOption('w')) {
        System.err.println("usage: java WordnetShortestPathMR [OPTIONS] <outdir>\n" + options.prettyPrint());
        return 1;
    }

    // Open the wordnet reader and gather the set of all Synsets known by
    // the ontology.
    OntologyReader reader = WordNetCorpusReader.initialize(options.getStringOption('w'));
    Set<Synset> synsetSet = new HashSet<Synset>();
    for (String lemma : reader.wordnetTerms())
        for (Synset synset : reader.getSynsets(lemma))
            synsetSet.add(synset);

    // Compute each pairing of Synsets and write that pairing to a file in
    // HDFS.
    Synset[] synsets = synsetSet.toArray(new Synset[0]);
    PrintStream outStream = createPrintStream();
    for (int i = 0; i < synsets.length; ++i)
        for (int j = i + 1; j < synsets.length; ++j)
            outStream.printf("%s|%s\n", synsets[i].getName(), synsets[j].getName());
    outStream.close();

    // Store the wordnet directory information so that the mappers can load
    // it up.  They need it to figure out the shortest path information.
    Configuration conf = getConf();
    conf.set(WORDNET, options.getStringOption('w'));

    // Setup the job information.
    Job job = new Job(conf, "Compute Wordnet Shortest Paths");
    job.setJarByClass(WordnetShortestPathMR.class);

    job.setMapperClass(WordnetShortestPathMapper.class);

    // The input file will be the temporary file created with the synset
    // pairings.
    job.setInputFormatClass(LineDocInputFormat.class);
    FileInputFormat.addInputPath(job, new Path(TEMP_TERM_PAIR_PATH));

    // The mappers do all of the real work, so we just write their output
    // straight to disk.
    job.setCombinerClass(Reducer.class);
    job.setReducerClass(Reducer.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    TextOutputFormat.setOutputPath(job, new Path(options.getPositionalArg(0)));

    // Start the job.
    job.waitForCompletion(true);

    return 0;
}
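
The close() before job submission matters for correctness here: the synset pairs are buffered inside the PrintStream, and the mappers read TEMP_TERM_PAIR_PATH, so the buffer must be flushed to the file before the job starts. createPrintStream() is not shown in the snippet; a plausible shape, offered purely as an assumption:

    // Hypothetical helper; the real implementation is not shown above.
    private PrintStream createPrintStream() throws IOException {
        FileSystem fs = FileSystem.get(getConf());
        return new PrintStream(fs.create(new Path(TEMP_TERM_PAIR_PATH)));
    }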