Example usage for java.io BufferedWriter write

List of usage examples for java.io BufferedWriter write

Introduction

This page lists example usages of the java.io BufferedWriter write method.

Prototype

public void write(int c) throws IOException 

Document

Writes a single character.
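
Below is a minimal, self-contained sketch of the prototype above together with the more common String overload. The file name example.txt is only an illustration and is not taken from any of the projects listed on this page.

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class BufferedWriterWriteSketch {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the writer even if a write fails
        try (BufferedWriter out = new BufferedWriter(new FileWriter("example.txt"))) {
            out.write('A');              // write(int c): writes a single character
            out.write("BufferedWriter"); // write(String s): writes a whole string
            out.newLine();               // appends the platform line separator
        }
    }
}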

Usage

From source file:com.depas.utils.FileUtils.java

public static void writeTextToFile(String fileName, String contents) throws IOException {
    BufferedWriter out = null;
    try {
        File outFile = new File(fileName);
        out = new BufferedWriter(new FileWriter(outFile));
        out.write(contents);
        out.close();
        out = null;
    } finally {
        if (out != null) {
            out.close();
        }
    }
}
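
For comparison, the same method can be sketched with try-with-resources (Java 7+), which removes the need for the null check in the finally block. This variant is not part of the original source file.

public static void writeTextToFile(String fileName, String contents) throws IOException {
    // the writer is closed automatically, even if write throws
    try (BufferedWriter out = new BufferedWriter(new FileWriter(new File(fileName)))) {
        out.write(contents);
    }
}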

From source file:com.hp.test.framework.generatejellytess.GenerateJellyTests.java

public static void maptestcasesapi(String GID, String Testcase, int expresults, String Model_xml_path)
        throws IOException, ClassNotFoundException {
    Map<String, String> Locators1;
    Map<String, Map<String, String>> mapping_list = new HashMap<>();
    BufferedWriter fw;
    File f = null;
    f = File.createTempFile("tmp", ".xml", new File(mp.getProperty("TEMP_LOCATION")));

    //        String update_exp_results_temp = mp.getProperty("UPDATE_RESULTS");
    //
    //        boolean update_exp_results = false;
    //        if (update_exp_results_temp.equals("yes")) {
    //            update_exp_results = true;
    //
    //        }
    fw = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(f), "UTF-8"));
    fw.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>");
    fw.write(Testcase);
    fw.close();

    String path = f.getAbsolutePath();
    Locators.clear();
    Locators1 = GetLocators(path);
    // f.delete();
    for (String key : Locators1.keySet()) {
        //  System.out.println("key" + key);
        String temp_ar[] = key.split("_");
        String parent = temp_ar[0];
        String child = temp_ar[1];
        String UI_object = temp_ar[2];

        if (!mapping_list.containsKey(parent)) {
            // System.out.println("parent"+parent);
            Map<String, String> innerMap = mapping_list.get(key);
            if (innerMap == null) {
                mapping_list.put(parent, innerMap = new HashMap<>()); // Java version >= 1.7
            }
            innerMap.put(child + ":" + UI_object, Locators1.get(key));

            mapping_list.put(parent, innerMap);
        } else {
            Map<String, String> innerMap = mapping_list.get(parent);
            innerMap.put(child + ":" + UI_object, Locators1.get(key));
            mapping_list.put(parent, innerMap);
            //  mapping_list.put(parent, mapping_list.get(parent)+ "^"+child +":"+ UI_object + ":"+ Locators1.get(key));
        }
    }

    Locators.clear();
    //Map<String, String> mapping_api_list = GetLocators(mp.getProperty("UI_API_MAPPING_XML_PATH"));
    // Generating jelly scripts
    //        String generate_jelly_tests = mp.getProperty("GENERATE_JELLY_TESTS").toLowerCase();

    //   ArrayList order_list = GetRootnodes.GetrootLocators(mp.getProperty("MODEL_XML_PATH"));---Remove
    ArrayList order_list = GetRootnodes.GetrootLocators(Model_xml_path);

    try {

        String jellyFile = temp_jelly_Tests_location + "\\" + GID + "_" + glb_Feature_Name + ".xml";// mp.getProperty("JELLY_TESTS_LOCATION") + "Jelly_" + model_name + "_GID" + GID;
        String[] temp_ar = GID.split("_");
        int TestCase_GID = Integer.valueOf(temp_ar[3]);
        CreateJellyTestCase.createUITests(TestCase_GID, order_list, mapping_list, jellyFile,
                mp.getProperty("OBJECT_REPOSITORY_XML_PATH"));
        CreateReadableTestCase.createRedableTests(order_list, mapping_list,
                mp.getProperty("OBJECT_REPOSITORY_XML_PATH"));

    } catch (SQLException e) {
        log.error("Exception in updating Expected Results" + e.getMessage());
    }

    //**** end of Generating Jelly script file
}
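
The part of the example above that is relevant to BufferedWriter.write is the explicit UTF-8 encoding (a FileOutputStream wrapped in an OutputStreamWriter). A reduced sketch of just that pattern is shown below; it uses the system temporary directory, java.nio.charset.StandardCharsets, and a placeholder payload instead of the TEMP_LOCATION property and the real test case XML.

File f = File.createTempFile("tmp", ".xml"); // system temp dir, assumed for illustration
try (BufferedWriter fw = new BufferedWriter(
        new OutputStreamWriter(new FileOutputStream(f), StandardCharsets.UTF_8))) {
    fw.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>");
    fw.write("<tests/>"); // placeholder payload
}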

From source file:com.idiro.utils.db.mysql.MySqlUtils.java

public static boolean changeFormatAfterExport(File in, File out, char delimiter, Collection<String> header,
        Collection<String> quotes) {
    //We expect that in is a csv file and out a file
    boolean ok = true;
    FileChecker fChIn = new FileChecker(in), fChOut = new FileChecker(out);

    if (!fChIn.isFile()) {
        logger.error(fChIn.getFilename() + " is not a file or does not exist");
        return false;
    }

    if (fChOut.exists()) {
        if (fChOut.isDirectory()) {
            logger.error(fChOut.getFilename() + " is a directory");
            return false;
        }
        logger.warn(fChOut.getFilename() + " already exists, it will be removed");
        String out_str = out.getAbsolutePath();
        out.delete();
        out = new File(out_str);
    }

    BufferedWriter bw = null;
    BufferedReader br = null;

    try {
        bw = new BufferedWriter(new FileWriter(out));

        logger.debug("read the file" + in.getAbsolutePath());
        br = new BufferedReader(new FileReader(in));
        String strLine;
        if (header != null && !header.isEmpty()) {
            Iterator<String> it = header.iterator();
            String headerLine = it.next();
            while (it.hasNext()) {
                headerLine += delimiter + it.next();
            }
            bw.write(headerLine + "\n");
        }

        //Read File Line By Line
        while ((strLine = br.readLine()) != null) {
            bw.write(DataFileUtils.addQuotesToLine(
                    DataFileUtils.getCleanLine(strLine.replace(',', delimiter), delimiter, delimiter), quotes,
                    delimiter) + "\n");
        }
        br.close();

        bw.close();
    } catch (FileNotFoundException e1) {
        logger.error(e1.getCause() + " " + e1.getMessage());
        logger.error("Fail to read " + in.getAbsolutePath());
        ok = false;
    } catch (IOException e1) {
        logger.error("Error writting, reading on the filesystem from the directory" + fChIn.getFilename()
                + " to the file " + fChOut.getFilename());
        ok = false;
    }
    if (ok) {
        in.delete();
    }
    return ok;
}
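
The header-building loop in the example above can also be expressed with String.join (Java 8+). The sketch below assumes the same header collection and delimiter and is not part of the original source file.

if (header != null && !header.isEmpty()) {
    // String.join takes a CharSequence delimiter, so the char is converted first
    bw.write(String.join(String.valueOf(delimiter), header));
    bw.write("\n");
}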

From source file:com.ing.connector.util.WStringUtil.java

public static void appendFile(String source, String filetoappend) {

    try {
        boolean append = true;
        File filesrc = new File(source);
        BufferedWriter output = new BufferedWriter(new FileWriter(filesrc, append));

        File ftoappend = new File(filetoappend);
        BufferedReader br = new BufferedReader(new FileReader(ftoappend));
        String line = br.readLine();

        while (line != null) {
            output.write(line);
            output.newLine();
            line = br.readLine();
        }
        output.close();
        br.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}
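
A sketch of the same append-and-copy loop using java.nio.file (Files, Paths, StandardOpenOption; Java 8+) and try-with-resources. Note that, unlike new FileWriter(filesrc, append), this variant reads and writes UTF-8 rather than the platform default charset; it is not part of the original source file.

public static void appendFile(String source, String filetoappend) throws IOException {
    try (BufferedWriter output = Files.newBufferedWriter(Paths.get(source),
                 StandardOpenOption.CREATE, StandardOpenOption.APPEND);
         BufferedReader br = Files.newBufferedReader(Paths.get(filetoappend))) {
        String line;
        while ((line = br.readLine()) != null) {
            output.write(line);
            output.newLine();
        }
    }
}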

From source file:com.revorg.goat.IndexManager.java

/**
 * Creates documents inside of the Lucene Index
 *
 * @param writer            Index Writer Class
 * @param rs                Result Set for the row of data
 * @param columnNamesArray  The array of column names to be added to the document
 * @param indexTypeArray    The array of column types to be added to the document
 * @param tempHTMLDir       The temporary HTML directory for HTML publishing
 * @throws Exception
 * @return ActionResult
 */
private static String createDocument(IndexWriter writer, ResultSet rs, String columnNamesArray[],
        String indexTypeArray[], String tempHTMLDir) {

    try {
        final Document doc = new Document();
        int columns = columnNamesArray.length;

        /*
        public Field(String name, String value, Field.Store store, Field.Index index)
        Store:
               COMPRESS - Store the original field value in the index in a compressed form. This is useful for long documents and for binary valued fields.
               YES - Store the original field value in the index. This is useful for short texts like a document's title which should be displayed with the results.
                     The value is stored in its original form, i.e. no analyzer is used before it is stored.
               NO - Do not store the field value in the index.

        Index:
               ANALYZED - Index the tokens produced by running the field's value through an Analyzer. This is useful for common text.
               NOT_ANALYZED - Index the field's value without using an Analyzer, so it can be searched. As no analyzer is used the value will be stored as a single term.
                              This is useful for unique Ids like product numbers.
               NO - Do not index the field value. This field can thus not be searched, but one can still access its contents provided it is stored.
        */

        for (int i = 0; i < columns; i++) {
            String columnName = columnNamesArray[i].trim().toLowerCase();
            String columnIndexType = indexTypeArray[i]; //Map Column Type To Array
            String columnValue = rs.getString(columnName); //Get Value But Result Sets are at 1 Not 0
            if (columnValue == null) { //Lucene Does Not Like Nulls
                columnValue = "";
            }
            //System.out.println("   Values: " + columnName +  " " + columnIndexType + " " + columnValue + " " + columnValue.length());
            //Can't Add Triggers
            if (columnIndexType.equalsIgnoreCase("TriggerUpdate") == false
                    || columnIndexType.equalsIgnoreCase("TriggerDelete") == false) {
                if (columnIndexType.equalsIgnoreCase("PrimaryKey")
                        || columnIndexType.equalsIgnoreCase("Keyword")
                        || columnIndexType.equalsIgnoreCase("Date")) {
                    //Format Dates to Correct for Sorting
                    if (columnIndexType.equalsIgnoreCase("Date")) {
                        columnValue = columnValue.replace("/", "");
                    }

                    doc.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.NOT_ANALYZED));
                }
                //UnIndexed field
                else if (columnIndexType.equalsIgnoreCase("UnIndexed")) {
                    doc.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.NO));
                } else if (columnIndexType.equalsIgnoreCase("Text")) {
                    doc.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.ANALYZED));
                } else if (columnIndexType.equalsIgnoreCase("UnStored")
                        || columnIndexType.equalsIgnoreCase("HTML")) {
                    if (columnIndexType.equalsIgnoreCase("HTML") && columnValue.length() != 0) {
                        String htmlString = tempHTMLDir + Utilities.CreateUUID() + ".html";
                        File htmlFile = new File(htmlString);
                        BufferedWriter out = new BufferedWriter(new FileWriter(htmlString));
                        out.write(columnValue);
                        out.close();

                        //Parse Document              
                        FileInputStream fis = new FileInputStream(htmlFile);
                        HTMLParser parser = new HTMLParser(fis);
                        // Add the tag-stripped contents as a Reader-valued Text field so it will
                        // get tokenized and indexed.
                        doc.add(new Field(columnName, parser.getReader()));

                        //Parse HTML
                    }
                    //UnStored Field
                    else {
                        doc.add(new Field(columnName, columnValue, Field.Store.NO, Field.Index.ANALYZED));
                    }

                } else if (columnIndexType.equalsIgnoreCase("Binary")) {
                    doc.add(new Field(columnName, columnValue, Field.Store.COMPRESS, Field.Index.NO));
                }
            }
        }

        //Add Document Here
        //System.out.println(doc); 
        writer.addDocument(doc);
        ActionResult = "Success";
        return ActionResult;

    }

    catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        //System.out.println("Failure of DbSchema File: " + xmlFile);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:it.tizianofagni.sparkboost.DataUtils.java

/**
 * Generate a new LibSvm output file, giving each document an index corresponding to the index the document had in the
 * original input LibSvm file.
 *
 * @param sc         The spark context.
 * @param dataFile   The data file.
 * @param outputFile The output file.
 */
public static void generateLibSvmFileWithIDs(JavaSparkContext sc, String dataFile, String outputFile) {
    if (sc == null)
        throw new NullPointerException("The Spark Context is 'null'");
    if (dataFile == null || dataFile.isEmpty())
        throw new IllegalArgumentException("The dataFile is 'null' or empty");

    ArrayList<MultilabelPoint> points = new ArrayList<>();
    try {
        Path pt = new Path(dataFile);
        FileSystem fs = FileSystem.get(pt.toUri(), new Configuration());
        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)));

        Path ptOut = new Path(outputFile);
        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(ptOut)));

        try {
            int docID = 0;
            String line = br.readLine();
            while (line != null) {
                bw.write("" + docID + "\t" + line + "\n");
                line = br.readLine();
                docID++;
            }
        } finally {
            br.close();
            bw.close();
        }
    } catch (Exception e) {
        throw new RuntimeException("Reading input LibSVM data file", e);
    }

}
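
Stripped of the Hadoop FileSystem plumbing, the core read-number-write loop looks like the sketch below. Here dataFile and outputFile are assumed to be ordinary local paths, so this is an illustration rather than a drop-in replacement for the method above.

try (BufferedReader br = new BufferedReader(new FileReader(dataFile));
     BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile))) {
    int docID = 0;
    String line;
    while ((line = br.readLine()) != null) {
        bw.write(docID + "\t" + line + "\n"); // prefix each line with its document index
        docID++;
    }
}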

From source file:com.dtolabs.rundeck.ExpandRunServer.java

/**
 * Copy from the input stream to the output stream, expanding properties in the contents.
 *
 * @param inputStream  input stream
 * @param outputStream output stream
 * @param props        properties
 */
private static void expandTemplate(final InputStream inputStream, final OutputStream outputStream,
        final Properties props) throws IOException {

    final BufferedReader read = new BufferedReader(new InputStreamReader(inputStream));
    final BufferedWriter write = new BufferedWriter(new OutputStreamWriter(outputStream));
    String line = read.readLine();
    while (null != line) {
        write.write(expandProperties(props, line));
        write.write(LINESEP);
        line = read.readLine();
    }
    write.flush();
    write.close();
    read.close();
}

From source file:com.tencent.wetest.common.util.ReportUtil.java

public static void delRecord(String name) {

    path = WTApplication.getContext().getFilesDir().getPath();

    File f = new File(path + "/wtIndex");
    String fileDir = Environment.getExternalStorageDirectory().getAbsolutePath() + "/wetest";
    File tmp = new File(fileDir + "/" + name);
    if (tmp.exists()) {
        tmp.delete();
    }
    List<String> content = new ArrayList<String>();

    if (f.exists() && f.isFile()) {
        try {
            BufferedReader indexreader = new BufferedReader(new FileReader(f));
            String br = "";
            while ((br = indexreader.readLine()) != null) {
                if (!br.split("/")[0].equals(name))
                    content.add(br);
            }

            indexreader.close();
            if (content.size() != 0) {
                BufferedWriter indexwriter = new BufferedWriter(new FileWriter(f, false));
                int i = 0;
                for (String temp : content) {

                    if (i == content.size() - 1)
                        indexwriter.write(temp);
                    else
                        indexwriter.write(temp + "\t\n");
                    i++;
                }

                indexwriter.flush();
                indexwriter.close();

            } else {

                f.delete();

            }

        } catch (Exception e) {
            Logger.error("delException:" + e.toString());
            e.printStackTrace();
        }

    }
}

From source file:com.thoughtmetric.tl.TLLib.java

public static Object[] parseEditText(HtmlCleaner cleaner, URL url, TLHandler handler, Context context)
        throws IOException {
    // Although probably not THE worst hack I've written, this function ranks near the top.
    // TODO: rework this routine to get rid of code duplication.

    DefaultHttpClient httpclient = new DefaultHttpClient();
    httpclient.setCookieStore(cookieStore);

    HttpGet httpGet = new HttpGet(url.toExternalForm());
    HttpResponse response = httpclient.execute(httpGet);

    handler.sendEmptyMessage(PROGRESS_DOWNLOADING);
    InputStream is = response.getEntity().getContent();

    InputStreamReader isr = new InputStreamReader(is);
    BufferedReader br = new BufferedReader(isr);

    FileOutputStream fos = context.openFileOutput(TEMP_FILE_NAME, Context.MODE_PRIVATE);
    BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fos));

    String line;
    String formStart = "<form action=\"/forum/edit.php";
    while ((line = br.readLine()) != null) {
        if (line.startsWith(formStart)) {
            Log.d(TAG, line);
            bw.write(line);
            break;
        }
    }

    String start = "\t\t<textarea";
    String end = "\t\t<p>";
    StringBuffer sb = new StringBuffer();
    while ((line = br.readLine()) != null) {
        if (line.startsWith(start)) {
            bw.write("</form>");
            int i = line.lastIndexOf('>');
            sb.append(Html.fromHtml(line.substring(i + 1)).toString());
            sb.append("\n");
            break;
        } else {
            bw.write(line);
        }
    }

    while ((line = br.readLine()) != null) {
        if (line.startsWith(end)) {
            break;
        }
        sb.append(Html.fromHtml(line).toString());
        sb.append("\n");
    }

    bw.flush();
    bw.close();

    if (handler != null)
        handler.sendEmptyMessage(PROGRESS_PARSING);

    Object[] ret = new Object[2];

    ret[0] = sb.toString();
    ret[1] = cleaner.clean(context.openFileInput(TEMP_FILE_NAME));
    return ret;
}

From source file:dk.clarin.tools.workflow.java

public static void got200(String result, bracmat BracMat, String filename, String jobID, InputStream input) {
    logger.debug("got200");
    /**
     * toolsdata$
     *
     * Return the full file system path to Tool's staging area.
     * The input can be a file name: this name is appended to the returned value.
     */
    String destdir = BracMat.Eval("toolsdata$");
    /**
     * toolsdataURL$
     *
     * Return the full URL to Tool's staging area.
     * The input can be a file name: this name is appended to the returned value.
     */
    //String toolsdataURL = BracMat.Eval("toolsdataURL$");
    try {
        byte[] buffer = new byte[4096];
        int n = -1;
        int N = 0;
        //int Nbuf = 0;
        OutputStream outputF = new FileOutputStream(destdir + FilenameNoMetadata(filename));
        StringWriter outputM = new StringWriter();

        boolean isTextual = false;
        String textable = BracMat.Eval("getJobArg$(" + result + "." + jobID + ".isText)");
        if (textable.equals("y"))
            isTextual = true;
        logger.debug("textable:" + (isTextual ? "ja" : "nej"));

        while ((n = input.read(buffer)) != -1) {
            if (n > 0) {
                N = N + n;
                //++Nbuf;
                outputF.write(buffer, 0, n);
                if (isTextual) {
                    String toWrite = new String(buffer, 0, n);
                    try {
                        outputM.write(toWrite);
                    } catch (Exception e) {
                        logger.error("Could not write to StringWriter. Reason:" + e.getMessage());
                    }
                }
            }
        }
        outputF.close();
        String requestResult = outputM.toString();

        Calendar cal = Calendar.getInstance();
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd");
        String date = sdf.format(cal.getTime());
        logger.debug("Calling doneJob(" + result + "," + jobID + "," + date + ")");
        /**
         * doneJob$
         *
         * Marks a job as 'done' in jobs.table in jboss/server/default/data/tools
         * Constructs a CTBID from date, JobNr and jobID
         * Makes sure there is a row in table CTBs connecting
         *      JobNr, jobID, email and CTBID
         * Creates isDependentOf and isAnnotationOf relations
         * Affected tables:
         *      jobs.table
         *      CTBs.table
         *      relations.table
         * Arguments: jobNR, JobID, spangroup with annotation and date. 
         *
         * Notice that this function currently only can generate output of type 
         * TEIDKCLARIN_ANNO
         */
        String newResource = BracMat.Eval(
                "doneJob$(" + result + "." + jobID + "." + quote(requestResult) + "." + quote(date) + ")");
        // Create file plus metadata
        logger.debug("Going to write {}", destdir + Filename(filename));
        FileWriter fstream = new FileWriter(destdir + Filename(filename));
        BufferedWriter Out = new BufferedWriter(fstream);
        Out.write(newResource);
        Out.close();
        /**
         * relationFile$
         *
         * Create a relation file ready for deposition together with an annotation.
         *
         * Input: JobNr and jobID
         * Output: String that can be saved as a semicolon separated file.
         * Consulted tables:
         *      relations.table     (for relation type, ctb and ctbid
         *      CTBs.table          (for ContentProvider and CTBID)
         */
        String relations = BracMat.Eval("relationFile$(" + result + "." + jobID + ")");
        // Create relation file
        fstream = new FileWriter(destdir + FilenameRelations(filename));
        Out = new BufferedWriter(fstream);
        Out.write(relations);
        Out.close();
    } catch (Exception e) {//Catch exception if any
        logger.error("Could not write result to file. Aborting job " + jobID + ". Reason:" + e.getMessage());
        /**
         * abortJob$
         *
         * Abort, given a JobNr and a jobID, the specified job and all
         * pending jobs that depend on the output from the (now aborted) job.
         * Rather than removing the aborted jobs from the jobs.table list, they are
         * marked 'aborted'.
         * Result (as XML): a list of (JobNr, jobID, toolName, items)
         */
        /*filelist =*/ BracMat.Eval("abortJob$(" + result + "." + jobID + ")");
    }
}