Example usage for java.io BufferedWriter append

List of usage examples for java.io BufferedWriter append

Introduction

On this page you can find usage examples for java.io.BufferedWriter.append.

Prototype

public Writer append(CharSequence csq) throws IOException 

Document

Appends the specified character sequence to this writer.
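
Because append returns the writer itself, calls can be chained; and per the Writer contract, appending a null CharSequence writes the four characters "null" rather than throwing. A minimal sketch (the class and file name are hypothetical):

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;

public class BufferedWriterAppendDemo {
    public static void main(String[] args) throws IOException {
        // try-with-resources flushes and closes the writer automatically
        try (BufferedWriter bw = new BufferedWriter(new FileWriter("demo.txt"))) {
            // append returns the Writer, so calls chain naturally
            bw.append("id").append('\t').append("value").append('\n');
            bw.append("1").append('\t').append("42").append('\n');
        }
    }
}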

Usage

From source file:org.kuali.kfs.gl.batch.BatchSortUtil.java

private static void mergeFiles(File tempSortDir, int numFiles, String outputFileName,
        Comparator<String> comparator) {
    try {
        ArrayList<FileReader> mergefr = new ArrayList<FileReader>(numFiles);
        ArrayList<BufferedReader> mergefbr = new ArrayList<BufferedReader>(numFiles);
        // temp buffer for writing - contains the minimum record from each file
        ArrayList<String> fileRows = new ArrayList<String>(numFiles);

        BufferedWriter bw = new BufferedWriter(new FileWriter(outputFileName));
        //LOG.info("Successfully opened output file " + outputFileName);

        boolean someFileStillHasRows = false;

        // Iterate over all the files, getting the first line in each file
        for (int i = 0; i < numFiles; i++) {
            // open a file reader for each file
            mergefr.add(new FileReader(new File(tempSortDir, "chunk_" + i)));
            mergefbr.add(new BufferedReader(mergefr.get(i)));

            // get the first row
            String line = mergefbr.get(i).readLine();
            if (line != null) {
                fileRows.add(line);
                someFileStillHasRows = true;
            } else {
                fileRows.add(null);
            }
        }

        while (someFileStillHasRows) {
            String min = null;
            int minIndex = 0; // index of the file with the minimum record

            // init for later compare - assume the first file has the minimum
            String line = fileRows.get(0);
            if (line != null) {
                min = line;
                minIndex = 0;
            } else {
                min = null;
                minIndex = -1;
            }

            // determine the minimum record of the top lines of each file
            // check which one is min
            for (int i = 1; i < fileRows.size(); i++) {
                line = fileRows.get(i);
                if (line != null) {
                    if (min != null) {
                        if (comparator.compare(line, min) < 0) {
                            minIndex = i;
                            min = line;
                        }
                    } else {
                        min = line;
                        minIndex = i;
                    }
                }
            }

            if (minIndex < 0) {
                someFileStillHasRows = false;
            } else {
                // write to the sorted file
                bw.append(fileRows.get(minIndex)).append('\n');

                // get another row from the file that had the min
                line = mergefbr.get(minIndex).readLine();
                if (line != null) {
                    fileRows.set(minIndex, line);
                } else { // file is out of rows, set to null so it is ignored
                    fileRows.set(minIndex, null);
                }
            }
            // re-check whether any buffered row remains
            someFileStillHasRows = false;
            for (int i = 0; i < fileRows.size(); i++) {
                if (fileRows.get(i) != null) {
                    if (minIndex < 0) {
                        throw new RuntimeException(
                                "minIndex < 0 and row found in chunk file " + i + " : " + fileRows.get(i));
                    }
                    someFileStillHasRows = true;
                    break;
                }
            }

            // double-check the underlying readers one more time: a slot
            // marked empty may still be refillable from its file
            if (!someFileStillHasRows) {
                // try to refill any exhausted slots directly from the readers
                for (int i = 0; i < fileRows.size(); i++) {
                    if (fileRows.get(i) == null) {
                        line = mergefbr.get(i).readLine();
                        if (line != null) {
                            someFileStillHasRows = true;
                            fileRows.set(i, line);
                        }
                    }
                }
            }
        }

        // close all the files
        bw.close();
        //LOG.info("Successfully closed output file " + outputFileName);

        for (BufferedReader br : mergefbr) {
            br.close();
        }
        for (FileReader fr : mergefr) {
            fr.close();
        }
    } catch (Exception ex) {
        LOG.error("Exception merging the sorted files", ex);
        throw new RuntimeException("Exception merging the sorted files", ex);
    }
}
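
The loop above scans every buffered head line to find the minimum on each pass. For larger fan-in, the same k-way merge is often written with a PriorityQueue keyed by the comparator, so the minimum is selected in O(log n) per record. A minimal sketch under the same assumptions as the method above (chunk files named "chunk_0" through "chunk_<numFiles-1>" in tempSortDir; the method name mergeFilesWithHeap is hypothetical):

private static void mergeFilesWithHeap(File tempSortDir, int numFiles, String outputFileName,
        Comparator<String> comparator) throws IOException {
    List<BufferedReader> readers = new ArrayList<BufferedReader>(numFiles);
    // each heap entry pairs a buffered head line with the index of its reader
    PriorityQueue<Map.Entry<String, Integer>> heap = new PriorityQueue<>(
            (a, b) -> comparator.compare(a.getKey(), b.getKey()));
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(outputFileName))) {
        for (int i = 0; i < numFiles; i++) {
            BufferedReader br = new BufferedReader(new FileReader(new File(tempSortDir, "chunk_" + i)));
            readers.add(br);
            String line = br.readLine();
            if (line != null) {
                heap.add(new AbstractMap.SimpleEntry<String, Integer>(line, i));
            }
        }
        // repeatedly emit the smallest head line and refill from its source file
        while (!heap.isEmpty()) {
            Map.Entry<String, Integer> min = heap.poll();
            bw.append(min.getKey()).append('\n');
            String next = readers.get(min.getValue()).readLine();
            if (next != null) {
                heap.add(new AbstractMap.SimpleEntry<String, Integer>(next, min.getValue()));
            }
        }
    } finally {
        for (BufferedReader br : readers) {
            br.close();
        }
    }
}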

From source file:org.ihtsdo.classifier.ClassificationRunner.java

/**
 * Compare and write back.
 *
 * @param snorelA the snorel a
 * @param snorelB the snorel b
 * @return the string
 * @throws java.io.IOException Signals that an I/O exception has occurred.
 */
private String compareAndWriteBack(List<Relationship> snorelA, List<Relationship> snorelB) throws IOException {

    retiredSet = new HashSet<String>();
    // STATISTICS COUNTERS
    int countConSeen = 0;
    int countSame = 0;
    int countSameISA = 0;
    int countA_Diff = 0;
    int countA_DiffISA = 0;
    int countA_Total = 0;
    int countB_Diff = 0;
    int countB_DiffISA = 0;
    int countB_Total = 0;
    FileOutputStream fos = new FileOutputStream(tempRelationshipStore);
    OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8");
    BufferedWriter bw = new BufferedWriter(osw);

    bw.append("id");
    bw.append("\t");
    bw.append("effectiveTime");
    bw.append("\t");
    bw.append("active");
    bw.append("\t");
    bw.append("moduleId");
    bw.append("\t");
    bw.append("sourceId");
    bw.append("\t");
    bw.append("destinationId");
    bw.append("\t");
    bw.append("relationshipGroup");
    bw.append("\t");
    bw.append("typeId");
    bw.append("\t");
    bw.append("characteristicTypeId");
    bw.append("\t");
    bw.append("modifierId");
    bw.append("\r\n");

    long startTime = System.currentTimeMillis();
    Collections.sort(snorelA);
    Collections.sort(snorelB);

    // Typically, A is the Classifier Path (for previously inferred)
    // Typically, B is the SnoRocket Results Set (for newly inferred)
    Iterator<Relationship> itA = snorelA.iterator();
    Iterator<Relationship> itB = snorelB.iterator();
    Relationship rel_A = null;
    boolean done_A = false;
    if (itA.hasNext()) {
        rel_A = itA.next();
    } else {
        done_A = true;
    }
    Relationship rel_B = null;
    boolean done_B = false;
    if (itB.hasNext()) {
        rel_B = itB.next();
    } else {
        done_B = true;
    }

    logger.info("\r\n::: [SnorocketMojo]" + "\r\n::: snorelA.size() = \t" + snorelA.size()
            + "\r\n::: snorelB.size() = \t" + snorelB.size());

    // BY SORT ORDER, LOWER NUMBER ADVANCES FIRST
    while (!done_A && !done_B) {
        if (++countConSeen % 25000 == 0) {
            logger.info("::: [SnorocketMojo] compareAndWriteBack @ #\t" + countConSeen);
        }

        if (rel_A.sourceId == rel_B.sourceId) {
            // COMPLETELY PROCESS ALL C1 FOR BOTH IN & OUT
            // PROCESS C1 WITH GROUP == 0
            int thisC1 = rel_A.sourceId;

            // PROCESS WHILE BOTH HAVE GROUP 0
            while (rel_A.sourceId == thisC1 && rel_B.sourceId == thisC1 && rel_A.group == 0 && rel_B.group == 0
                    && !done_A && !done_B) {

                // PROCESS GROUP ZERO
                switch (compareSnoRel(rel_A, rel_B)) {
                case 1: // SAME
                    // GATHER STATISTICS
                    countA_Total++;
                    countB_Total++;
                    countSame++;
                    // NOTHING TO WRITE IN THIS CASE
                    if (rel_A.typeId == isa) {
                        countSameISA++;
                    }
                    if (itA.hasNext()) {
                        rel_A = itA.next();
                    } else {
                        done_A = true;
                    }
                    if (itB.hasNext()) {
                        rel_B = itB.next();
                    } else {
                        done_B = true;
                    }
                    break;

                case 2: // REL_A > REL_B -- B has extra stuff
                    // WRITEBACK REL_B (Classifier Results) AS CURRENT
                    countB_Diff++;
                    countB_Total++;
                    if (rel_B.typeId == isa) {
                        countB_DiffISA++;
                    }
                    writeRel(bw, rel_B);

                    if (itB.hasNext()) {
                        rel_B = itB.next();
                    } else {
                        done_B = true;
                    }
                    break;

                case 3: // REL_A < REL_B -- A has extra stuff
                    // WRITEBACK REL_A (Classifier Input) AS RETIRED
                    // GATHER STATISTICS
                    countA_Diff++;
                    countA_Total++;
                    if (rel_A.typeId == isa) {
                        countA_DiffISA++;
                    }
                    writeBackRetired(bw, rel_A);

                    if (itA.hasNext()) {
                        rel_A = itA.next();
                    } else {
                        done_A = true;
                    }
                    break;
                } // switch
            }

            // REMAINDER LIST_A GROUP 0 FOR C1
            while (rel_A.sourceId == thisC1 && rel_A.group == 0 && !done_A) {

                countA_Diff++;
                countA_Total++;
                if (rel_A.typeId == isa) {
                    countA_DiffISA++;
                }
                writeBackRetired(bw, rel_A);
                if (itA.hasNext()) {
                    rel_A = itA.next();
                } else {
                    done_A = true;
                    break;
                }
            }

            // REMAINDER LIST_B GROUP 0 FOR C1
            while (rel_B.sourceId == thisC1 && rel_B.group == 0 && !done_B) {
                countB_Diff++;
                countB_Total++;
                if (rel_B.typeId == isa) {
                    countB_DiffISA++;
                }
                writeRel(bw, rel_B);
                if (itB.hasNext()) {
                    rel_B = itB.next();
                } else {
                    done_B = true;
                    break;
                }
            }

            // ** SEGMENT GROUPS **
            RelationshipGroupList groupList_A = new RelationshipGroupList();
            RelationshipGroupList groupList_B = new RelationshipGroupList();
            RelationshipGroup groupA = null;
            RelationshipGroup groupB = null;

            // SEGMENT GROUPS IN LIST_A
            int prevGroup = Integer.MIN_VALUE;
            while (rel_A.sourceId == thisC1 && !done_A) {
                if (rel_A.group != prevGroup) {
                    groupA = new RelationshipGroup();
                    groupList_A.add(groupA);
                }

                groupA.add(rel_A);

                prevGroup = rel_A.group;
                if (itA.hasNext()) {
                    rel_A = itA.next();
                } else {
                    done_A = true;
                }
            }
            // SEGMENT GROUPS IN LIST_B
            prevGroup = Integer.MIN_VALUE;
            while (rel_B.sourceId == thisC1 && !done_B) {
                if (rel_B.group != prevGroup) {
                    groupB = new RelationshipGroup();
                    groupList_B.add(groupB);
                }

                groupB.add(rel_B);

                prevGroup = rel_B.group;
                if (itB.hasNext()) {
                    rel_B = itB.next();
                } else {
                    done_B = true;
                }
            }

            // FIND GROUPS IN GROUPLIST_A WITHOUT AN EQUAL IN GROUPLIST_B
            // WRITE THESE GROUPED RELS AS "RETIRED"
            RelationshipGroupList groupList_NotEqual;
            if (groupList_A.size() > 0) {
                groupList_NotEqual = groupList_A.whichNotEqual(groupList_B);
                for (RelationshipGroup sg : groupList_NotEqual) {
                    for (Relationship sr_A : sg) {
                        writeBackRetired(bw, sr_A);
                    }
                }
                countA_Total += groupList_A.countRels();
                countA_Diff += groupList_NotEqual.countRels();
            }

            // FIND GROUPS IN GROUPLIST_B WITHOUT AN EQUAL IN GROUPLIST_A
            // WRITE THESE GROUPED RELS AS "NEW, CURRENT"
            int rgNum = 0; // USED TO DETERMINE "AVAILABLE" ROLE GROUP NUMBERS
            if (groupList_B.size() > 0) {
                groupList_NotEqual = groupList_B.whichNotEqual(groupList_A);
                for (RelationshipGroup sg : groupList_NotEqual) {
                    if (sg.get(0).group != 0) {
                        rgNum = nextRoleGroupNumber(groupList_A, rgNum);
                        for (Relationship sr_B : sg) {
                            sr_B.group = rgNum;
                            writeRel(bw, sr_B);
                        }
                    } else {
                        for (Relationship sr_B : sg) {
                            writeRel(bw, sr_B);
                        }
                    }
                }
                countB_Total += groupList_B.countRels();
                countB_Diff += groupList_NotEqual.countRels();
            }
        } else if (rel_A.sourceId > rel_B.sourceId) {
            // CASE 2: LIST_B HAS CONCEPT NOT IN LIST_A
            // COMPLETELY *ADD* ALL THIS C1 FOR REL_B AS NEW, CURRENT
            int thisC1 = rel_B.sourceId;
            while (rel_B.sourceId == thisC1) {
                countB_Diff++;
                countB_Total++;
                if (rel_B.typeId == isa) {
                    countB_DiffISA++;
                }
                writeRel(bw, rel_B);
                if (itB.hasNext()) {
                    rel_B = itB.next();
                } else {
                    done_B = true;
                    break;
                }
            }

        } else {
            // CASE 3: LIST_A HAS CONCEPT NOT IN LIST_B
            // COMPLETELY *RETIRE* ALL THIS C1 FOR REL_A
            int thisC1 = rel_A.sourceId;
            while (rel_A.sourceId == thisC1) {
                countA_Diff++;
                countA_Total++;
                if (rel_A.typeId == isa) {
                    countA_DiffISA++;
                }
                writeBackRetired(bw, rel_A);
                if (itA.hasNext()) {
                    rel_A = itA.next();
                } else {
                    done_A = true;
                    break;
                }
            }
        }
    }

    // AT THIS POINT, THE PREVIOUS C1 HAS BEEN PROCESSED COMPLETELY
    // AND, EITHER REL_A OR REL_B HAS BEEN COMPLETELY PROCESSED
    // AND, ANY REMAINDER IS ONLY ON REL_LIST_A OR ONLY ON REL_LIST_B
    // AND, THAT REMAINDER HAS A "STANDALONE" C1 VALUE
    // THEREFORE THAT REMAINDER IS WRITTEN BACK COMPLETELY
    // AS "NEW CURRENT" OR "OLD RETIRED"
    //
    // LASTLY, IF .NOT.DONE_A THEN THE NEXT REL_A IS ALREADY IN PLACE
    while (!done_A) {
        countA_Diff++;
        countA_Total++;
        if (rel_A.typeId == isa) {
            countA_DiffISA++;
        }
        // COMPLETELY UPDATE ALL REMAINING REL_A AS RETIRED
        writeBackRetired(bw, rel_A);
        if (itA.hasNext()) {
            rel_A = itA.next();
        } else {
            done_A = true;
            break;
        }
    }

    while (!done_B) {
        countB_Diff++;
        countB_Total++;
        if (rel_B.typeId == isa) {
            countB_DiffISA++;
        }
        // COMPLETELY UPDATE ALL REMAINING REL_B AS NEW, CURRENT
        writeRel(bw, rel_B);
        if (itB.hasNext()) {
            rel_B = itB.next();
        } else {
            done_B = true;
            break;
        }
    }

    bw.close();
    bw = null;
    osw = null;
    fos = null;
    // CHECKPOINT DATABASE

    StringBuilder s = new StringBuilder();
    s.append("\r\n::: [Snorocket] compareAndWriteBack()");
    long lapseTime = System.currentTimeMillis() - startTime;
    s.append("\r\n::: [Time] Sort/Compare Input & Output: \t").append(lapseTime);
    s.append("\t(mS)\t").append(((float) lapseTime / 1000) / 60).append("\t(min)");
    s.append("\r\n");
    s.append("\r\n::: ");
    s.append("\r\n::: countSame:     \t").append(countSame);
    s.append("\r\n::: countSameISA:  \t").append(countSameISA);
    s.append("\r\n::: A == Classifier Output Path");
    s.append("\r\n::: countA_Diff:   \t").append(countA_Diff);
    s.append("\r\n::: countA_DiffISA:\t").append(countA_DiffISA);
    s.append("\r\n::: countA_Total:  \t").append(countA_Total);
    s.append("\r\n::: B == Classifier Solution Set");
    s.append("\r\n::: countB_Diff:   \t").append(countB_Diff);
    s.append("\r\n::: countB_DiffISA:\t").append(countB_DiffISA);
    s.append("\r\n::: countB_Total:  \t").append(countB_Total);
    s.append("\r\n::: ");

    return s.toString();
}
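
The header row at the top of this method issues one append call per column. On Java 8 and later, the same tab-separated row can be produced in a single call with String.join; a hedged, equivalent sketch using the same column names:

    // builds the identical tab-separated header row in one append
    bw.append(String.join("\t", "id", "effectiveTime", "active", "moduleId", "sourceId",
            "destinationId", "relationshipGroup", "typeId", "characteristicTypeId", "modifierId"));
    bw.append("\r\n");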

From source file:msi.gama.outputs.layers.ChartLayerStatement.java

public void saveHistory() {
    IScope scope = output.getScope().copy();
    if (scope == null) {
        return;
    }
    try {
        Files.newFolder(scope, chartFolder);
        String file = chartFolder + "/" + "chart_" + getName() + ".csv";
        BufferedWriter bw;
        file = FileUtils.constructAbsoluteFilePath(scope, file, false);
        bw = new BufferedWriter(new FileWriter(file));
        bw.append(history);
        bw.close();
    } catch (final Exception e) {
        e.printStackTrace();
        return;
    } finally {
        GAMA.releaseScope(scope);
    }
}
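
Note that if bw.append throws here, bw.close() is never reached and the file handle leaks. A sketch of the same write using try-with-resources (same variables as above) avoids that:

    // the writer is closed (and flushed) even when append throws
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(file))) {
        bw.append(history);
    }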

From source file:org.openhie.openempi.matching.fellegisunter.ProbabilisticMatchingServiceBase.java

public void calculateMarginalProbabilities(List<LeanRecordPair> pairs,
        FellegiSunterParameters fellegiSunterParams, boolean writeStat, String pathPrefix) {
    double mpsum = 0.0;
    double upsum = 0.0;

    try {
        FileWriter fw = null;
        BufferedWriter bw = null;
        if (writeStat) {
            fw = new FileWriter(pathPrefix + "_" + Constants.MARGINAL_PROBABILITIES_FILE_NAME);
            bw = new BufferedWriter(fw);
        }
        try {
            for (LeanRecordPair pair : pairs) {
                //               log.trace("Pair: " + getRecordPairMatchFields(pair));
                String marginalProbs = "";
                ComparisonVector vector = pair.getComparisonVector();
                vector.calculateProbabilityGivenMatch(fellegiSunterParams.getMValues(), useBinaryScores());
                vector.calculateProbabilityGivenNonmatch(fellegiSunterParams.getUValues(), useBinaryScores());
                mpsum += vector.getVectorProbGivenM();
                upsum += vector.getVectorProbGivenU();
                if (writeStat) {
                    marginalProbs += (pair.getWeight() + "," + vector.getVectorProbGivenM() + ","
                            + vector.getVectorProbGivenU());
                    bw.append(marginalProbs);
                    bw.newLine(); // System.getProperty("line.separator")
                }
            }
        } finally {
            if (writeStat)
                bw.close();
        }
    } catch (IOException ex) {
        ex.printStackTrace();
    }
    log.trace("mpsum: " + mpsum + " upsum: " + upsum);
}
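
One detail this example highlights: newLine() writes the platform-dependent separator (System.getProperty("line.separator")), whereas append('\n') always emits a single LF. A sketch of the two alternatives for the line-writing step above:

    bw.append(marginalProbs).append('\n'); // always a single LF, platform-independent
    // versus
    bw.append(marginalProbs);
    bw.newLine(); // platform separator, e.g. CRLF on Windows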

From source file:edu.isi.pfindr.learn.util.PairsFileIO.java

public void readDistinctElementsFromPairsAddClass(String pairsFilepath) {
    //readDistinctElementsIntoList
    List<Object> distinctElements = readDistinctElementsIntoList(pairsFilepath);
    System.out.println("Size of distinctElements" + distinctElements.size());
    for (int i = 0; i < distinctElements.size(); i++) {
        System.out.println("distinctElements " + i + " " + distinctElements.get(i));
    }

    //get class for those distinct elements from original cohort file
    String originalFile = "data/cohort1/bio_nlp/cohort1_s.txt";
    BufferedReader br = null;
    String thisLine;
    String[] lineArray;
    LinkedMap originalMap = new LinkedMap();
    BufferedWriter distinctPriorityPairsWriter = null;

    try {
        br = new BufferedReader(new FileReader(originalFile));
        while ((thisLine = br.readLine()) != null) {
            thisLine = thisLine.trim();
            if (thisLine.equals(""))
                continue;

            lineArray = thisLine.split("\t");
            originalMap.put(lineArray[3], lineArray[1]);
        }

        //write distinct elements with class to an output file
        StringBuffer outfileBuffer = new StringBuffer();
        for (int i = 0; i < distinctElements.size(); i++)
            outfileBuffer.append(distinctElements.get(i)).append("\t")
                    .append(originalMap.get(distinctElements.get(i)) + "\n");

        distinctPriorityPairsWriter = new BufferedWriter(
                new FileWriter(pairsFilepath.split("\\.")[0] + "_distinct_with_class.txt"));

        distinctPriorityPairsWriter.append(outfileBuffer.toString());
        outfileBuffer.setLength(0);
        distinctPriorityPairsWriter.flush();

    } catch (IOException io) {
        io.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // close the reader and the writer here so both are released
        // whether or not an exception was thrown
        try {
            if (br != null)
                br.close();
            if (distinctPriorityPairsWriter != null)
                distinctPriorityPairsWriter.close();
        } catch (IOException e) {
            System.out.println("Problem occurred while closing streams");
            e.printStackTrace();
        }
    }

}

From source file:com.digitalpebble.behemoth.mahout.util.Mahout2LibSVM.java

public int run(String[] args) throws Exception {

    Options options = new Options();
    // automatically generate the help statement
    HelpFormatter formatter = new HelpFormatter();
    // create the parser
    CommandLineParser parser = new GnuParser();

    options.addOption("h", "help", false, "print this message");
    options.addOption("v", "vector", true, "input vector sequencefile");
    options.addOption("l", "label", true, "input vector sequencefile");
    options.addOption("o", "output", true, "output Behemoth corpus");

    // parse the command line arguments
    CommandLine line = null;
    try {
        line = parser.parse(options, args);
        if (line.hasOption("help")) {
            formatter.printHelp("CorpusGenerator", options);
            return 0;
        }
        if (!line.hasOption("v") | !line.hasOption("o") | !line.hasOption("l")) {
            formatter.printHelp("CorpusGenerator", options);
            return -1;
        }
    } catch (ParseException e) {
        formatter.printHelp("CorpusGenerator", options);
        return -1;
    }

    Path vectorPath = new Path(line.getOptionValue("v"));
    Path labelPath = new Path(line.getOptionValue("l"));
    String output = line.getOptionValue("o");

    Path tempOutput = new Path(vectorPath.getParent(), "temp-" + System.currentTimeMillis());

    // extracts the string representations from the vectors
    int retVal = vectorToString(vectorPath, tempOutput);
    if (retVal != 0) {
        HadoopUtil.delete(getConf(), tempOutput);
        return retVal;
    }

    Path tempOutput2 = new Path(vectorPath.getParent(), "temp-" + System.currentTimeMillis());

    retVal = convert(tempOutput, labelPath, tempOutput2);

    // delete the temp output
    HadoopUtil.delete(getConf(), tempOutput);

    if (retVal != 0) {
        HadoopUtil.delete(getConf(), tempOutput2);
        return retVal;
    }

    // convert tempOutput to standard file
    BufferedWriter bow = new BufferedWriter(new FileWriter(new File(output)));

    // the label dictionary is not dumped to text
    int labelMaxIndex = 0;
    Map<String, Integer> labelIndex = new HashMap<String, Integer>();

    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] fss = fs.listStatus(tempOutput2);
    try {
        for (FileStatus status : fss) {
            Path path = status.getPath();
            // skips the _log or _SUCCESS files
            if (!path.getName().startsWith("part-") && !path.getName().equals(tempOutput2.getName()))
                continue;
            SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
            // read the key + values in that file
            Text key = new Text();
            Text value = new Text();
            while (reader.next(key, value)) {
                String label = key.toString();
                // replace the label by its index
                Integer indexLabel = labelIndex.get(label);
                if (indexLabel == null) {
                    indexLabel = Integer.valueOf(labelMaxIndex);
                    labelIndex.put(label, indexLabel);
                    labelMaxIndex++;
                }
                String val = value.toString();
                bow.append(indexLabel.toString()).append(val).append("\n");
            }
            reader.close();
        }
        bow.flush();
    } catch (Exception e) {
        e.printStackTrace();
        return -1;
    } finally {
        bow.close();
        fs.delete(tempOutput2, true);
    }
    return 0;
}

From source file:voldemort.tools.KeyVersionFetcherCLI.java

public boolean sampleStore(StoreDefinition storeDefinition) {
    String storeName = storeDefinition.getName();

    String keysFileName = inDir + System.getProperty("file.separator") + storeName + ".keys";
    File keysFile = new File(keysFileName);
    if (!keysFile.exists()) {
        logger.error("Keys file " + keysFileName + " does not exist!");
        return false;
    }

    String kvFileName = outDir + System.getProperty("file.separator") + storeName + ".kvs";
    File kvFile = new File(kvFileName);
    if (kvFile.exists()) {
        logger.info("Key-Version file " + kvFileName + " exists, so will not sample keys from file "
                + keysFileName + ".");
        return true;
    }

    BaseStoreRoutingPlan storeRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDefinition);
    BufferedReader keyReader = null;
    BufferedWriter kvWriter = null;
    try {
        keyReader = new BufferedReader(new FileReader(keysFileName));
        kvWriter = new BufferedWriter(new FileWriter(kvFileName));

        boolean readAllKeys = false;
        while (!readAllKeys) {
            Queue<Future<String>> futureKVs = new LinkedList<Future<String>>();
            for (int numFetchTasks = 0; numFetchTasks < this.outputBatchSize; numFetchTasks++) {
                String keyLine = keyReader.readLine();
                if (keyLine == null) {
                    readAllKeys = true;
                    break;
                }
                byte[] keyInBytes = ByteUtils.fromHexString(keyLine.trim());
                FetchKeyVersionsTask kvFetcher = new FetchKeyVersionsTask(storeRoutingPlan, keyInBytes);
                Future<String> future = kvFetcherService.submit(kvFetcher);
                futureKVs.add(future);
            }

            if (futureKVs.size() > 0) {
                while (!futureKVs.isEmpty()) {
                    Future<String> future = futureKVs.poll();
                    String keyVersions = future.get();
                    kvWriter.append(keyVersions);
                }
            }
        }
        return true;
    } catch (DecoderException de) {
        logger.error("Could not decode key to sample for store " + storeName, de);
        return false;
    } catch (IOException ioe) {
        logger.error("IOException caught while sampling store " + storeName, ioe);
        return false;
    } catch (InterruptedException ie) {
        logger.error("InterruptedException caught while sampling store " + storeName, ie);
        return false;
    } catch (ExecutionException ee) {
        logger.error("Encountered an execution exception while sampling " + storeName, ee);
        ee.printStackTrace();
        return false;
    } finally {
        if (keyReader != null) {
            try {
                keyReader.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close keyReader for store " + storeName, e);
                e.printStackTrace();
            }
        }
        if (kvWriter != null) {
            try {
                kvWriter.close();
            } catch (IOException e) {
                logger.error("IOException caught while trying to close kvWriter for store " + storeName, e);
                e.printStackTrace();
            }
        }
    }
}

From source file:analytics.storage.store2csv.java

@Override
public void storeElementData(HashMap<String, Double> data, String metricName, String dataProvider,
        String analysisType, String headerColumn, Boolean fed) {

    String sFileName = dataProvider + analysisType + ".csv";

    Properties props = new Properties();
    try {
        props.load(new FileInputStream("configure.properties"));
    } catch (FileNotFoundException e1) {
        e1.printStackTrace();
        System.exit(-1);
    } catch (IOException e1) {
        e1.printStackTrace();
        System.exit(-1);
    }

    File anls = new File(props.getProperty(AnalyticsConstants.resultsPath) + "Analysis_Results");

    if (!anls.exists())
        anls.mkdir();

    File dir = new File(anls, dataProvider);
    if (!dir.exists())
        dir.mkdir();

    File file = new File(dir, sFileName);

    this.setElementDataFilePath(file.getAbsolutePath());
    FileWriter writer = null;
    BufferedWriter bw = null;

    BufferedReader reader = null;
    try {

        if (file.exists() && isAppendData() == false) {

            if (fed == false)
                file.delete();
            setAppend(true);
        } else if (!file.exists() && isAppendData() == false)
            setAppend(true);

        if (!file.exists() && isAppendData() == true) {
            writer = new FileWriter(file);
            bw = new BufferedWriter(writer);
            createHeaders(bw, metricName, headerColumn);

            Set<String> keySet = data.keySet();
            Iterator<String> iterator = keySet.iterator();

            StringBuffer key = new StringBuffer();
            while (iterator.hasNext()) {
                // String key = iterator.next();
                key.append(iterator.next());
                // System.out.println(key);
                bw.append(key.toString());
                bw.append(',');
                Double value = data.get(key.toString());
                bw.append(String.valueOf(value));
                bw.newLine();
                key.delete(0, key.length());
            }

            bw.close();
            writer.close();
        } else if (file.exists() && isAppendData() == true) {

            reader = new BufferedReader(new FileReader(file));

            File temp = new File(dir, "temp.csv");

            writer = new FileWriter(temp);
            bw = new BufferedWriter(writer);

            String line;
            int counter = 0;

            // Set<String> keySet = data.keySet();
            // Iterator<String> iterator = keySet.iterator();

            StringBuffer key = new StringBuffer();
            while ((line = reader.readLine()) != null) {

                String[] split = line.split(",");

                // String key = split[0];
                key.append(split[0]);

                if (counter == 0) {
                    line = line + "," + metricName;
                    bw.append(line);
                    bw.newLine();

                } else {

                    Double value = data.get(key.toString());
                    // System.out.println("Appending key:" + key +
                    // " value:"
                    // + value);
                    line = line + "," + value;
                    // /System.out.println("Appending line:" + line);
                    bw.append(line);
                    bw.newLine();
                }

                counter += 1;
                key.delete(0, key.length());

            }
            bw.close();
            writer.close();

            FileUtils.copyFile(temp, file);
            temp.delete();
            reader.close();

        }

    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            if (bw != null)
                bw.close();
            if (reader != null)
                reader.close();
            if (writer != null)
                writer.close();
        } catch (IOException ex) {
            ex.printStackTrace();
        }
    }
}

From source file:com.ibm.bi.dml.test.utils.TestUtils.java

public static void writeCSVTestMatrix(String file, double[][] matrix) {
    try {
        //create outputstream to HDFS / FS and writer
        DataOutputStream out = null;
        FileSystem fs = FileSystem.get(conf);
        out = fs.create(new Path(file), true);

        BufferedWriter pw = new BufferedWriter(new OutputStreamWriter(out));

        //write the actual matrix
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < matrix.length; i++) {
            sb.setLength(0);
            if (matrix[i][0] != 0)
                sb.append(matrix[i][0]);
            for (int j = 1; j < matrix[i].length; j++) {
                sb.append(",");
                if (matrix[i][j] == 0)
                    continue;
                sb.append(matrix[i][j]);
            }
            sb.append('\n');
            pw.append(sb.toString());
        }

        //close writer and streams
        pw.close();
        out.close();
    } catch (IOException e) {
        fail("unable to write (csv) test matrix (" + file + "): " + e.getMessage());
    }
}
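
A hypothetical call (the path is interpreted by the Hadoop FileSystem configured in the class); note how zero entries are skipped, leaving empty CSV fields:

    // hypothetical usage: zeros produce empty CSV fields
    double[][] m = { { 1.0, 0.0, 3.5 }, { 0.0, 2.0, 0.0 } };
    TestUtils.writeCSVTestMatrix("/tmp/testMatrix.csv", m);
    // resulting file contents:
    // 1.0,,3.5
    // ,2.0,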