Example usage for org.apache.commons.csv CSVFormat EXCEL

List of usage examples for org.apache.commons.csv CSVFormat EXCEL

Introduction

In this page you can find the example usage for org.apache.commons.csv CSVFormat EXCEL.

Prototype

CSVFormat EXCEL

To view the source code for org.apache.commons.csv CSVFormat EXCEL, click the Source Link below.

Click Source Link

Document

Excel file format (using a comma as the value delimiter).

Usage

From source file:edu.uri.egr.hermes.manipulators.FileLog.java

/**
 * Appends the current value pool as a single CSV record to the log file.
 *
 * Fix: the printer is now managed by try-with-resources, so it (and the
 * underlying writer) is closed even when printRecord throws; the original
 * leaked the writer on failure. The file is opened in append mode.
 */
public void flush() {
    try (CSVPrinter printer = CSVFormat.EXCEL.print(new BufferedWriter(new FileWriter(file, true)))) {
        printer.printRecord(valuePool);
    } catch (IOException e) {
        Timber.e("Failed to write log file: %s", e.getMessage());
        return; // write failed — do not expose a possibly incomplete file
    }

    // Only make the file visible after a successful write and close.
    if (enableAutoVisible)
        Hermes.File.makeVisible(file);
}

From source file:br.edimarmanica.trinity.intrasitemapping.manual.OffsetToRule.java

/**
 * Reads every offset CSV produced for the site and converts each row into a
 * rule-level record via the attribute-to-group mappings, emitting each page
 * at most once across all offset files.
 *
 * Fix: the separate FileNotFoundException catch was redundant — it is a
 * subclass of IOException and had an identical body — so the two handlers
 * are merged; the two nested try-with-resources are merged into one.
 */
public void execute() {
    readMappings();

    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");

    // Process every *.csv file in the offset directory.
    for (File offset : dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".csv");
        }
    })) {

        try (Reader in = new FileReader(offset);
                CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {
            for (CSVRecord record : parser) {
                String page = record.get(0);
                // Skip pages already emitted by an earlier offset file.
                if (pages.contains(page)) {
                    continue;
                } else {
                    pages.add(page);
                }

                List<String> dataRecord = new ArrayList<>();
                for (Attribute attr : site.getDomain().getAttributes()) {
                    try {
                        int group = mappings.get(offset.getName()).get(attr.getAttributeID());

                        // -1 marks an attribute with no mapped column in this offset.
                        if (group != -1) {
                            dataRecord.add(record.get(group));
                        } else {
                            dataRecord.add("");
                        }
                    } catch (Exception ex) {
                        // Missing mapping or out-of-range column index: emit an
                        // empty value rather than aborting the whole extraction.
                        dataRecord.add("");
                    }
                }
                print(page, dataRecord);
            }
        } catch (IOException ex) {
            // Covers FileNotFoundException as well (it is an IOException).
            // NOTE(review): logs under Mapping.class, not OffsetToRule.class — confirm intentional.
            Logger.getLogger(Mapping.class.getName()).log(Level.SEVERE, null, ex);
        }

    }

}

From source file:com.blackducksoftware.integration.hubdiff.HubDiffTest.java

/**
 * Verifies that HubDiff's CSV export matches the expected fixture file
 * record-by-record.
 *
 * Fix: both parsers are now opened in try-with-resources, so they are closed
 * even when an assertion fails; the original only closed them after all
 * assertions passed, leaking both readers on test failure.
 */
@Test
public void csvTest() throws IOException, IllegalArgumentException, EncryptionException,
        HubIntegrationException, JSONException {
    HubDiff hubDiff = new HubDiff(doc1, doc2);

    hubDiff.writeDiffAsCSV(actualFile);

    try (CSVParser expectedParser = new CSVParser(new FileReader(expectedFile), CSVFormat.EXCEL);
            CSVParser actualParser = new CSVParser(new FileReader(actualFile), CSVFormat.EXCEL)) {
        List<CSVRecord> expectedRecords = expectedParser.getRecords();
        List<CSVRecord> actualRecords = actualParser.getRecords();

        assertEquals(expectedRecords.size(), actualRecords.size());

        // Compare record-by-record; toString gives a stable field-list rendering.
        for (int i = 0; i < expectedRecords.size(); i++) {
            String expected = expectedRecords.get(i).toString();
            String actual = actualRecords.get(i).toString();
            assertEquals(expected, actual);
        }
    }
}

From source file:com.xoriant.akka.mongodb.bulkimport.actor.FileReaderActor.java

/**
 * Streams a header-mapped CSV file and forwards its person records to the
 * Mongo insertion actor in batches of {@code batchSize}, followed by an
 * end-of-file marker.
 *
 * Fix: the original finally block called fileReader.close() unguarded — a
 * NullPointerException if new FileReader(...) had thrown — and skipped
 * csvFileParser.close() whenever fileReader.close() threw. try-with-resources
 * closes both safely in all paths.
 *
 * @param filePath path of the CSV file to import
 */
private void readAndInsertCSV(String filePath) {
    // Create the CSVFormat object with the header mapping.
    CSVFormat csvFileFormat = CSVFormat.EXCEL.withHeader(FILE_HEADER_MAPPING);

    try (FileReader fileReader = new FileReader(filePath);
            CSVParser csvFileParser = new CSVParser(fileReader, csvFileFormat)) {

        List<CSVRecord> csvRecords = csvFileParser.getRecords();
        CSVRecordBatchMsg csvRecordBatch = new CSVRecordBatchMsg();
        boolean batchSent = false;

        // Skip the header row (index 0) and read the remaining CSV records.
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord record = csvRecords.get(i);
            BasicDBObject person = new BasicDBObject();
            person.put(PERSON_GENDER, record.get(PERSON_GENDER));
            person.put(PERSON_TITLE, record.get(PERSON_TITLE));
            person.put(PERSON_NAMESET, record.get(PERSON_NAMESET));
            person.put(PERSON_SURNAME, record.get(PERSON_SURNAME));
            person.put(PERSON_CITY, record.get(PERSON_CITY));
            person.put(PERSON_STATE, record.get(PERSON_STATE));
            person.put(PERSON_ZIPCODE, record.get(PERSON_ZIPCODE));
            csvRecordBatch.add(person);
            batchSent = false;

            // Ship a full batch and start a fresh one.
            if (i % batchSize == 0) {
                batchSentCounter++;
                csvRecordBatch.setBatchNo(batchSentCounter);
                mongoInsertionActor.tell(csvRecordBatch, getSelf());
                csvRecordBatch = new CSVRecordBatchMsg();
                batchSent = true;
            }
        }

        // A partial batch may remain if the record count is not a multiple of
        // batchSize; send it explicitly.
        if (!batchSent) {
            batchSentCounter++;
            csvRecordBatch.setBatchNo(batchSentCounter);
            mongoInsertionActor.tell(csvRecordBatch, getSelf());
        }
        mongoInsertionActor.tell(new EndOfFileMsg(), getSelf());
        System.out.println("FileReaderActor: EOF sent");

    } catch (Exception e) {
        System.out.println("Error in CsvFileReader !!!" + e.getMessage());
    }

}

From source file:io.heming.accountbook.util.Importer.java

/**
 * Exports all records to the given file as EXCEL-formatted CSV, quoting
 * fields with double quotes.
 *
 * Fixes: the CSVPrinter is now closed via try-with-resources (the original
 * never closed it, relying on the writer's close alone), and the unused
 * local {@code total} is removed.
 *
 * @param file destination CSV file (overwritten)
 * @return number of records written
 * @throws Exception if writing fails or the record list cannot be fetched
 */
public int exportRecords(File file) throws Exception {
    int count = 0;
    // Dates are serialized as ISO-style yyyy-MM-dd.
    DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
    try (Writer out = new FileWriter(file);
            CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withQuote('"'))) {
        List<Record> records = recordFacade.list();
        for (Record record : records) {
            Integer id = record.getId();
            String category = record.getCategory().getName();
            Double price = record.getPrice();
            String phone = record.getPhone();
            String date = format.format(record.getDate());
            String note = record.getNote();
            printer.printRecord(id, category, price, phone, date, note);
            count++;
        }
    }
    return count;
}

From source file:br.edimarmanica.weir2.rule.filter.RulesFilter.java

/**
 * Loads the names of the rules that survived filtering for the given site,
 * read from the "RULE" column of the site's filter.csv.
 *
 * Fix: the separate FileNotFoundException catch was redundant — it is a
 * subclass of IOException and had an identical body — so the handlers are
 * merged.
 *
 * @param site the site whose filter.csv is read
 * @return the remaining rule names; empty if the file cannot be read
 */
public static List<String> loadFilteredRules(Site site) {
    List<String> remainingRules = new ArrayList<>();

    try (Reader in = new FileReader(new File(Paths.PATH_WEIR_V2 + "/" + site.getPath() + "/filter.csv"));
            CSVParser parser = new CSVParser(in, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord record : parser) { // one record per remaining rule
            remainingRules.add(record.get("RULE"));
        }
    } catch (IOException ex) {
        // Covers FileNotFoundException as well.
        // NOTE(review): logs under RulesDataTypeController, not RulesFilter — confirm intentional.
        Logger.getLogger(RulesDataTypeController.class.getName()).log(Level.SEVERE, null, ex);
    }
    return remainingRules;
}

From source file:com.nuevebit.miroculus.mrna.cli.DatabasePopulator.java

/**
 * Parses a CSV string of miRNA–disease correlation data and persists each
 * row's entities (miRNA, disease, author, publication, discovery method)
 * into their repositories, creating them on first sight, then saves one
 * CorrelationDiscovery per row.
 *
 * Column layout used below (as read by record.get): 0 = miRNA name,
 * 1 = disease name, 2 = correlation value, 4 = author name, 5 = journal,
 * 6 = publication title, 7 = year, 8 = discovery method, 9 = description.
 * NOTE(review): columns 3 is never read — confirm the source format.
 *
 * @param csv the full CSV content (EXCEL format, first row is a header)
 * @throws IOException if parsing the CSV content fails
 */
private void parseCSV(String csv) throws IOException {
    CSVParser csvParser = CSVParser.parse(csv, CSVFormat.EXCEL);

    Iterator<CSVRecord> records = csvParser.iterator();
    // ignore headers
    records.next();

    // read line by line
    while (records.hasNext()) {
        CSVRecord record = records.next();

        // normalize the name (remove *)
        String miRNAName = MiRNA.normalizeName(record.get(0));
        MiRNA miRNA = miRNARepository.findByName(miRNAName);

        if (miRNA == null) { // first time this miRNA is seen — create it
            miRNA = miRNARepository.save(new MiRNA(miRNAName));
        }

        String diseaseName = record.get(1).toLowerCase().trim();
        Disease disease = diseaseRepository.findByName(diseaseName);

        if (disease == null) {
            // New disease: persist it with a default mortality rate of 0.
            disease = diseaseRepository.save(new Disease(diseaseName));
            disease.setMortalityRate(0d);
        }

        String authorName = record.get(4).trim();
        Author author = authorRepository.findByName(authorName);

        if (author == null) {
            author = authorRepository.save(new Author(authorName));
        }

        String publicationTitle = record.get(6).trim();
        String publicationJournal = record.get(5).trim();

        // Publications are deduplicated by (title, journal).
        Publication pub = publicationRepository.findByNameAndJournal(publicationTitle, publicationJournal);

        if (pub == null) {
            pub = new Publication(publicationTitle, publicationJournal);
            pub.setAuthor(author);
            String year = record.get(7);
            pub.setYear(Integer.valueOf(year));
            pub.setDescription(record.get(9).trim());

            pub = publicationRepository.save(pub);

        }

        String methodName = record.get(8).trim();
        DiscoveryMethod method = discoveryMethodRepository.findByName(methodName);

        if (method == null) {
            method = discoveryMethodRepository.save(new DiscoveryMethod(methodName));
        }

        // Column 2 holds the correlation value (parsed as an int).
        CorrelationDiscovery correlation = new CorrelationDiscovery(miRNA, disease,
                Integer.valueOf(record.get(2)));

        correlation.setPublication(pub);
        correlation.setMethod(method);

        // save the found correlation
        correlationDiscoveryRepository.save(correlation);
    }
}

From source file:br.edimarmanica.weir2.rule.filter.RulesFilter.java

/**
 * Writes the remaining rule names to the site's filter.csv, one "RULE"
 * column with a header row, creating the output directory if needed.
 */
private void persiste() {
    // Make sure the site's output directory exists before writing.
    File outputDir = new File(Paths.PATH_WEIR_V2 + "/" + site.getPath());
    outputDir.mkdirs();

    File target = new File(outputDir.getAbsolutePath() + "/filter.csv");
    String[] header = { "RULE" };
    CSVFormat format = CSVFormat.EXCEL.withHeader(header);

    try (Writer writer = new FileWriter(target);
            CSVPrinter printer = new CSVPrinter(writer, format)) {
        // One single-column record per remaining rule.
        for (String rule : remainingRules) {
            List<String> row = new ArrayList<>();
            row.add(rule);
            printer.printRecord(row);
        }
    } catch (IOException ex) {
        Logger.getLogger(RulesFilter.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MergeOffsets.java

/**
 * Writes the extracted (page URL, value) pairs of one offset into per-rule
 * CSV files (rule_<map>.csv), one file per mapped rule column.
 *
 * The {@code append} field controls the write mode: on the first call it is
 * false, so each rule file is created fresh with a header; it is set to true
 * at the end, so every later call appends without a header.
 * NOTE(review): {@code append} is an instance field shared across calls —
 * confirm single-threaded use.
 *
 * @param indexOffset index of the offset file being merged
 * @param lines       rows of the offset; column 0 is the page URL, columns
 *                    1..n hold the values extracted by each rule
 */
private void print(int indexOffset, List<List<String>> lines) {
    for (int indexRule = 1; indexRule < lines.get(0).size(); indexRule++) {
        int map = mc.getSpecificMap(indexOffset, indexRule);
        if (map == -1) { // no mapping for this rule column — skip it
            continue;
        }

        File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/extracted_values");
        dir.mkdirs();
        File file = new File(dir.getAbsolutePath() + "/rule_" + map + ".csv");

        // Header only on the very first write of each run; appends thereafter.
        CSVFormat format;
        if (append) {
            format = CSVFormat.EXCEL;
        } else {
            String[] header = { "URL", "EXTRACTED VALUE" };
            format = CSVFormat.EXCEL.withHeader(header);
        }

        try (Writer out = new FileWriter(file, append)) {
            try (CSVPrinter csvFilePrinter = new CSVPrinter(out, format)) {

                for (int indexRegistro = 0; indexRegistro < lines.size(); indexRegistro++) {
                    String page = lines.get(indexRegistro).get(0);
                    String value = lines.get(indexRegistro).get(indexRule);

                    if (value.isEmpty()) { // no extracted value for this page
                        continue;
                    }

                    List<String> dataRecord = new ArrayList<>();
                    dataRecord.add(page);
                    dataRecord.add(value);
                    csvFilePrinter.printRecord(dataRecord);
                }
            }
        } catch (IOException ex) {
            Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    // All subsequent calls (and rules) append to existing files.
    append = true;
}

From source file:com.hack23.cia.service.external.vdem.impl.VdemServiceImpl.java

/**
 * Gets the country question data./*from  ww  w.java 2  s.co  m*/
 *
 * @return the country question data
 */
@Override
public List<CountryQuestionData> getCountryQuestionData() {
    final List<CountryQuestionData> list = new ArrayList<>();

    final List<Question> questions = getQuestions();

    try {
        final Reader in = new InputStreamReader(new URL(VDEM_DATA_DOWNLOAD_URL).openStream());

        final CSVParser parser = new CSVParser(in, CSVFormat.EXCEL.withHeader().withDelimiter(','));

        for (final CSVRecord record : parser) {
            final String countryName = record.get("country_name");
            final String countryId = record.get("country_id");
            final String countryTextId = record.get("country_text_id");
            final String year = record.get("year");
            final String gapStart = record.get("gapstart");
            final String gapEnd = record.get("gapend");
            final String codingEnd = record.get("codingend");
            final String cowCode = record.get("COWcode");

            final int currentSize = list.size();
            LOGGER.info("Loading vdem data for country:{} year {} ", countryName, year);

            for (final Question question : questions) {
                addQuestionDataToList(list, record, countryName, countryId, countryTextId, year, gapStart,
                        gapEnd, codingEnd, cowCode, question);
            }

            final int afterSize = list.size();
            LOGGER.info("Found vdem data for country:{} year:{} data points:{}", countryName, year,
                    afterSize - currentSize);

        }
        parser.close();

    } catch (final IOException e) {

        LOGGER.warn("Problem loading vdem data", e);
    }

    return list;
}