Example usage for org.apache.commons.csv CSVParser close

List of usage examples for org.apache.commons.csv CSVParser close

Introduction

On this page you can find an example usage for org.apache.commons.csv CSVParser close.

Prototype

@Override
public void close() throws IOException 

Source Link

Document

Closes resources.

Usage

From source file:edu.clemson.lph.utils.CSVParserWrapper.java

/**
 * Copies every record out of the given parser into this wrapper's row list,
 * then closes the parser.
 *
 * @param pIn the parser to drain; a null parser leaves the wrapper empty
 * @throws IOException if reading the records fails
 */
public CSVParserWrapper(CSVParser pIn) throws IOException {
    // Nothing to copy when no parser was supplied.
    if (pIn == null)
        return;
    try {
        // Materialize each record as an independent List<String> row so the
        // parser can be released immediately afterwards.
        for (CSVRecord record : pIn.getRecords()) {
            List<String> row = new ArrayList<String>();
            for (int col = 0; col < record.size(); col++) {
                row.add(record.get(col));
            }
            aRows.add(row);
        }
        iRows = aRows.size();
        iCurrent = 1; // NOTE(review): starts at 1, presumably skipping a header row — confirm
    } finally {
        // Release the parser even if reading failed part-way through.
        pIn.close();
    }
}

From source file:edu.emory.mathcs.nlp.zzz.CSVRadiology.java

/**
 * Tokenizes the text in column 0 of every record of {@code inputFile} and
 * writes each record's tokens to its own numbered output file.
 *
 * @param inputFile   CSV file to read
 * @param outputStart index used to name the first generated output file
 * @throws Exception if reading, parsing, or writing fails
 */
public void tokenize(String inputFile, int outputStart) throws Exception {
    String inputPath = FileUtils.getPath(inputFile) + "/";
    Tokenizer tokenizer = new EnglishTokenizer();

    // Pre-compile the split patterns: BEFORE patterns get a newline
    // prepended, AFTER patterns get one appended.
    P_BEFORE = new ArrayList<>();
    P_AFTER = new ArrayList<>();
    for (String s : BEFORE)
        P_BEFORE.add(new Pair<>(Pattern.compile(s), "\n" + s));
    for (String s : AFTER)
        P_AFTER.add(new Pair<>(Pattern.compile(s), s + "\n"));

    // try-with-resources: the original leaked the parser (and each output
    // stream) if any record threw mid-loop.
    try (CSVParser parser = new CSVParser(IOUtils.createBufferedReader(inputFile), CSVFormat.DEFAULT)) {
        List<CSVRecord> records = parser.getRecords();
        for (int i = 0; i < records.size(); i++) {
            try (PrintStream fout = IOUtils
                    .createBufferedPrintStream(getOuputFilename(inputPath, i + outputStart))) {
                for (List<Token> tokens : tokenizer.segmentize(records.get(i).get(0)))
                    print(fout, tokens);
            }
        }
    }
}

From source file:GUI.ReadFile.java

/**
 * Reads a phase CSV file (with header) into a list of {@link Phase} objects.
 *
 * @param fileName path of the CSV file to read
 * @return the parsed phases, or null if the file is missing or unreadable
 */
public List<Phase> readPhase(String fileName) {
    CSVFormat csvFileFormat = CSVFormat.DEFAULT.withHeader(PHASE_HEADER_MAPPING);
    List<Phase> phase_list = new ArrayList<>();
    // try-with-resources closes reader and parser on every path; the
    // original leaked both whenever an exception was thrown mid-read.
    try (FileReader fileReader = new FileReader(fileName);
            CSVParser csvFileParser = new CSVParser(fileReader, csvFileFormat)) {
        List<CSVRecord> csvRecords = csvFileParser.getRecords();
        // Start at 1 to skip the header record.
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord record = csvRecords.get(i);
            Phase p = new Phase(record.get(st_time), record.get(end_time), record.get(category));
            phase_list.add(p);
        }
        System.out.println(fileName + " Phase file read!");
    } catch (FileNotFoundException e) {
        System.out.println(fileName + " Phase file missing ...");
        return null;
    } catch (IOException ex) {
        System.out.println(fileName + " csv file error !!!");
        return null;
    } catch (ParseException ex) {
        System.out.println(fileName + " phase parsing error !!!");
        return null;
    }
    return phase_list;
}

From source file:edu.emory.mathcs.nlp.zzz.CSVSentiment.java

/**
 * Decodes each CSV record (column 6) into an NLP document, tags it with the
 * sentiment label from column 0, and writes each document to its own file.
 *
 * @param inputFile CSV file to process
 * @throws Exception if reading, decoding, or writing fails
 */
public void categorize(String inputFile) throws Exception {
    System.out.println(inputFile);

    // Loop-invariant: strips the last 4 characters (presumably ".csv" — confirm).
    String outputDir = inputFile.substring(0, inputFile.length() - 4);

    // try-with-resources: the original leaked the parser (and fout) if
    // decoding or writing threw mid-loop.
    try (CSVParser parser = new CSVParser(IOUtils.createBufferedReader(inputFile), CSVFormat.DEFAULT)) {
        List<CSVRecord> records = parser.getRecords();
        // Start at 1 to skip the header record (the original used continue).
        for (int i = 1; i < records.size(); i++) {
            CSVRecord record = records.get(i);
            List<NLPNode[]> document = decode.decodeDocument(record.get(6));
            document.get(0)[1].putFeat("sent", record.get(0));

            try (PrintStream fout = IOUtils.createBufferedPrintStream(
                    outputDir + "/" + FileUtils.getBaseName(outputDir) + "_" + i + ".nlp")) {
                for (NLPNode[] nodes : document)
                    fout.println(decode.toString(nodes) + "\n");
            }
        }
    }
}

From source file:com.archimatetool.csv.importer.CSVImporter.java

/**
 * Get all records for a CSV file./*from   w ww.  jav  a  2s  . c  o m*/
 * This is a brute-force approach to try with a comma delimiter first. If that fails then
 * try a semicolon, and if that fails, a tab.
 * 
 * @param file The file to open
 * @return Records, which may be empty but never null
 * @throws IOException
 */
/**
 * Get all records for a CSV file.
 * This is a brute-force approach: try a comma delimiter first. If the parser
 * reports an encapsulation error, retry with a semicolon, and then a tab.
 * 
 * @param file The file to open
 * @return Records, which may be empty but never null
 * @throws IOException if the file cannot be parsed with any supported delimiter
 */
List<CSVRecord> getRecords(File file) throws IOException {
    // Commons CSV signals a wrong delimiter only through this message text,
    // so string matching is unfortunately the only detection available.
    String errorMessage = "invalid char between encapsulated token and delimiter"; //$NON-NLS-1$
    char[] delimiters = { ',', ';', '\t' };

    for (int i = 0; i < delimiters.length; i++) {
        // try-with-resources closes the parser (and its reader) on every
        // path, replacing the original's duplicated close/retry nesting.
        try (CSVParser parser = new CSVParser(new FileReader(file),
                CSVFormat.DEFAULT.withDelimiter(delimiters[i]))) {
            return parser.getRecords();
        } catch (IOException ex) {
            boolean wrongDelimiter = ex.getMessage() != null && ex.getMessage().contains(errorMessage);
            // Re-throw when the failure is not a delimiter mismatch, or when
            // there is no delimiter left to try (same semantics as before).
            if (!wrongDelimiter || i == delimiters.length - 1) {
                throw ex;
            }
        }
    }
    return new ArrayList<CSVRecord>(); // unreachable; satisfies the compiler
}

From source file:com.mahisoft.elasticsearchprediction.engine.ElasticsearchGenericIndexEngine.java

/**
 * Streams the CSV data file into the given index: record 1 supplies the
 * column headers, the index is created on record 2, and every data record
 * is added as a value.
 *
 * @param dataFile        CSV file (RFC 4180, UTF-8) to load
 * @param client          Elasticsearch client used for index operations
 * @param indexName       name of the target index
 * @param mappingFilename file the generated mapping is written to
 * @throws IOException if reading or writing fails
 */
private void loadData(File dataFile, Client client, String indexName, String mappingFilename)
        throws IOException {
    List<String> headers = new ArrayList<String>();

    // try-with-resources replaces the manual null-checked finally block.
    try (PrintWriter mappingFileWriter = new PrintWriter(mappingFilename, Constants.UTF8);
            CSVParser parser = CSVParser.parse(dataFile, Charset.forName(Constants.UTF8),
                    CSVFormat.RFC4180)) {
        for (CSVRecord csvRecord : parser) {
            // Record 1 is the header row: capture the column names only.
            if (csvRecord.getRecordNumber() == 1) {
                addHeaders(csvRecord, headers);
                continue;
            }
            // Create the index lazily when the first data record arrives.
            if (csvRecord.getRecordNumber() == 2) {
                createIndex(client, indexName, mappingFileWriter, headers, csvRecord);
            }
            addValue(client, indexName, headers, csvRecord);
        }
    }

    LOGGER.info("Done!");
}

From source file:com.github.jferard.pgloaderutils.sniffer.csv.CSVOptionalHeaderSniffer.java

@Override
public void sniff(final InputStream inputStream, final int size) throws IOException {
    final Reader streamReader = new InputStreamReader(inputStream, this.charset);

    final CSVParser parser = new CSVParser(streamReader, this.csvFormat);
    try {//from   www  .j av  a2s. c  om
        final Iterator<CSVRecord> iterator = parser.iterator();

        if (iterator.hasNext()) {
            final CSVRecord firstRowRecord = iterator.next();
            final int firstRowSize = firstRowRecord.size();

            final char[] firstRowSignature = this.rowSignaturesAnalyzer.getSignature(firstRowRecord,
                    firstRowSize);

            if (this.containsAtLeastOneOnlyDigitsValue(firstRowSignature)) {
                this.header = null;
            } else {
                final char[] remainingRowsSignature = this.rowSignaturesAnalyzer
                        .getRemainingRowsSignature(iterator, firstRowSize);
                if (this.containsAtLeastOneColumnWithLetterHeaderAndDigitValues(firstRowSignature,
                        remainingRowsSignature, firstRowSize)) {
                    // copy firstRow in header
                    for (final String s : firstRowRecord)
                        this.header.add(s);
                }
            }
        } else
            this.header = null;
    } finally {
        parser.close();
    }
}

From source file:com.hack23.cia.service.external.vdem.impl.VdemServiceImpl.java

/**
 * Gets the country question data.//  w w w .ja va2s . c  om
 *
 * @return the country question data
 */
@Override
public List<CountryQuestionData> getCountryQuestionData() {
    final List<CountryQuestionData> list = new ArrayList<>();

    final List<Question> questions = getQuestions();

    try {
        final Reader in = new InputStreamReader(new URL(VDEM_DATA_DOWNLOAD_URL).openStream());

        final CSVParser parser = new CSVParser(in, CSVFormat.EXCEL.withHeader().withDelimiter(','));

        for (final CSVRecord record : parser) {
            final String countryName = record.get("country_name");
            final String countryId = record.get("country_id");
            final String countryTextId = record.get("country_text_id");
            final String year = record.get("year");
            final String gapStart = record.get("gapstart");
            final String gapEnd = record.get("gapend");
            final String codingEnd = record.get("codingend");
            final String cowCode = record.get("COWcode");

            final int currentSize = list.size();
            LOGGER.info("Loading vdem data for country:{} year {} ", countryName, year);

            for (final Question question : questions) {
                addQuestionDataToList(list, record, countryName, countryId, countryTextId, year, gapStart,
                        gapEnd, codingEnd, cowCode, question);
            }

            final int afterSize = list.size();
            LOGGER.info("Found vdem data for country:{} year:{} data points:{}", countryName, year,
                    afterSize - currentSize);

        }
        parser.close();

    } catch (final IOException e) {

        LOGGER.warn("Problem loading vdem data", e);
    }

    return list;
}

From source file:com.wx3.galacdecks.Bootstrap.java

/**
 * Imports game systems from a CSV file (with header row) into the datastore.
 *
 * @param datastore datastore the systems are created in
 * @param path      path of the CSV file to read
 * @throws IOException if the file cannot be read or parsed
 */
private void importSystems(GameDatastore datastore, String path) throws IOException {
    int count = 0;
    // try-with-resources closes reader and parser even when a record fails
    // (the original leaked both on any exception).
    try (Reader reader = new FileReader(path);
            CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord record : parser) {
            GameSystem system = new GameSystem();
            system.id = record.get("id");
            system.name = record.get("name");
            system.description = record.get("description");
            // equalsIgnoreCase avoids the default-locale pitfalls of
            // toLowerCase()/toUpperCase() (e.g. the Turkish dotless i).
            system.usePlayerDecks = !record.get("usePlayerDecks").equalsIgnoreCase("n");
            system.pvp = record.get("pvp").equalsIgnoreCase("Y");
            system.rootRules = new ArrayList<>(Arrays.asList(record.get("rootRules").split(",")));
            datastore.createSystem(system);
            ++count;
        }
    }
    logger.info("Imported " + count + " systems");
}

From source file:com.hurence.logisland.service.cache.CSVKeyValueCacheService.java

@Override
// @OnEnabled/*  ww w  . j a  v  a 2s . c  o  m*/
public void init(ControllerServiceInitializationContext context) throws InitializationException {
    super.init(context);
    try {

        if (context.getPropertyValue(DATABASE_FILE_URI).isSet()) {
            dbUri = context.getPropertyValue(DATABASE_FILE_URI).asString();
        }

        if (context.getPropertyValue(DATABASE_FILE_PATH).isSet()) {
            dbPath = context.getPropertyValue(DATABASE_FILE_PATH).asString();
        }

        if ((dbUri == null) && (dbPath == null)) {
            throw new Exception(
                    "You must declare " + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }

        InputStream is = null;
        if (dbUri != null) {
            logger.info("opening csv database from hdfs : " + dbUri);
            is = initFromUri(dbUri);
        }

        if (dbPath != null) {
            logger.info("opening csv database from local fs : " + dbPath);
            is = initFromPath(context, dbPath);
        }

        if (is == null) {
            throw new InitializationException("Something went wrong while initializing csv db from "
                    + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }

        // final Reader reader = new InputStreamReader(is);
        CSVFormat format = CSVFormat.DEFAULT;
        if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL.getValue())) {
            format = CSVFormat.EXCEL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL_FR.getValue())) {
            format = CSVFormat.EXCEL.withDelimiter(';');
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_MYSQL.getValue())) {
            format = CSVFormat.MYSQL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_RFC4180.getValue())) {
            format = CSVFormat.RFC4180;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_TDF.getValue())) {
            format = CSVFormat.TDF;
        }

        if (context.getPropertyValue(CSV_HEADER).isSet()) {
            String[] columnNames = context.getPropertyValue(CSV_HEADER).asString().split(",");
            for (String name : columnNames) {
                headers.get().put(name, "string");
            }
            format = format.withHeader(columnNames);
        } else if (context.getPropertyValue(FIRST_LINE_HEADER).isSet()) {
            format = format.withFirstRecordAsHeader();
        } else {
            throw new InitializationException("unable to get headers from somewhere");
        }

        Charset charset = Charset.forName("UTF-8");
        if (context.getPropertyValue(ENCODING_CHARSET).isSet()) {
            String encoding = context.getPropertyValue(ENCODING_CHARSET).asString();
            charset = Charset.forName(encoding);
        }

        rowKey = context.getPropertyValue(ROW_KEY).asString();
        CSVParser parser = CSVParser.parse(is, charset, format); //new CSVParser(reader, format);

        /*
        *    CSVParser parser = null;
                
        if (context.getPropertyValue(ENCODING_CHARSET).isSet()) {
        String encoding = context.getPropertyValue(ENCODING_CHARSET).asString();
        parser = CSVParser.parse(reader, Charset.forName(encoding), format);
        } else {
        parser = CSVParser.parse(reader, format);
        }
        */
        long count = 0;
        try {
            final Set<String> columnNames = parser.getHeaderMap().keySet();
            for (final CSVRecord record : parser) {

                Record logislandRecord = new StandardRecord();
                for (final String column : columnNames) {
                    logislandRecord.setStringField(column, record.get(column));
                }

                set(logislandRecord.getField(rowKey).asString(), logislandRecord);
                count++;
            }
        } finally {
            logger.info("successfully loaded " + count + " records from CSV file");

            parser.close();
            is.close();
        }

    } catch (Exception e) {
        getLogger().error("Could not load database file: {}", new Object[] { e.getMessage() });
        throw new InitializationException(e);
    }
}