Example usage for org.apache.commons.csv CSVRecord get

Introduction

This page lists example usages of org.apache.commons.csv CSVRecord.get.

Prototype

public String get(final String name) 

Document

Returns a value by name.
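A minimal sketch of the by-name accessor (assuming a CSV with a header row; get(String) resolves the column through the parser's header mapping and, in recent Commons CSV versions, throws IllegalStateException when the parser was created without one):

    import java.io.Reader;
    import java.io.StringReader;
    import org.apache.commons.csv.CSVFormat;
    import org.apache.commons.csv.CSVParser;
    import org.apache.commons.csv.CSVRecord;

    public class CsvRecordGetExample {
        public static void main(String[] args) throws Exception {
            Reader in = new StringReader("name,age\nalice,30\nbob,42\n");
            try (CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader())) {
                for (CSVRecord record : parser) {
                    // access by header name; get(int) is the positional alternative
                    System.out.println(record.get("name") + " is " + record.get("age"));
                }
            }
        }
    }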

Usage

From source file:core.reporting.ImportFromFile.java

/**
 * Validates the content of a CSV file against the column definition file. This method stores the valid
 * records into a buffer and logs any error found in the input file to the system log.
 *
 * @param ircdlist record list parsed from the input file
 *
 * @return number of errors found
 */
private int validateRecord(Iterable<CSVRecord> ircdlist) {
    // clear all previous log for import flag
    SystemLog.clearLogByFlag("ie");
    // column definition
    Iterable<CSVRecord> coldefl = null;
    try {
        Reader in = new FileReader(TResourceUtils.getFile(columnModelDef + ".csv"));
        coldefl = (new CSVParser(in, CSVFormat.EXCEL.withHeader()).getRecords());
    } catch (Exception e) {
        SystemLog.logException(e);
    }
    SimpleDateFormat dfomat = null;
    tempBuffer.clear();
    int line = 0;
    int error = 0;
    int coldeflen = 0;
    fileColumns = "";
    for (CSVRecord ircd : ircdlist) {
        Record frcd = new Record(recordModel);
        line++;
        // premature return once the error count exceeds the number of defined columns
        // (coldeflen still holds the count from the previous record)
        if (error > coldeflen) {
            SystemLog.log("inputfile.msg05", "ie", "", error);
            return error;
        }
        coldeflen = 0;
        for (CSVRecord cdr : coldefl) {
            coldeflen++;
            Object iobj = null;
            String siobj = null;
            String fieldn = null;
            // contain field name
            try {
                fieldn = cdr.get("field");
                ircd.get(fieldn);
            } catch (Exception e) {
                // if field is mandatory, log error
                if (cdr.get("mandatory").equals("true")) {
                    SystemLog.log("inputfile.msg02", "ie", "", line, cdr.get("field"));
                    error++;
                }
                continue;
            }
            // value class.
            try {
                String cls = cdr.get("class");
                // String class by default
                iobj = ircd.get(fieldn);
                if (cls.equals("Integer")) {
                    iobj = Integer.parseInt(ircd.get(fieldn));
                }
                if (cls.equals("Double")) {
                    iobj = Double.parseDouble(ircd.get(fieldn));
                }
                if (cls.equals("Date")) {
                    // date may be not present
                    dfomat = new SimpleDateFormat(cdr.get("format"));
                    Date d = iobj.equals("") ? TStringUtils.ZERODATE : dfomat.parse((String) iobj);
                    iobj = new java.sql.Date(d.getTime());
                }
            } catch (Exception e) {
                SystemLog.log("inputfile.msg03", "ie", "", line, cdr.get("field"), cdr.get("class"),
                        cdr.get("format"));
                error++;
                continue;
            }
            // valid value
            siobj = ircd.get(fieldn);
            boolean vvb = true;
            String vv = cdr.get("valid values");
            if (!vv.equals("")) {
                vvb = false;
                String[] vvlst = vv.split(";");
                for (String vvi : vvlst) {
                    vvb = (siobj.equals(vvi)) ? true : vvb;
                }
            }
            if (vvb == false) {
                SystemLog.log("inputfile.msg04", "ie", "", line, cdr.get("field"), cdr.get("valid values"));
                error++;
                continue;
            }
            // no problem? add field
            String tf = cdr.get("target_field");
            fileColumns += tf + ";";
            frcd.setFieldValue(tf, iobj);
        }
        tempBuffer.add(frcd);
    }
    if (!fileColumns.isEmpty()) {
        fileColumns = fileColumns.substring(0, fileColumns.length() - 1);
    }
    return error;
}
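From the header lookups in the loop above (field, mandatory, class, format, valid values, target_field), the column definition file presumably looks something like this hypothetical sketch (all values invented for illustration; note that "valid values" entries are split on ";"):

    field,mandatory,class,format,valid values,target_field
    id,true,Integer,,,t_id
    amount,false,Double,,,t_amount
    status,true,String,,open;closed,t_status
    created,false,Date,dd/MM/yyyy,,t_created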

From source file:com.ibm.watson.developer_cloud.professor_languo.model.stack_exchange.CorpusBuilderTest.java

private void deserialiezd_duplicate_threads_should_match_original_duplicate_threads()
        throws IngestionException {

    String csvFilePath = dupCorpusBuilder.getDupThreadDirPath()
            + StackExchangeConstants.DUP_THREAD_TSV_FILE_NAME
            + StackExchangeConstants.DUP_THREAD_TSV_FILE_EXTENSION;
    File csvData = new File(csvFilePath);

    CSVParser parser;
    List<CSVRecord> records;
    try {
        parser = CSVParser.parse(csvData, Charset.defaultCharset(), CSVFormat.TDF.withHeader());
        records = parser.getRecords();
    } catch (IOException e) {
        throw new IngestionException(e);
    }

    Set<StackExchangeThread> dupThreadSet = dupCorpusBuilder.getDupThreadSetFromBinFiles();
    for (StackExchangeThread thread : dupThreadSet) {
        String binfileName = dupCorpusBuilder.getDupThreadDirPath() + thread.getId()
                + StackExchangeConstants.BIN_FILE_SUFFIX;
        CSVRecord matchRecord = null;
        for (CSVRecord record : records) {
            if (Integer.parseInt(record.get(0)) == thread.getId()) {
                matchRecord = record;
                break;
            }
        }
        assertTrue(matchRecord != null);
        // TODO: the originId is not checked yet, since that requires the new
        // method to get the origin id
        String deserTitle = matchRecord.get(1), deserBody = matchRecord.get(2),
                deserFileName = matchRecord.get(4), deserTags = matchRecord.get(5);
        assertEquals(deserTitle, thread.getQuestion().getTitle());
        assertEquals(deserBody, thread.getQuestion().getBody());
        assertEquals(deserFileName, binfileName);
        assertEquals(deserTags, thread.getConcatenatedTagsText());
    }
}

From source file:geovista.readers.csv.GeogCSVReader_old.java

public Object[] readFileStreaming(InputStream is, ArrayList<Integer> columns) {

    BufferedReader in = new BufferedReader(new InputStreamReader(is));
    Iterable<CSVRecord> parser = null;
    try {
        parser = CSVFormat.DEFAULT.withDelimiter(this.delimiter).parse(in);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    // materialize the records (rec.get(i) reads each field) so the
    // type/header heuristics below can index into the full file content
    List<String[]> rows = new ArrayList<String[]>();
    for (CSVRecord rec : parser) {
        String[] row = new String[rec.size()];
        for (int i = 0; i < rec.size(); i++) {
            row[i] = rec.get(i);
        }
        rows.add(row);
    }
    String[] headers = null;
    String[] types = null;
    int[] dataTypes = null;
    String[][] fileContent = rows.toArray(new String[rows.size()][]);
    int dataBegin;
    Object[] data;

    types = fileContent[0]; // first line tells us types
    dataTypes = new int[types.length];
    int len;
    if (types[0].equalsIgnoreCase("int") || types[0].equalsIgnoreCase("double")
            || types[0].equalsIgnoreCase("string")) {
        dataBegin = 2;
        headers = fileContent[1];
        data = new Object[headers.length + 1];// plus one for the headers
        // themselves
        len = fileContent.length - dataBegin;
        for (int i = 0; i < headers.length; i++) {
            if (types[i].equalsIgnoreCase("int")) {
                data[i + 1] = new int[len];
                dataTypes[i] = GeogCSVReader_old.DATA_TYPE_INT;
            } else if (types[i].equalsIgnoreCase("double")) {
                data[i + 1] = new double[len];
                dataTypes[i] = GeogCSVReader_old.DATA_TYPE_DOUBLE;
            } else if (types[i].equalsIgnoreCase("string")) {
                data[i + 1] = new String[len];
                dataTypes[i] = GeogCSVReader_old.DATA_TYPE_STRING;
            } else {
                throw new IllegalArgumentException("GeogCSVReader.readFile, unknown type = " + types[i]);
            }
        }
    } else {
        dataBegin = 1;
        headers = fileContent[0];
        data = new Object[headers.length + 1];// plus one for the headers
        // themselves
        len = fileContent.length - dataBegin;
        for (int i = 0; i < headers.length; i++) {
            String firstString = fileContent[1][i];
            String secondString = fileContent[2][i];
            String thirdString = fileContent[3][i];
            String lastString = fileContent[fileContent.length - 1][i];

            if (isNumeric(firstString) && isNumeric(secondString) && isNumeric(thirdString)
                    && isNumeric(lastString)) {
                if (!isInt(fileContent, i)) {
                    data[i + 1] = new double[len];
                    dataTypes[i] = GeogCSVReader_old.DATA_TYPE_DOUBLE;
                } else {
                    data[i + 1] = new int[len];
                    dataTypes[i] = GeogCSVReader_old.DATA_TYPE_INT;
                }
            } else {
                data[i + 1] = new String[len];
                dataTypes[i] = GeogCSVReader_old.DATA_TYPE_STRING;
            }
        }
    }
    data[0] = headers;

    String[] line = null;

    for (int row = dataBegin; row < len + dataBegin; row++) {

        line = fileContent[row];

        int[] ints = null;
        double[] doubles = null;
        String[] strings = null;

        for (int column = 0; column < line.length; column++) {
            String item = line[column];
            if (dataTypes[column] == GeogCSVReader_old.DATA_TYPE_INT) {

                if (Arrays.binarySearch(GeogCSVReader_old.NULL_STRINGS, item) >= 0) {
                    ints = (int[]) data[column + 1];
                    ints[row - dataBegin] = GeogCSVReader_old.NULL_INT;
                } else {
                    ints = (int[]) data[column + 1];
                    try {
                        ints[row - dataBegin] = Integer.parseInt(item);
                    } catch (NumberFormatException nfe) {
                        logger.warning("could not parse " + item + " in column " + column);
                        // nfe.printStackTrace();
                        ints[row - dataBegin] = GeogCSVReader_old.NULL_INT;
                    }
                }
            } else if (dataTypes[column] == GeogCSVReader_old.DATA_TYPE_DOUBLE) {
                if (Arrays.binarySearch(GeogCSVReader_old.NULL_STRINGS, item) >= 0) {
                    doubles = (double[]) data[column + 1];
                    doubles[row - dataBegin] = GeogCSVReader_old.NULL_DOUBLE;
                } else {
                    doubles = (double[]) data[column + 1];
                    doubles[row - dataBegin] = parseDouble(item);
                }
            } else if (dataTypes[column] == GeogCSVReader_old.DATA_TYPE_STRING) {
                strings = (String[]) data[column + 1];
                strings[row - dataBegin] = item;
            } else {
                throw new IllegalArgumentException("GeogCSVReader.readFile, unknown type = " + types[column]);
            } // end if

        } // next column
    } // next row
    return data;

}

From source file:io.ecarf.core.cloud.task.processor.reason.phase1.DoReasonTask5.java

/**
 * Loops through the instance triples stored in the term's file and generates all the
 * triples matching the schema triples set, writing them to the supplied writer.
 *
 * @param term the term whose instance triples are processed
 * @param select the selected columns
 * @param schemaTriples the schema triples to match against
 * @param rows the number of selected rows
 * @param table the source table
 * @param writer the writer for the inferred triples
 * @return the number of inferred triples
 * @throws IOException if the instance triples file cannot be read
 */
private int inferAndSaveTriplesToFile(Term term, List<String> select, Set<Triple> schemaTriples,
        BigInteger rows, String table, PrintWriter writer) throws IOException {

    int inferredTriples = 0;
    int failedTriples = 0;

    // loop through the instance triples probably stored in a file and generate all the triples matching the schema triples set
    try (BufferedReader r = new BufferedReader(new FileReader(term.getFilename()), Constants.GZIP_BUF_SIZE)) {

        Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(r);

        // records will contain lots of duplicates
        Set<String> inferredAlready = new HashSet<String>();

        try {

            for (CSVRecord record : records) {

                String values = ((select.size() == 1) ? record.get(0) : StringUtils.join(record.values(), ','));

                if (!inferredAlready.contains(values)) {
                    inferredAlready.add(values);

                    NTriple instanceTriple = new NTriple();

                    if (select.size() == 1) {
                        instanceTriple.set(select.get(0), record.get(0));
                    } else {

                        instanceTriple.set(select, record.values());
                    }

                    for (Triple schemaTriple : schemaTriples) {
                        Rule rule = GenericRule.getRule(schemaTriple);
                        Triple inferredTriple = rule.head(schemaTriple, instanceTriple);

                        if (inferredTriple != null) {
                            writer.println(inferredTriple.toCsv());
                            inferredTriples++;
                        }
                    }

                    // this is just to avoid any memory issues
                    if (inferredAlready.size() > MAX_CACHE) {
                        inferredAlready.clear();
                        log.info("Cleared cache of inferred terms");
                    }
                } else {
                    this.duplicates++;
                }

            }
        } catch (Exception e) {
            log.error("Failed to parse selected terms", e);
            failedTriples++;
        }
    }

    //inferredFiles.add(inferredTriplesFile);
    log.info("\nSelect Triples: " + rows + ", Inferred: " + inferredTriples + ", Triples for term: " + term
            + ", Failed Triples: " + failedTriples);

    return inferredTriples;
}

From source file:com.objy.se.ClassAccessor.java

public Instance setAttributes(Instance instance, CSVRecord record) {

    // iterate and create attributes
    for (Map.Entry<String, String> entry : mapper.getAttributesMap().entrySet()) {
        //      System.out.println("Entry()" + entry.toString());
        String attrValue = record.get(entry.getValue());
        AttributeInfo attrInfo = attributeMap.get(entry.getKey());
        setAttributeValue(instance, attrInfo.attribute(), getCorrectValue(attrValue, attrInfo.logicalType()));
    }

    return instance;
}

From source file:it.newfammulfin.api.EntryResource.java

@POST
@Consumes("text/csv")
@Produces(MediaType.TEXT_PLAIN)
public Response importFromCsv(String csvData,
        @DefaultValue("false") @QueryParam("invertSign") final boolean invertSign) {
    final Group group = (Group) requestContext.getProperty(GroupRetrieverRequestFilter.GROUP);
    final Map<String, Key<Chapter>> chapterStringsMap = new HashMap<>();
    final List<CSVRecord> records;
    try {
        records = CSVParser.parse(csvData, CSVFormat.DEFAULT.withHeader()).getRecords();
    } catch (IOException e) {
        return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
                .entity(String.format("Unexpected %s: %s.", e.getClass().getSimpleName(), e.getMessage()))
                .build();
    }
    //check users
    final Set<String> userIds = new HashSet<>();
    for (String columnName : records.get(0).toMap().keySet()) {
        if (columnName.startsWith("by:")) {
            String userId = columnName.replaceFirst("by:", "");
            if (!group.getUsersMap().keySet().contains(Key.create(RegisteredUser.class, userId))) {
                return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
                        .entity(String.format("User %s not found in this group.", userId)).build();
            }
            userIds.add(userId);
        }
    }
    //build chapters
    final Set<String> chapterStringsSet = new HashSet<>();
    for (CSVRecord record : records) {
        chapterStringsSet.add(record.get("chapters"));
    }
    final List<Key<?>> createdKeys = new ArrayList<>();
    try {
        OfyService.ofy().transact(new Work<List<Key<?>>>() {
            @Override
            public List<Key<?>> run() {
                for (String chapterStrings : chapterStringsSet) {
                    List<String> pieces = Arrays.asList(chapterStrings.split(CSV_CHAPTERS_SEPARATOR));
                    Key<Chapter> parentChapterKey = null;
                    for (int i = 0; i < pieces.size(); i++) {
                        String partialChapterString = Joiner.on(CSV_CHAPTERS_SEPARATOR)
                                .join(pieces.subList(0, i + 1));
                        Key<Chapter> chapterKey = chapterStringsMap.get(partialChapterString);
                        if (chapterKey == null) {
                            chapterKey = OfyService.ofy().load().type(Chapter.class).ancestor(group)
                                    .filter("name", pieces.get(i)).filter("parentChapterKey", parentChapterKey)
                                    .keys().first().now();
                            chapterStringsMap.put(partialChapterString, chapterKey);
                        }
                        if (chapterKey == null) {
                            Chapter chapter = new Chapter(pieces.get(i), Key.create(group), parentChapterKey);
                            OfyService.ofy().save().entity(chapter).now();
                            chapterKey = Key.create(chapter);
                            createdKeys.add(chapterKey);
                            LOG.info(String.format("%s created.", chapter));
                        }
                        chapterStringsMap.put(partialChapterString, chapterKey);
                        parentChapterKey = chapterKey;
                    }
                }
                //build entries
                DateTimeFormatter formatter = DateTimeFormat.forPattern("dd/MM/YY");
                Key<Group> groupKey = Key.create(group);
                for (CSVRecord record : records) {
                    Entry entry = new Entry();
                    entry.setGroupKey(groupKey);
                    entry.setDate(LocalDate.parse(record.get("date"), formatter));
                    entry.setAmount(Money.of(CurrencyUnit.of(record.get("currency").toUpperCase()),
                            (invertSign ? -1 : 1) * Double.parseDouble(record.get("value"))));
                    if (!record.get("chapters").isEmpty()) {
                        entry.setChapterKey(chapterStringsMap.get(record.get("chapters")));
                    }
                    entry.setPayee(record.get("payee"));
                    for (String tag : record.get("tags").split(CSV_TAGS_SEPARATOR)) {
                        if (!tag.trim().isEmpty()) {
                            entry.getTags().add(tag);
                        }
                    }
                    entry.setDescription(record.get("description"));
                    entry.setNote(record.get("notes"));
                    int scale = Math.max(DEFAULT_SHARE_SCALE, entry.getAmount().getScale());
                    //by shares
                    for (String userId : userIds) {
                        String share = record.get("by:" + userId);
                        double value;
                        if (share.contains("%")) {
                            entry.setByPercentage(true);
                            value = Double.parseDouble(share.replace("%", ""));
                            value = entry.getAmount().getAmount().doubleValue() * value / 100d;
                        } else {
                            value = (invertSign ? -1 : 1) * Double.parseDouble(share);
                        }
                        entry.getByShares().put(Key.create(RegisteredUser.class, userId),
                                BigDecimal.valueOf(value).setScale(scale, RoundingMode.DOWN));
                    }
                    boolean equalByShares = checkAndBalanceZeroShares(entry.getByShares(),
                            entry.getAmount().getAmount());
                    entry.setByPercentage(entry.isByPercentage() || equalByShares);
                    //for shares
                    for (String userId : userIds) {
                        String share = record.get("for:" + userId);
                        double value;
                        if (share.contains("%")) {
                            entry.setForPercentage(true);
                            value = Double.parseDouble(share.replace("%", ""));
                            value = entry.getAmount().getAmount().doubleValue() * value / 100d;
                        } else {
                            value = (invertSign ? -1 : 1) * Double.parseDouble(share);
                        }
                        entry.getForShares().put(Key.create(RegisteredUser.class, userId),
                                BigDecimal.valueOf(value).setScale(scale, RoundingMode.DOWN));
                    }
                    boolean equalForShares = checkAndBalanceZeroShares(entry.getForShares(),
                            entry.getAmount().getAmount());
                    entry.setForPercentage(entry.isForPercentage() || equalForShares);
                    OfyService.ofy().save().entity(entry).now();
                    createdKeys.add(Key.create(entry));
                    EntryOperation operation = new EntryOperation(Key.create(group), Key.create(entry),
                            new Date(),
                            Key.create(RegisteredUser.class, securityContext.getUserPrincipal().getName()),
                            EntryOperation.Type.IMPORT);
                    OfyService.ofy().save().entity(operation).now();
                    LOG.info(String.format("%s created.", entry));
                }
                return createdKeys;
            }
        });
        //count keys
        int numberOfCreatedChapters = 0;
        int numberOfCreatedEntries = 0;
        for (Key<?> key : createdKeys) {
            if (key.getKind().equals(Entry.class.getSimpleName())) {
                numberOfCreatedEntries = numberOfCreatedEntries + 1;
            } else if (key.getKind().equals(Chapter.class.getSimpleName())) {
                numberOfCreatedChapters = numberOfCreatedChapters + 1;
            }
        }
        return Response.ok(String.format("Done: %d chapters and %d entries created.", numberOfCreatedChapters,
                numberOfCreatedEntries)).build();
    } catch (RuntimeException e) {
        LOG.warning(String.format("Unexpected %s: %s.", e.getClass().getSimpleName(), e.getMessage()));
        return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
                .entity(String.format("Unexpected %s: %s.", e.getClass().getSimpleName(), e.getMessage()))
                .build();
    }
}
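Judging from the record.get calls above, the imported CSV presumably carries columns along these lines (a purely hypothetical sample; the actual chapter and tag separators are the CSV_CHAPTERS_SEPARATOR and CSV_TAGS_SEPARATOR constants, which are not shown here, and the by:/for: suffixes are user ids in the group):

    date,currency,value,chapters,payee,tags,description,notes,by:alice,by:bob,for:alice,for:bob
    31/01/15,EUR,42.50,Home,ACME,utilities,Electricity bill,,50%,50%,100%,0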

From source file:com.github.douglasjunior.simpleCSVEditor.FXMLController.java

private ObservableList<CSVRow> readFile(File csvFile) throws IOException {
    ObservableList<CSVRow> rows = FXCollections.observableArrayList();
    Integer maxColumns = 0;
    try (Reader in = new InputStreamReader(new FileInputStream(csvFile))) {
        CSVParser parse = csvFormat.parse(in);
        for (CSVRecord record : parse.getRecords()) {
            if (maxColumns < record.size()) {
                maxColumns = record.size();
            }
            CSVRow row = new CSVRow();
            for (int i = 0; i < record.size(); i++) {
                row.getColumns().add(new SimpleStringProperty(record.get(i)));
            }
            rows.add(row);
        }
        this.numbeColumns = maxColumns;
    }
    return rows;
}

From source file:eu.fthevenet.binjr.data.codec.CsvDecoder.java

private List<String> parseColumnHeaders(CSVRecord record) throws IOException, DecodingDataFromAdapterException {
    try (Profiler ignored = Profiler.start("Getting headers from csv data", logger::trace)) {
        if (record == null) {
            throw new DecodingDataFromAdapterException("CSV stream does not contain a column header");
        }
        List<String> headerNames = new ArrayList<>();
        // note: starts at index 1, skipping the first column
        for (int i = 1; i < record.size(); i++) {
            headerNames.add(record.get(i));
        }
        return headerNames;
    }
}

From source file:br.ufg.calendario.components.EventoBean.java

public void uploadEvento(FileUploadEvent event) {
    Map<String, Object> requestMap = FacesContext.getCurrentInstance().getExternalContext().getRequestMap();
    FacesMessage msg;
    boolean saveStatus = false;
    UploadedFile arquivo = event.getFile();
    try {
        InputStream arquivoReader = arquivo.getInputstream();
        Charset charset = Charset.forName("UTF-8");
        CharsetDecoder decoder = charset.newDecoder();
        Reader reader = new InputStreamReader(arquivoReader, decoder);
        CSVParser parser = new CSVParser(reader,
                CSVFormat.DEFAULT.withHeader().withDelimiter(configBean.getDelimiter()));
        SimpleDateFormat dateFormatter = new SimpleDateFormat(configBean.getDateFormat());
        for (Entry<String, Integer> entry : parser.getHeaderMap().entrySet()) {
            System.out.format("header: %s - %d\n", entry.getKey(), entry.getValue());
        }
        Integer ano;
        Calendario cal = null;
        List<Regional> regionais = regionalDao.listar();
        List<Interessado> interessados = interessadoDao.listar();
        for (CSVRecord record : parser) {
            // add a calendario entity (select box) to the import events screen
            ano = Integer.parseInt(record.get(0));
            Date dataInicio = dateFormatter.parse(record.get(1));
            Date dataTermino = dateFormatter.parse(record.get(2));
            String assunto = record.get(3);
            String descricao = record.get(4);
            String[] interessadoArray = record.get(5).split(configBean.getRegexSplitter());
            String[] regionalArray = record.get(6).split(configBean.getRegexSplitter());
            boolean aprovado = record.get(7) != null && record.get(7).trim().toUpperCase().equals("S");
            if (cal == null) {
                // look it up only once
                cal = calendarioDao.buscar(ano);
            }
            Set<Interessado> interessadoSet = new HashSet<>();
            for (String interessado : interessadoArray) {
                if (!interessado.isEmpty()) {
                    for (Interessado i : interessados) {
                        if (i.getNome().equals(interessado.trim())) {
                            interessadoSet.add(i);
                        }
                    }
                }
            }
            Set<Regional> regionalSet = new HashSet<>();
            for (String regional : regionalArray) {
                if (!regional.isEmpty()) {
                    for (Regional r : regionais) {
                        if (r.getNome().equals(regional.trim())) {
                            regionalSet.add(r);
                        }
                    }
                }
            }
            Evento evt = new Evento(assunto, dataInicio, dataTermino, descricao, cal, regionalSet,
                    interessadoSet, aprovado);
            eventosImportados.add(evt);
        }
    } catch (IOException | ParseException | ArrayIndexOutOfBoundsException | NullPointerException e) {
        System.out.println("erro: " + e.getMessage());
    }
    System.out.println("arquivo enviado: " + arquivo.getFileName());
    msg = new FacesMessage(FacesMessage.SEVERITY_INFO, "info", LocaleBean.getMessage("arquivoEnviado"));
    FacesContext.getCurrentInstance().addMessage(null, msg);
    RequestContext.getCurrentInstance().addCallbackParam("resultado", saveStatus);
}

From source file:com.denimgroup.threadfix.service.waflog.RiverbedWebAppFirewallLogParser.java

/**
 * @param entry a single log line from the WAF log
 * @return the parsed SecurityEvent, or null if the line cannot be parsed or is not relevant
 */
@Override
public SecurityEvent getSecurityEvent(String entry) {
    if (entry == null || entry.isEmpty() || entry.startsWith("#")) {
        return null;
    }

    // a logline is a csv encoded line with the following columns
    //  * [0] a timestamp: YYYYMMDD-HHMMSS in local time
    //  * [1] an internal session id or "default"
    //  * [2] internal cluster node id
    //  * [3] host header
    //  * [4] client ip
    //  * [5] HTTP method
    //  * [6] URL
    //  * [7] HTTP protocol version
    //  * [8] internal ruleset / rule id
    //  * [9] action
    //  * [10] protection or detection mode
    //  * [11] request or response
    //  * [12] handlerName - we only care for the THREADFIX_HANDLER_NAME here
    //  * [13] component which reject the request
    //  * [14] value which rejects the request
    //  * [16] error id (use this together with the timestamp to be unique)
    //  * [17] free text field
    //  * ... additional stuff

    try {
        // we are using an iterator here because that is
        // the interface this CSV parser exposes;
        // however, we always feed only one line into
        // the parser, so it is OK to return from this
        // loop and never continue
        Iterable<CSVRecord> parser = CSVFormat.DEFAULT.parse(new StringReader(entry));
        for (CSVRecord record : parser) {

            // We access elements 0 .. 17 later, so this has to have at least 18 elements
            if (record.size() < 18) {
                log.error("can't parse logline: " + entry);
                return null;
            }
            String csvTimestamp = record.get(0); // 20140131-172342
            String csvClientIP = record.get(4); // 10.17.23.41
            String csvRulesetMode = record.get(10); // P or D
            String csvHandlerName = record.get(12); // ThreadfixHandler
            String csvComponentName = record.get(13); // protection_ruleset
            String csvComponentValue = record.get(14); // threadfix:100042 or 100042
            String csvErrorId = record.get(16); // 1234567
            String csvFreeText = record.get(17); // free text which describe the action

            if (csvTimestamp == null || csvClientIP == null || csvHandlerName == null || csvRulesetMode == null
                    || csvComponentName == null || csvComponentValue == null || csvErrorId == null
                    || csvFreeText == null) {

                log.error("can't parse logline: " + entry);
                return null;
            }

            // we only care for THREADFIX_HANDLER_NAME here ... ignore all other stuff
            if (!csvHandlerName.equals(THREADFIX_HANDLER_NAME)) {
                log.debug("ignore unknown handler: " + csvHandlerName);
                return null;
            }

            // while the error id act more or less as
            // a unique id for rejected requests, this id
            // is too short to be really unique over a
            // long time. So we combine it here with the
            // timestamp to get a better native id
            String nativeId = csvTimestamp + "-" + csvErrorId;

            log.debug("native id: " + nativeId);

            if (securityEventDao.retrieveByNativeIdAndWafId(nativeId, wafId) != null) {
                return null;
            }

            String wafRuleId = null;
            if (csvComponentName.equals(THREADFIX_HANDLER_COMPONENT)) {
                // allow threadfix:123456 and 123456
                if (csvComponentValue.contains(":")) {
                    wafRuleId = csvComponentValue.split(":", 2)[1];
                } else {
                    wafRuleId = csvComponentValue;
                }
            } else {
                log.debug("ignore unknown component: " + csvComponentName);
                return null;
            }

            log.debug("wafRuleId " + wafRuleId);

            WafRule rule = wafRuleDao.retrieveByWafAndNativeId(wafId, wafRuleId);
            if (rule == null) {
                log.debug("wafRule not found");
                return null;
            }

            Calendar calendar = parseDate(csvTimestamp);

            if (calendar == null) {
                log.error("can't parse logline (timestamp): " + entry);
                return null;
            }

            SecurityEvent event = new SecurityEvent();

            event.setWafRule(rule);
            event.setImportTime(calendar);
            event.setLogText(csvFreeText);

            event.setAttackType("deny");
            //if (csvRulesetMode == WAF_LOG_MODE_PROTECTION)
            //{
            //    event.setAttackType("deny");
            //} else {
            //    event.setAttackType("log"); 
            //}
            event.setNativeId(nativeId);
            event.setAttackerIP(csvClientIP);

            return event;
        }
    } catch (IOException e) {
        return null;
    }
    return null;

}
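For reference, a minimal sketch of the one-line-at-a-time pattern described in the comment above (assuming Commons CSV, where CSVParser is Iterable<CSVRecord>, so the single record can also be pulled straight from the iterator):

    import java.io.IOException;
    import java.io.StringReader;
    import java.util.Iterator;
    import org.apache.commons.csv.CSVFormat;
    import org.apache.commons.csv.CSVRecord;

    public class SingleLineCsvExample {
        public static void main(String[] args) throws IOException {
            // a shortened, invented log line for illustration
            String entry = "20140131-172342,default,node1,example.com,10.17.23.41,GET,/";
            Iterator<CSVRecord> it = CSVFormat.DEFAULT.parse(new StringReader(entry)).iterator();
            if (it.hasNext()) {
                CSVRecord record = it.next();
                // positional access, as in the parser above
                System.out.println(record.get(0) + " from " + record.get(4));
            }
        }
    }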