Example usage for org.apache.commons.csv CSVFormat EXCEL

List of usage examples for org.apache.commons.csv CSVFormat EXCEL

Introduction

In this page you can find the example usage for org.apache.commons.csv CSVFormat EXCEL.

Prototype

CSVFormat EXCEL

Use the Source Link below to view the source code for org.apache.commons.csv CSVFormat EXCEL.

Click Source Link

Document

Excel file format (using a comma as the value delimiter).

Usage

From source file:org.openmrs.projectbuendia.servlet.DataExportServlet.java

/**
 * Exports all patient encounter data as a CSV attachment.
 *
 * <p>Observations within {@code interval} minutes of each other are merged onto a
 * single CSV line where possible; passing {@code interval=0} disables merging.
 * Columns are the fixed patient/encounter headers followed by
 * {@code COLUMNS_PER_OBS} columns per chart concept, in UUID order.
 *
 * @param request  may carry an optional {@code interval} parameter (minutes, &gt;= 0)
 * @param response receives the CSV payload with a Content-Disposition attachment header
 * @throws ServletException never thrown directly; declared by the servlet contract
 * @throws IOException      if writing to the response fails
 */
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    // Check authentication FIRST, before obtaining the response writer or
    // setting headers. (The original created the CSVPrinter — and therefore
    // called response.getWriter() — before the auth check.)
    if (!XformsUtil.isAuthenticated(request, response, null))
        return;

    // Set the default merge mode
    boolean merge = true;

    // Defines the interval in minutes that will be used to merge encounters.
    int interval = DEFAULT_INTERVAL_MINS;
    String intervalParameter = request.getParameter("interval");
    if (intervalParameter != null) {
        int newInterval = Integer.parseInt(intervalParameter);
        if (newInterval >= 0) {
            interval = newInterval;
            if (interval == 0) {
                merge = false;
            }
        } else {
            log.error("Interval value is less than 0. Default used.");
        }
    }

    // Set the attachment headers before any body output is produced.
    Date now = new Date();
    DateFormat format = new SimpleDateFormat("yyyyMMdd_HHmmss");
    String filename = String.format("buendiadata_%s.csv", format.format(now));
    String contentDispositionHeader = String.format("attachment; filename=%s;", filename);
    response.addHeader("Content-Disposition", contentDispositionHeader);

    CSVPrinter printer = new CSVPrinter(response.getWriter(), CSVFormat.EXCEL.withDelimiter(','));

    PatientService patientService = Context.getPatientService();
    EncounterService encounterService = Context.getEncounterService();

    List<Patient> patients = new ArrayList<>(patientService.getAllPatients());
    Collections.sort(patients, PATIENT_COMPARATOR);

    // We may want to get the observations displayed in the chart/xform, in
    // which case there are a few sensible orders:
    // 1: UUID
    // 2: Order in chart
    // 3: Order in Xform
    //
    // Order in Xform/chart is not good as stuff changes every time we change xform.
    // So instead we will use UUID order, but use the Chart form to select the
    // concepts to display.
    Set<Concept> questionConcepts = new HashSet<>();
    for (Form form : ChartResource.getCharts(Context.getFormService())) {
        TreeMap<Integer, TreeSet<FormField>> formStructure = FormUtil.getFormStructure(form);
        for (FormField groupField : formStructure.get(0)) {
            for (FormField fieldInGroup : formStructure.get(groupField.getId())) {
                questionConcepts.add(fieldInGroup.getField().getConcept());
            }
        }
    }
    FixedSortedConceptIndexer indexer = new FixedSortedConceptIndexer(questionConcepts);

    // Write English headers.
    writeHeaders(printer, indexer);

    // Width of one CSV line: fixed columns plus COLUMNS_PER_OBS per concept.
    // Hoisted here instead of being recomputed at each allocation site.
    final int lineLength = FIXED_HEADERS.length + indexer.size() * COLUMNS_PER_OBS;

    Calendar calendar = Calendar.getInstance();

    // Loop through all the patients and get their encounters.
    for (Patient patient : patients) {

        // The line being accumulated (possibly merged from several encounters).
        Object[] previousCSVLine = new Object[lineLength];

        Date deadLine = new Date(0);

        ArrayList<Encounter> encounters = new ArrayList<>(encounterService.getEncountersByPatient(patient));
        Collections.sort(encounters, ENCOUNTER_COMPARATOR);

        // TODO: For now patients with no encounters are ignored. List them in the future.
        if (encounters.isEmpty())
            continue;

        // Loop through all the encounters for this patient to get the observations.
        for (Encounter encounter : encounters) {
            try {
                // Flag to whether we will use the merged version of the encounter
                // or the single version.
                boolean useMerged = merge;

                // Array that will be used to merge the previous encounter with the current one.
                Object[] mergedCSVLine = new Object[lineLength];

                // Duplicate previous encounter into the (future to be) merged one.
                System.arraycopy(previousCSVLine, 0, mergedCSVLine, 0, previousCSVLine.length);

                // Define the array to be used to store the current encounter.
                Object[] currentCSVLine = new Object[lineLength];

                // If the current encounter is more than "interval" minutes after the
                // previous one, print the previous line and reset it.
                Date encounterTime = encounter.getEncounterDatetime();
                if (encounterTime.after(deadLine)) {
                    // Only print when the accumulated line has content. The very
                    // first encounter of each patient always lands here with an
                    // all-null line; printing it produced a spurious blank CSV
                    // row per patient (the matching print further down already
                    // guards against this).
                    if (previousCSVLine[0] != null) {
                        printer.printRecord(previousCSVLine);
                    }
                    previousCSVLine = new Object[lineLength];
                    useMerged = false;
                }
                // Set the next deadline as the current encounter time plus "interval" minutes.
                calendar.setTime(encounterTime);
                calendar.add(Calendar.MINUTE, interval);
                deadLine = calendar.getTime();

                // Fill the fixed columns values.
                currentCSVLine[0] = patient.getUuid();
                currentCSVLine[1] = patient.getPatientIdentifier("MSF");
                if (patient.getBirthdate() != null) {
                    currentCSVLine[2] = Utils.YYYYMMDD_UTC_FORMAT.format(patient.getBirthdate());
                }
                currentCSVLine[3] = encounter.getUuid();
                currentCSVLine[4] = encounterTime.getTime();
                currentCSVLine[5] = Utils.toIso8601(encounterTime);
                currentCSVLine[6] = Utils.SPREADSHEET_FORMAT.format(encounterTime);

                // All the values of the fixed columns saved in the current encounter line
                // will also be saved to the merged line.
                System.arraycopy(currentCSVLine, 0, mergedCSVLine, 0, 7);

                // Loop through all the observations for this encounter.
                for (Obs obs : encounter.getAllObs()) {
                    Integer index = indexer.getIndex(obs.getConcept());
                    if (index == null)
                        continue;
                    // For each observation there are three columns: if the value of the
                    // observation is a concept, then the three columns contain the English
                    // name, the OpenMRS ID, and the UUID of the concept; otherwise all
                    // three columns contain the formatted value.
                    int valueColumn = FIXED_HEADERS.length + index * COLUMNS_PER_OBS;

                    // Coded values are treated differently.
                    if (obs.getValueCoded() != null) {
                        Concept value = obs.getValueCoded();
                        currentCSVLine[valueColumn] = NAMER.getClientName(value);
                        currentCSVLine[valueColumn + 1] = value.getId();
                        currentCSVLine[valueColumn + 2] = value.getUuid();
                        if (useMerged) {
                            // If we are still merging the current encounter values into
                            // the previous one, get the previous value and see if it had
                            // something in it.
                            String previousValue = (String) mergedCSVLine[valueColumn];
                            if ((previousValue == null) || (previousValue.isEmpty())) {
                                // If the previous value was empty copy the current value into it.
                                mergedCSVLine[valueColumn] = currentCSVLine[valueColumn];
                                mergedCSVLine[valueColumn + 1] = currentCSVLine[valueColumn + 1];
                                mergedCSVLine[valueColumn + 2] = currentCSVLine[valueColumn + 2];
                            } else {
                                // If the previous encounter has values stored for this
                                // observation we cannot merge them anymore.
                                useMerged = false;
                            }
                        }
                    }
                    // All values except the coded ones will be treated equally.
                    else {
                        // Return the value of the current observation using the visitor.
                        String value = (String) VisitObsValue.visit(obs, stringVisitor);
                        // Check if we have values stored for this observation.
                        if ((value != null) && (!value.isEmpty())) {
                            // Save the value of the observation on the current encounter line.
                            currentCSVLine[valueColumn] = value;
                            currentCSVLine[valueColumn + 1] = value;
                            currentCSVLine[valueColumn + 2] = value;
                            if (useMerged) {
                                // Since we are still merging this encounter with the previous
                                // one, get the previous value to see if it had something
                                // stored on it.
                                String previousValue = (String) mergedCSVLine[valueColumn];
                                if ((previousValue != null) && (!previousValue.isEmpty())) {
                                    // Yes, we had information stored for this observation on
                                    // the previous encounter.
                                    if (obs.getValueText() != null) {
                                        // We only continue merging if the observation is of
                                        // type text, so we concatenate it.
                                        // TODO: add timestamps to the merged values that are of type text
                                        previousValue += "\n" + value;
                                        value = previousValue;
                                    } else {
                                        // Any other type of value: we stop the merging.
                                        useMerged = false;
                                    }
                                }
                                mergedCSVLine[valueColumn] = value;
                                mergedCSVLine[valueColumn + 1] = value;
                                mergedCSVLine[valueColumn + 2] = value;
                            }
                        }
                    }
                }
                if (useMerged) {
                    // If after looping through all the observations we didn't have any
                    // overlapping values we keep the merged line.
                    previousCSVLine = mergedCSVLine;
                } else {
                    // We had overlapping values, so print the previous line and make the
                    // current encounter the previous one. Only if the previous line is not empty.
                    if (previousCSVLine[0] != null) {
                        printer.printRecord(previousCSVLine);
                    }
                    previousCSVLine = currentCSVLine;
                }
            } catch (Exception e) {
                log.error("Error exporting encounter", e);
            }
        }
        // For the last encounter we print the remaining line.
        printer.printRecord(previousCSVLine);
    }
}

From source file:org.pad.pgsql.loadmovies.LoadFiles.java

/**
 * Load movies from the MovieLens CSV files and save them in the DB.
 *
 * <p>First reads {@code links.csv} fully into memory (movieId → [imdbId, tmdbId]),
 * then streams {@code movies.csv}, enriching each kept movie with its links
 * before persisting it via {@link MovieDao}.
 *
 * @throws Exception it is only a demo; parse and I/O errors propagate as-is
 */
private static void loadMoviesAndLinks() throws Exception {
    MovieDao movieDao = new MovieDao(DS);
    Map<Integer, Integer[]> moviesLinks = new HashMap<>();
    // Loads all links information in memory to enrich movies afterwards.
    // try-with-resources closes the parser (and its underlying reader);
    // the original leaked both parsers.
    try (CSVParser parser = new CSVParser(new FileReader("C:\\PRIVE\\SRC\\ml-20m\\links.csv"),
            CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord link : parser) {
            Integer movieId = Integer.parseInt(link.get("movieId"));
            if (keepId(movieId)) {
                System.out.println("Parsing line " + link.toString());
                Integer[] otherIds = new Integer[2];
                otherIds[0] = Integer.parseInt(link.get("imdbId"));
                // tmdbId is optional in the dataset; leave null when absent.
                if (StringUtils.isNoneEmpty(link.get("tmdbId"))) {
                    otherIds[1] = Integer.parseInt(link.get("tmdbId"));
                }
                moviesLinks.put(movieId, otherIds);
            }
        }
    }

    // Read movie file.
    try (Reader reader = new FileReader("C:\\PRIVE\\SRC\\ml-20m\\movies.csv");
            CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord record : parser) {
            // Build a movie object from the record.
            Integer movieId = Integer.parseInt(record.get("movieId"));
            if (keepId(movieId)) {
                String title = record.get("title");
                String genres = record.get("genres");
                // Splitting title to extract the date, e.g. "Heat (1995)" -> "1995".
                String movieDate = StringUtils.substringBeforeLast(StringUtils.substringAfterLast(title, "("), ")");
                String movieName = null;
                if (StringUtils.isNumeric(movieDate)) {
                    movieName = StringUtils.substringBeforeLast(title, "(");
                } else {
                    // No trailing "(year)": keep the full title, no date.
                    movieName = title;
                    movieDate = null;
                }

                System.out.println(movieName + " - " + movieDate);
                Movie movieToAdd = new Movie(movieId, movieName, movieDate);

                // Enrich movie with links.
                Integer[] additionalIds = moviesLinks.get(movieId);
                if (additionalIds != null) {
                    movieToAdd.setImdbId(additionalIds[0]);
                    movieToAdd.setTmdbId(additionalIds[1]);
                }

                // Save in database.
                movieDao.save(movieToAdd);
            }
        }
    }
}

From source file:org.pad.pgsql.loadmovies.LoadFiles.java

/**
 * Read tags and load them in a multivalue map.
 *
 * @return MultivalueMap with key movieId and values all tags.
 * @throws Exception it is only a demo./*w  ww  .ja v  a  2  s  . c  o m*/
 */
private static LinkedMultiValueMap<Integer, Tag> readTags() throws Exception {
    TagDao tagDao = new TagDao(DS);
    LinkedMultiValueMap<Integer, Tag> tags = new LinkedMultiValueMap();
    final Reader reader = new FileReader("C:\\PRIVE\\SRC\\ml-20m\\tags.csv");
    CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader());
    for (CSVRecord record : parser) {

        Integer movieId = Integer.parseInt(record.get("movieId"));
        Integer userId = Integer.parseInt(record.get("userId"));
        if (keepId(movieId) && keepId(userId)) {
            //CSV Header : userId,movieId,tag,timestamp
            Tag newTag = new Tag();
            newTag.setUserId(userId);
            newTag.setMovieId(movieId);
            newTag.setTag(record.get("tag"));
            newTag.setDate(new Date(Long.parseLong(record.get("timestamp")) * 1000));
            //Adding to map for json loading
            tags.add(newTag.getMovieId(), newTag);
            //Saving in tag table
            //tagDao.save(newTag);
        }
    }
    return tags;
}

From source file:org.pad.pgsql.loadmovies.LoadFiles.java

/**
 * Load ratings and enrich movies with tags informations before updating the related movie.
 *
 * <p>Builds a movieId → ratings multimap from {@code ratings.csv} (header
 * {@code userId,movieId,rating,timestamp}), then for each movie stores up to
 * ten ratings, their mean, and the movie's tags as JSON "other informations".
 *
 * @throws Exception it is only a demo; parse and I/O errors propagate as-is
 */
private static void loadRatings() throws Exception {
    // MultivalueMap with key movieId and values all tags.
    LinkedMultiValueMap<Integer, Tag> tags = readTags();

    // MultivalueMap with key movieId and values all ratings.
    LinkedMultiValueMap<Integer, Rating> ratings = new LinkedMultiValueMap();

    RatingDao ratingDao = new RatingDao(DS);
    // "userId,movieId,rating,timestamp" — try-with-resources closes reader
    // and parser; the original leaked both.
    try (Reader reader = new FileReader("C:\\PRIVE\\SRC\\ml-20m\\ratings.csv");
            CSVParser parser = new CSVParser(reader, CSVFormat.EXCEL.withHeader())) {
        for (CSVRecord record : parser) {
            Integer movieId = Integer.parseInt(record.get("movieId"));
            Integer userId = Integer.parseInt(record.get("userId"));
            if (keepId(movieId) && keepId(userId)) {
                // Building a rating object.
                Rating rating = new Rating();
                rating.setUserId(userId);
                rating.setMovieId(movieId);
                rating.setRating(Float.parseFloat(record.get("rating")));
                // Epoch seconds -> milliseconds.
                rating.setDate(new Date(Long.parseLong(record.get("timestamp")) * 1000));
                // Add for json saving.
                ratings.add(rating.getMovieId(), rating);
                // traditional saving
                //ratingDao.save(rating);
            }
        }
    }
    MovieDaoJSON movieDaoJSON = new MovieDaoJSON(DS);
    ratings.entrySet().stream().forEach((integerListEntry -> {
        // Building other information objects.
        OtherInformations otherInformations = new OtherInformations();
        List ratingList = integerListEntry.getValue();
        // Keep at most the first ten ratings.
        otherInformations.setRatings(ratingList.subList(0, Math.min(10, ratingList.size())));
        otherInformations.computeMean();
        // Retrieve tags from the movieId.
        otherInformations.setTags(tags.get(integerListEntry.getKey()));
        try {
            movieDaoJSON.addOtherInformationsToMovie(integerListEntry.getKey(), otherInformations);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
    }));

}

From source file:org.qamatic.mintleaf.readers.CsvReader.java

/**
 * Builds a CSV parser over the wrapped reader, treating the first row as a
 * header and skipping empty lines.
 *
 * @return a new parser for {@code afileReader}
 * @throws IOException if the parser cannot read from the underlying reader
 */
protected CSVParser getCSVParser() throws IOException {
    final CSVFormat headerAwareFormat = CSVFormat.EXCEL.withHeader().withIgnoreEmptyLines();
    return new CSVParser(afileReader, headerAwareFormat);
}

From source file:org.qamatic.mintleaf.readers.CsvRowListWrapper.java

/**
 * Lazily creates (and caches) the CSV parser over the wrapped reader, using
 * Excel conventions with a header row and empty lines ignored.
 *
 * @return the cached parser, created on first call
 * @throws MintleafException if the parser cannot be constructed
 */
protected CSVParser getCSVParser() throws MintleafException {
    if (parser != null) {
        return parser;
    }
    try {
        final CSVFormat headerAwareFormat = CSVFormat.EXCEL.withHeader().withIgnoreEmptyLines();
        parser = new CSVParser(afileReader, headerAwareFormat);
    } catch (IOException e) {
        throw new MintleafException(e);
    }
    return parser;
}

From source file:org.qamatic.mintleaf.tools.CsvExportFlavour.java

/**
 * Writes the given result set to {@code writer} as CSV, using the result set's
 * own column names as the header row.
 *
 * @param resultSet rows to export; consumed by this call
 * @throws MintleafException wrapping any SQL or I/O failure
 */
@Override
public void export(ResultSet resultSet) throws MintleafException {
    // try-with-resources guarantees the printer is closed (and flushed) even
    // when printing fails; the original only closed it on the success path.
    try (CSVPrinter printer = new CSVPrinter(writer, CSVFormat.EXCEL.withHeader(resultSet))) {
        printer.printRecords(resultSet);
    } catch (SQLException | IOException e) {
        throw new MintleafException(e);
    }
}

From source file:org.roda.wui.api.v1.utils.CSVOutputStream.java

/**
 * CSV format used for this output stream: Excel conventions with this
 * stream's configured delimiter.
 *
 * @return the configured {@link CSVFormat}
 */
protected CSVFormat getFormat() {
    final CSVFormat excelBase = CSVFormat.EXCEL;
    return excelBase.withDelimiter(this.delimiter);
}

From source file:org.tomitribe.tribestream.registryng.resources.ClientResource.java

/**
 * Browser-friendly download endpoint: decodes the base64-encoded responses
 * payload, checks the caller's identity, and streams the data back as an
 * attachment in the requested format (currently only CSV).
 *
 * @param extension              output format; only "csv" is supported
 * @param filename               base name for the downloaded file
 * @param base64EncodedResponses base64-encoded {@link DownloadResponses} payload
 * @throws WebApplicationException 403 when authentication fails, 400 for an
 *                                 unsupported output type
 */
@GET // more portable way to do a download from a browser
@Path("download")
@Consumes(APPLICATION_JSON)
@Produces(TEXT_PLAIN)
public Response download(@QueryParam("output-type") @DefaultValue("csv") final String extension,
        @QueryParam("filename") @DefaultValue("responses") final String filename,
        @QueryParam("data") final String base64EncodedResponses,
        @Context final HttpServletRequest httpServletRequest, @Context final Providers providers) {
    final DownloadResponses downloadResponses = loadPayload(DownloadResponses.class, providers,
            base64EncodedResponses);
    final String auth = downloadResponses.getIdentity();
    security.check(auth, httpServletRequest, () -> {
    }, () -> {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
    });

    final String contentType;
    final StreamingOutput builder;
    switch (extension) {
    case "csv":
        contentType = TEXT_PLAIN;
        builder = output -> {
            final CSVFormat format = CSVFormat.EXCEL.withHeader("Status", "Duration (ms)", "Error");
            final StringWriter buffer = new StringWriter();
            try (final CSVPrinter print = format.print(buffer)) {
                downloadResponses.getData().forEach(r -> {
                    try {
                        print.printRecord(r.getStatus(), r.getClientExecutionDurationMs(), r.getError());
                    } catch (final IOException e) { // quite unlikely
                        throw new IllegalStateException(e);
                    }
                });
            }
            output.write(buffer.toString().getBytes(StandardCharsets.UTF_8));
        };
        break;
    default:
        throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    // "Content-Type" is the standard HTTP header name; the original sent a
    // nonexistent "ContentType" header that clients would ignore.
    return Response.status(Response.Status.OK).header("Content-Type", contentType)
            .header("Content-Disposition", "attachment; filename=\"" + filename + '.' + extension + "\"")
            .entity(builder).build();
}

From source file:org.wso2.carbon.event.simulator.core.internal.generator.csv.util.CSVReader.java

/**
 * parseFile() method is used to parse the CSV file using the delimiter specified in CSV simulation Configuration
 *
 * @param delimiter delimiter to be used when parsing CSV file
 * @return a parser over {@code fileReader} configured for that delimiter
 * @throws IOException if an error occurs when creating a CSVReader
 */
private CSVParser parseFile(String delimiter) throws IOException {
    switch (delimiter) {
    case ",":
        return new CSVParser(fileReader, CSVFormat.DEFAULT);
    case ";":
        // CSVFormat.EXCEL delimits with ',' — returning it unchanged (as the
        // original did) would not actually parse semicolon-separated files.
        return new CSVParser(fileReader, CSVFormat.EXCEL.withDelimiter(';'));
    case "\\t":
        return new CSVParser(fileReader, CSVFormat.TDF);
    default:
        return new CSVParser(fileReader, CSVFormat.newFormat(delimiter.charAt(0)));
    }
}