Example usage for the org.apache.commons.csv CSVPrinter constructor

Introduction

On this page you can find example usage for the org.apache.commons.csv CSVPrinter constructor, CSVPrinter(Appendable, CSVFormat).

Prototype

public CSVPrinter(final Appendable out, final CSVFormat format) throws IOException 

Document

Creates a printer that will print values to the given stream following the CSVFormat.
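
Before the project examples below, a minimal, self-contained sketch of the constructor (the class name and column values are illustrative, not taken from any of the quoted sources):

import java.io.IOException;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class CsvPrinterExample {
    public static void main(String[] args) throws IOException {
        // Any Appendable works as the target: StringWriter, BufferedWriter, StringBuffer, ...
        StringWriter out = new StringWriter();
        CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT);
        printer.printRecord("name", "age");  // one CSV record per call
        printer.printRecord("Alice", 30);    // values are quoted/escaped as needed
        printer.close();
        System.out.print(out);  // prints "name,age" and "Alice,30" on CRLF-terminated lines
    }
}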

Usage

From source file: norbert.mynemo.dataimport.scraping.CkMapping.java

/**
 * Creates a printer where a mapping can be printed. The given file must not exist.
 *
 * @param filepath the filepath where the printer will write the data.
 * @return a new printer
 */
public static CSVPrinter createPrinter(String filepath) throws IOException {
    checkNotNull(filepath);
    checkArgument(!new File(filepath).exists(), "The file must not exist.");

    return new CSVPrinter(new BufferedWriter(new FileWriter(filepath)), CSV_FORMAT_FOR_PRINTER);
}
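
A hypothetical caller of the factory above (the path and values are illustrative). CSVPrinter implements Closeable, and closing it also closes the wrapped BufferedWriter, so try-with-resources is enough to release the file:

try (CSVPrinter printer = CkMapping.createPrinter("/tmp/mappings.csv")) {
    printer.printRecord("movieId", "ckId");
}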

From source file: norbert.mynemo.dataimport.scraping.CkRating.java

/**
 * Creates a printer where a rating can be printed. The given file must not exist.
 *
 * @param filepath the filepath where the printer will write the data.
 * @return a new printer
 * @throws IOException
 */
public static CSVPrinter createPrinter(String filepath) throws IOException {
    checkArgument(filepath != null, "The filepath must not be null.");
    checkArgument(!new File(filepath).exists(), "The file must not exist.");

    return new CSVPrinter(new BufferedWriter(new FileWriter(filepath)), CSV_FORMAT_FOR_PRINTER);
}

From source file: org.alfresco.repo.web.scripts.DeclarativeSpreadsheetWebScript.java

/**
 * Generates the spreadsheet, based on the properties in the header
 *  and a callback for the body.
 */
public void generateSpreadsheet(Object resource, String format, WebScriptRequest req, Status status,
        Map<String, Object> model) throws IOException {
    Pattern qnameMunger = Pattern.compile("([A-Z][a-z]+)([A-Z].*)");
    String delimiterParam = req.getParameter(PARAM_REQ_DELIMITER);
    CSVStrategy reqCSVstrategy = null;
    if (delimiterParam != null && !delimiterParam.isEmpty()) {
        reqCSVstrategy = new CSVStrategy(delimiterParam.charAt(0), '"', CSVStrategy.COMMENTS_DISABLED);
    }
    // Build up the details of the header
    List<Pair<QName, Boolean>> propertyDetails = buildPropertiesForHeader(resource, format, req);
    String[] headings = new String[propertyDetails.size()];
    String[] descriptions = new String[propertyDetails.size()];
    boolean[] required = new boolean[propertyDetails.size()];
    for (int i = 0; i < headings.length; i++) {
        Pair<QName, Boolean> property = propertyDetails.get(i);
        if (property == null || property.getFirst() == null) {
            headings[i] = "";
            required[i] = false;
        } else {
            QName column = property.getFirst();
            required[i] = property.getSecond();

            // Ask the dictionary service nicely for the details
            PropertyDefinition pd = dictionaryService.getProperty(column);
            if (pd != null && pd.getTitle(dictionaryService) != null) {
                // Use the friendly titles, which may even be localised!
                headings[i] = pd.getTitle(dictionaryService);
                descriptions[i] = pd.getDescription(dictionaryService);
            } else {
                // Nothing friendly found, try to munge the raw qname into
                //  something we can show to a user...
                String raw = column.getLocalName();
                raw = raw.substring(0, 1).toUpperCase() + raw.substring(1);

                Matcher m = qnameMunger.matcher(raw);
                if (m.matches()) {
                    headings[i] = m.group(1) + " " + m.group(2);
                } else {
                    headings[i] = raw;
                }
            }
        }
    }

    // Build a list of just the properties
    List<QName> properties = new ArrayList<QName>(propertyDetails.size());
    for (Pair<QName, Boolean> p : propertyDetails) {
        QName qn = null;
        if (p != null) {
            qn = p.getFirst();
        }
        properties.add(qn);
    }

    // Output
    if ("csv".equals(format)) {
        StringWriter sw = new StringWriter();
        CSVPrinter csv = new CSVPrinter(sw, reqCSVstrategy != null ? reqCSVstrategy : getCsvStrategy());
        csv.println(headings);

        populateBody(resource, csv, properties);

        model.put(MODEL_CSV, sw.toString());
    } else {
        Workbook wb;
        if ("xlsx".equals(format)) {
            wb = new XSSFWorkbook();
            // TODO Properties
        } else {
            wb = new HSSFWorkbook();
            // TODO Properties
        }

        // Add our header row
        Sheet sheet = wb.createSheet("Export");
        Row hr = sheet.createRow(0);
        sheet.createFreezePane(0, 1);

        Font fb = wb.createFont();
        fb.setBoldweight(Font.BOLDWEIGHT_BOLD);
        Font fi = wb.createFont();
        fi.setBoldweight(Font.BOLDWEIGHT_BOLD);
        fi.setItalic(true);

        CellStyle csReq = wb.createCellStyle();
        csReq.setFont(fb);
        CellStyle csOpt = wb.createCellStyle();
        csOpt.setFont(fi);

        // Populate the header
        Drawing draw = null;
        for (int i = 0; i < headings.length; i++) {
            Cell c = hr.createCell(i);
            c.setCellValue(headings[i]);

            if (required[i]) {
                c.setCellStyle(csReq);
            } else {
                c.setCellStyle(csOpt);
            }

            if (headings[i].length() == 0) {
                sheet.setColumnWidth(i, 3 * 250);
            } else {
                sheet.setColumnWidth(i, 18 * 250);
            }

            if (descriptions[i] != null && descriptions[i].length() > 0) {
                // Add a description for it too
                if (draw == null) {
                    draw = sheet.createDrawingPatriarch();
                }
                ClientAnchor ca = wb.getCreationHelper().createClientAnchor();
                ca.setCol1(c.getColumnIndex());
                ca.setCol2(c.getColumnIndex() + 1);
                ca.setRow1(hr.getRowNum());
                ca.setRow2(hr.getRowNum() + 2);

                Comment cmt = draw.createCellComment(ca);
                cmt.setAuthor("");
                cmt.setString(wb.getCreationHelper().createRichTextString(descriptions[i]));
                cmt.setVisible(false);
                c.setCellComment(cmt);
            }
        }

        // Have the contents populated
        populateBody(resource, wb, sheet, properties);

        // Save it for the template
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        wb.write(baos);
        model.put(MODEL_EXCEL, baos.toByteArray());
    }
}
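
Note that this Alfresco example is written against the pre-1.0 Commons CSV API, in which the printer was constructed with a CSVStrategy rather than a CSVFormat and rows were written with println(String[]); the other examples on this page use the 1.x constructor shown in the prototype above.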

From source file: org.apache.ambari.server.api.services.serializers.CsvSerializer.java

/**
 * Serialize the result into a CSV-formatted text document.
 * <p/>
 * It is expected that the result set is a collection of flat resources - no sub-resources will be
 * included in the output.  The root of the tree structure may have a column map (csv_column_map)
 * and a column order (csv_column_order) property set to indicate the header record and ordering
 * of the columns.
 * <p/>
 * The csv_column_map is a map of resource property names to header descriptive names.  If not
 * specified, a header record will not be serialized.
 * <p/>
 * The csv_column_order is a list of resource property names declaring the order of the columns.
 * If not specified, the order will be taken from the key order of csv_column_map or the "natural"
 * ordering of the resource property names, both of which may be unpredictable.
 *
 * @param result internal result
 * @return a String containing the CSV-formatted document
 */
@Override
public Object serialize(Result result) {
    if (result.getStatus().isErrorState()) {
        return serializeError(result.getStatus());
    } else {
        CSVPrinter csvPrinter = null;
        try {
            // A StringBuffer to store the CSV-formatted document while building it.  It may be
            // necessary to use file-based storage if the data set is expected to be really large.
            StringBuffer buffer = new StringBuffer();

            TreeNode<Resource> root = result.getResultTree();

            if (root != null) {
                csvPrinter = new CSVPrinter(buffer, CSVFormat.DEFAULT);

                // TODO: recursively handle tree structure, for now only handle single level of detail
                if ("true".equalsIgnoreCase(root.getStringProperty("isCollection"))) {
                    List<String> fieldNameOrder = processHeader(csvPrinter, root);

                    Collection<TreeNode<Resource>> children = root.getChildren();
                    if (children != null) {
                        // Iterate over the child nodes of the collection and add each as a new record in the
                        // CSV document.
                        for (TreeNode<Resource> child : children) {
                            processRecord(csvPrinter, child, fieldNameOrder);
                        }
                    }
                }
            }

            return buffer.toString();
        } catch (IOException e) {
            //todo: exception handling.  Create ResultStatus 500 and call serializeError
            throw new RuntimeException("Unable to serialize to csv: " + e, e);
        } finally {
            if (csvPrinter != null) {
                try {
                    csvPrinter.close();
                } catch (IOException ex) {
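                    // ignore failures while closing the printer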
                }
            }
        }
    }
}

From source file: org.apache.ambari.server.api.services.serializers.CsvSerializer.java

@Override
public Object serializeError(ResultStatus error) {
    CSVPrinter csvPrinter = null;
    try {
        StringBuffer buffer = new StringBuffer();
        csvPrinter = new CSVPrinter(buffer, CSVFormat.DEFAULT);

        csvPrinter.printRecord(Arrays.asList("status", "message"));
        csvPrinter.printRecord(Arrays.asList(error.getStatus().getStatus(), error.getMessage()));

        return buffer.toString();
    } catch (IOException e) {
        //todo: exception handling.  Create ResultStatus 500 and call serializeError
        throw new RuntimeException("Unable to serialize to csv: " + e, e);
    } finally {
        if (csvPrinter != null) {
            try {
                csvPrinter.close();
            } catch (IOException ex) {
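                // ignore failures while closing the printer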
            }
        }
    }
}
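
As an aside (not part of the Ambari source): in recent Commons CSV 1.x releases the header record can be delegated to the format itself, because the constructor writes the format's header, if one is set, as soon as the printer is created. A sketch with illustrative values:

StringBuffer buffer = new StringBuffer();
// withHeader(...) makes the constructor emit "status,message" as the first record.
try (CSVPrinter printer = new CSVPrinter(buffer, CSVFormat.DEFAULT.withHeader("status", "message"))) {
    printer.printRecord("500", "Internal Server Error");
}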

From source file: org.apache.ambari.server.serveraction.kerberos.AbstractKerberosDataFileBuilder.java

/**
 * Opens the data file for writing.
 * <p/>
 * This may be called multiple times and the appropriate action will occur depending on if the
 * file has been previously opened or closed.
 *
 * @throws java.io.IOException
 */
public void open() throws IOException {
    if (isClosed()) {
        if (file == null) {
            throw new IOException("Missing file path");
        } else {
            csvPrinter = new CSVPrinter(new FileWriter(file, true), CSVFormat.DEFAULT);

            // If the file is new or empty, write the header record first.
            if (file.length() == 0) {
                Iterable<?> headerRecord = getHeaderRecord();
                csvPrinter.printRecord(headerRecord);
            }
        }
    }
}
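
The same append-plus-header idiom, condensed into a standalone sketch (the file name and record values are illustrative):

File file = new File("identities.csv");
boolean writeHeader = file.length() == 0; // length() is 0 for a missing or empty file
try (CSVPrinter printer = new CSVPrinter(new FileWriter(file, true), CSVFormat.DEFAULT)) {
    if (writeHeader) {
        printer.printRecord("principal", "keytab");
    }
    printer.printRecord("user@EXAMPLE.COM", "/etc/security/user.keytab");
}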

From source file: org.apache.ambari.view.hive.resources.jobs.JobService.java

/**
 * Get job results in CSV format.
 */
@GET
@Path("{jobId}/results/csv")
@Produces("text/csv")
public Response getResultsCSV(@PathParam("jobId") String jobId, @Context HttpServletResponse response,
        @QueryParam("columns") final String requestedColumns) {
    try {
        JobController jobController = getResourceManager().readController(jobId);
        final Cursor resultSet = jobController.getResults();
        resultSet.selectColumns(requestedColumns);

        StreamingOutput stream = new StreamingOutput() {
            @Override
            public void write(OutputStream os) throws IOException, WebApplicationException {
                Writer writer = new BufferedWriter(new OutputStreamWriter(os));
                CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.DEFAULT);
                try {
                    while (resultSet.hasNext()) {
                        csvPrinter.printRecord(resultSet.next().getRow());
                        writer.flush();
                    }
                } finally {
                    writer.close();
                }
            }
        };

        return Response.ok(stream).build();
    } catch (WebApplicationException ex) {
        throw ex;
    } catch (ItemNotFound itemNotFound) {
        throw new NotFoundFormattedException(itemNotFound.getMessage(), itemNotFound);
    } catch (Exception ex) {
        throw new ServiceFormattedException(ex.getMessage(), ex);
    }
}

From source file: org.apache.ambari.view.hive.resources.jobs.JobService.java

/**
 * Get job results in CSV format and save them to HDFS.
 */
@GET
@Path("{jobId}/results/csv/saveToHDFS")
@Produces(MediaType.APPLICATION_JSON)
public Response getResultsToHDFS(@PathParam("jobId") String jobId, @QueryParam("commence") String commence,
        @QueryParam("file") final String targetFile, @QueryParam("stop") final String stop,
        @QueryParam("columns") final String requestedColumns, @Context HttpServletResponse response) {
    try {
        final JobController jobController = getResourceManager().readController(jobId);

        String backgroundJobId = "csv" + String.valueOf(jobController.getJob().getId());
        if (commence != null && commence.equals("true")) {
            if (targetFile == null)
                throw new MisconfigurationFormattedException("targetFile should not be empty");
            BackgroundJobController.getInstance(context).startJob(String.valueOf(backgroundJobId),
                    new Runnable() {
                        @Override
                        public void run() {

                            try {
                                Cursor resultSet = jobController.getResults();
                                resultSet.selectColumns(requestedColumns);

                                FSDataOutputStream stream = getSharedObjectsFactory().getHdfsApi()
                                        .create(targetFile, true);
                                Writer writer = new BufferedWriter(new OutputStreamWriter(stream));
                                CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.DEFAULT);
                                try {
                                    while (resultSet.hasNext() && !Thread.currentThread().isInterrupted()) {
                                        csvPrinter.printRecord(resultSet.next().getRow());
                                        writer.flush();
                                    }
                                } finally {
                                    writer.close();
                                }
                                stream.close();

                            } catch (IOException e) {
                                throw new ServiceFormattedException(
                                        "Could not write CSV to HDFS for job#" + jobController.getJob().getId(),
                                        e);
                            } catch (InterruptedException e) {
                                throw new ServiceFormattedException(
                                        "Could not write CSV to HDFS for job#" + jobController.getJob().getId(),
                                        e);
                            } catch (ItemNotFound itemNotFound) {
                                throw new NotFoundFormattedException("Job results are expired", itemNotFound);
                            }

                        }
                    });
        }

        if (stop != null && stop.equals("true")) {
            BackgroundJobController.getInstance(context).interrupt(backgroundJobId);
        }

        JSONObject object = new JSONObject();
        object.put("stopped", BackgroundJobController.getInstance(context).isInterrupted(backgroundJobId));
        object.put("jobId", jobController.getJob().getId());
        object.put("backgroundJobId", backgroundJobId);
        object.put("operationType", "CSV2HDFS");
        object.put("status", BackgroundJobController.getInstance(context).state(backgroundJobId).toString());

        return Response.ok(object).build();
    } catch (WebApplicationException ex) {
        throw ex;
    } catch (ItemNotFound itemNotFound) {
        throw new NotFoundFormattedException(itemNotFound.getMessage(), itemNotFound);
    } catch (Exception ex) {
        throw new ServiceFormattedException(ex.getMessage(), ex);
    }
}

From source file: org.apache.ambari.view.hive.resources.uploads.TableDataReader.java

@Override
public int read(char[] cbuf, int off, int len) throws IOException {

    int totalLen = len;
    int count = 0;
    do {
        int n = stringReader.read(cbuf, off, len);

        if (n != -1) {
            // n characters were read in this pass
            len = len - n; // len more to be read
            off = off + n; // off now shifted to n more
            count += n;
        }

        if (count == totalLen)
            return count; // all totalLen characters were read

        if (iterator.hasNext()) { // keep reading as long as we keep getting rows
            StringWriter stringWriter = new StringWriter(CAPACITY);
            CSVPrinter csvPrinter = new CSVPrinter(stringWriter, CSV_FORMAT);
            Row row = iterator.next();
            csvPrinter.printRecord(row.getRow());
            stringReader.close(); // close the old string reader
            stringReader = new StringReader(stringWriter.getBuffer().toString());
            csvPrinter.close();
            stringWriter.close();
        } else {
            return count == 0 ? -1 : count;
        }
    } while (count < totalLen);

    return count;
}
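
The row-to-String trick used above, printing one record into a StringWriter and re-reading the buffer, also works in isolation. A minimal sketch with illustrative values:

StringWriter sw = new StringWriter();
try (CSVPrinter printer = new CSVPrinter(sw, CSVFormat.DEFAULT)) {
    printer.printRecord("a", "b", "c");
}
String line = sw.toString(); // "a,b,c" followed by CRLF, the record separator of CSVFormat.DEFAULT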

From source file: org.apache.camel.dataformat.csv.CsvMarshaller.java

/**
 * Marshals the given object into the given stream.
 *
 * @param exchange     Exchange (used for access to type conversion)
 * @param object       Body to marshal
 * @param outputStream Output stream of the CSV
 * @throws NoTypeConversionAvailableException if the body cannot be converted
 * @throws IOException                        if we cannot write into the given stream
 */
public void marshal(Exchange exchange, Object object, OutputStream outputStream)
        throws NoTypeConversionAvailableException, IOException {
    CSVPrinter printer = new CSVPrinter(new OutputStreamWriter(outputStream), format);
    try {
        List<?> list = ExchangeHelper.convertToType(exchange, List.class, object);
        if (list != null) {
            for (Object child : list) {
                printer.printRecord(getRecordValues(exchange, child));
            }
        } else {
            printer.printRecord(getRecordValues(exchange, object));
        }
    } finally {
        IOHelper.close(printer);
    }
}
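
Nothing in the marshaller above depends on CSVFormat.DEFAULT; any format accepted by the constructor can be injected. A sketch (not from the Camel source) using a semicolon-delimited format and illustrative values:

CSVFormat format = CSVFormat.DEFAULT.withDelimiter(';');
try (Writer writer = new OutputStreamWriter(System.out, StandardCharsets.UTF_8);
     CSVPrinter printer = new CSVPrinter(writer, format)) {
    printer.printRecord("id", "name");
    printer.printRecord(1, "Alice");
}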