Example usage for java.sql ResultSet getBytes

List of usage examples for java.sql ResultSet getBytes

Introduction

On this page you can find example usage for java.sql ResultSet getBytes.

Prototype

byte[] getBytes(String columnLabel) throws SQLException;

Document

Retrieves the value of the designated column in the current row of this ResultSet object as a byte array in the Java programming language.
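
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern. It is illustrative only: the in-memory H2 URL and the users table with its photo column are hypothetical names, not taken from any example on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class GetBytesExample {
    public static void main(String[] args) throws SQLException {
        // Hypothetical connection URL, table, and column names, for illustration only.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                PreparedStatement ps = conn.prepareStatement("SELECT photo FROM users WHERE id = ?")) {
            ps.setInt(1, 42);
            try (ResultSet rs = ps.executeQuery()) {
                if (rs.next()) {
                    // getBytes returns the raw column value, or null for SQL NULL
                    byte[] photo = rs.getBytes("photo");
                    System.out.println(photo == null ? "no photo" : photo.length + " bytes");
                }
            }
        }
    }
}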

Usage

From source file:org.apache.jackrabbit.core.persistence.bundle.BundleDbPersistenceManager.java

/**
 * {@inheritDoc}
 */
public synchronized NodeIdIterator getAllNodeIds(NodeId bigger, int maxCount)
        throws ItemStateException, RepositoryException {
    ResultSet rs = null;
    try {
        UUID lowUuid;
        Object[] keys;
        String sql;
        if (bigger == null) {
            sql = bundleSelectAllIdsSQL;
            lowUuid = null;
            keys = new Object[0];
        } else {
            sql = bundleSelectAllIdsFromSQL;
            lowUuid = bigger.getUUID();
            keys = getKey(lowUuid);
        }
        if (maxCount > 0) {
            // get some more rows, in case the first row is smaller
            // only required for SM_LONGLONG_KEYS
            // probability of getting the wrong first key is very low, < 1 : 2^64
            // see also bundleSelectAllIdsFrom SQL statement
            maxCount += 10;
        }
        Statement stmt = connectionManager.executeStmt(sql, keys, false, maxCount);
        rs = stmt.getResultSet();
        ArrayList<UUID> result = new ArrayList<UUID>();
        while ((maxCount == 0 || result.size() < maxCount) && rs.next()) {
            UUID current;
            if (getStorageModel() == SM_BINARY_KEYS) {
                current = new UUID(rs.getBytes(1));
            } else {
                long high = rs.getLong(1);
                long low = rs.getLong(2);
                current = new UUID(high, low);
            }
            if (lowUuid != null) {
                // skip the keys that are smaller or equal (see above, maxCount += 10)
                if (current.compareTo(lowUuid) <= 0) {
                    continue;
                }
            }
            result.add(current);
        }
        ListNodeIdIterator it = new ListNodeIdIterator(result);
        return it;
    } catch (SQLException e) {
        String msg = "getAllNodeIds failed.";
        log.error(msg, e);
        throw new ItemStateException(msg, e);
    } finally {
        closeResultSet(rs);
    }
}

From source file:org.dspace.storage.rdbms.DatabaseManager.java

/**
 * Convert the current row in a ResultSet into a TableRow object.
 *
 * @param results
 *            A ResultSet to process
 * @param table
 *            The name of the table
 * @param pColumnNames
 *            The name of the columns in this resultset
 * @return A TableRow object with the data from the ResultSet
 * @exception SQLException
 *                If a database error occurs
 */
static TableRow process(ResultSet results, String table, List<String> pColumnNames) throws SQLException {
    ResultSetMetaData meta = results.getMetaData();
    int columns = meta.getColumnCount() + 1;

    // If we haven't been passed the column names try to generate them from the metadata / table
    List<String> columnNames = pColumnNames != null ? pColumnNames
            : ((table == null) ? getColumnNames(meta) : getColumnNames(table));

    TableRow row = new TableRow(canonicalize(table), columnNames);

    // Process the columns in order
    // (This ensures maximum backwards compatibility with
    // old JDBC drivers)
    for (int i = 1; i < columns; i++) {
        String name = meta.getColumnName(i);
        int jdbctype = meta.getColumnType(i);

        switch (jdbctype) {
        case Types.BIT:
            row.setColumn(name, results.getBoolean(i));
            break;

        case Types.INTEGER:
        case Types.NUMERIC:
            if (isOracle) {
                long longValue = results.getLong(i);
                if (longValue <= (long) Integer.MAX_VALUE) {
                    row.setColumn(name, (int) longValue);
                } else {
                    row.setColumn(name, longValue);
                }
            } else {
                row.setColumn(name, results.getInt(i));
            }
            break;

        case Types.DECIMAL:
        case Types.BIGINT:
            row.setColumn(name, results.getLong(i));
            break;

        case Types.DOUBLE:
            row.setColumn(name, results.getDouble(i));
            break;

        case Types.CLOB:
            if (isOracle) {
                row.setColumn(name, results.getString(i));
            } else {
                throw new IllegalArgumentException("Unsupported JDBC type: " + jdbctype);
            }
            break;

        case Types.VARCHAR:
            try {
                byte[] bytes = results.getBytes(i);

                if (bytes != null) {
                    String mystring = new String(bytes, "UTF-8");
                    row.setColumn(name, mystring);
                } else {
                    row.setColumn(name, results.getString(i));
                }
            } catch (UnsupportedEncodingException e) {
                log.error("Unable to parse text from database", e);
            }
            break;

        case Types.DATE:
            row.setColumn(name, results.getDate(i));
            break;

        case Types.TIME:
            row.setColumn(name, results.getTime(i));
            break;

        case Types.TIMESTAMP:
            row.setColumn(name, results.getTimestamp(i));
            break;

        default:
            throw new IllegalArgumentException("Unsupported JDBC type: " + jdbctype);
        }

        // Determines if the last column was null, and sets the tablerow accordingly
        if (results.wasNull()) {
            row.setColumnNull(name);
        }
    }

    // Now that we've prepped the TableRow, reset the flags so that we can detect which columns have changed
    row.resetChanged();
    return row;
}

From source file:org.wso2.carbon.identity.workflow.mgt.dao.WorkflowRequestDAO.java

/**
 * Get requests of a user created/updated in given time period
 *
 * @param userName     User to get requests of, empty String to retrieve requests of all users
 * @param beginTime    lower limit of date range to filter
 * @param endTime      upper limit of date range to filter
 * @param timeCategory whether to filter by created time or last updated time
 * @param tenantId     tenant id of currently logged in user
 * @return array of workflow requests matching the given filters
 * @throws InternalWorkflowException
 */
public org.wso2.carbon.identity.workflow.mgt.bean.WorkflowRequest[] getRequestsOfUserFilteredByTime(
        String userName, Timestamp beginTime, Timestamp endTime, String timeCategory, int tenantId,
        String status) throws InternalWorkflowException {

    Connection connection = IdentityDatabaseUtil.getDBConnection();
    PreparedStatement prepStmt = null;
    String query = "";

    ResultSet resultSet = null;
    try {
        if (UPDATED_AT_FILTER.equals(timeCategory)) {
            if (status.equals(ALL_TASKS_FILTER) || status.equals("")) {
                query = SQLConstants.GET_REQUESTS_OF_USER_FILTER_FROM_UPDATED_TIME;
            } else {
                query = SQLConstants.GET_REQUESTS_OF_USER_FILTER_FROM_UPDATED_TIME_AND_STATUS;
            }
        } else {
            if (status.equals(ALL_TASKS_FILTER) || status.equals("")) {
                query = SQLConstants.GET_REQUESTS_OF_USER_FILTER_FROM_CREATED_TIME;
            } else {
                query = SQLConstants.GET_REQUESTS_OF_USER_FILTER_FROM_CREATED_TIME_AND_STATUS;
            }
        }
        prepStmt = connection.prepareStatement(query);
        prepStmt.setString(1, userName);
        prepStmt.setTimestamp(2, beginTime);
        prepStmt.setTimestamp(3, endTime);
        prepStmt.setInt(4, tenantId);
        if (status.equals(ALL_TASKS_FILTER) || status.equals("")) {

            prepStmt.setInt(5, SQLConstants.maxResultsPerRequest);
        } else {
            prepStmt.setString(5, status);
            prepStmt.setInt(6, SQLConstants.maxResultsPerRequest);
        }
        resultSet = prepStmt.executeQuery();
        ArrayList<org.wso2.carbon.identity.workflow.mgt.bean.WorkflowRequest> requestDTOs = new ArrayList<>();
        while (resultSet.next()) {
            org.wso2.carbon.identity.workflow.mgt.bean.WorkflowRequest requestDTO = new org.wso2.carbon.identity.workflow.mgt.bean.WorkflowRequest();
            requestDTO.setRequestId(resultSet.getString(SQLConstants.REQUEST_UUID_COLUMN));
            requestDTO.setEventType(resultSet.getString(SQLConstants.REQUEST_OPERATION_TYPE_COLUMN));
            requestDTO.setCreatedAt(resultSet.getTimestamp(SQLConstants.REQUEST_CREATED_AT_COLUMN).toString());
            requestDTO.setUpdatedAt(resultSet.getTimestamp(SQLConstants.REQUEST_UPDATED_AT_COLUMN).toString());
            requestDTO.setStatus(resultSet.getString(SQLConstants.REQUEST_STATUS_COLUMN));
            requestDTO.setRequestParams(
                    (deserializeWorkflowRequest(resultSet.getBytes(SQLConstants.REQUEST_COLUMN)))
                            .getRequestParameterAsString());
            requestDTO.setCreatedBy(resultSet.getString(SQLConstants.CREATED_BY_COLUMN));
            requestDTOs.add(requestDTO);
        }
        return requestDTOs.toArray(
                new org.wso2.carbon.identity.workflow.mgt.bean.WorkflowRequest[requestDTOs.size()]);
    } catch (SQLException e) {
        throw new InternalWorkflowException("Error when executing the sql query:" + query, e);
    } catch (ClassNotFoundException | IOException e) {
        throw new InternalWorkflowException("Error when deserializing a workflow request.", e);
    } finally {
        IdentityDatabaseUtil.closeAllConnections(connection, resultSet, prepStmt);
    }
}

From source file:com.atlassian.jira.util.BugzillaImportBean.java

private void createAttachments(final Connection conn, final PreparedStatement attachPrepStatement,
        final int bug_id, final GenericValue issue) throws Exception {
    if (applicationProperties.getOption(APKeys.JIRA_OPTION_ALLOWATTACHMENTS)) {
        ResultSet resultSet = null;
        try {
            attachPrepStatement.clearParameters();
            attachPrepStatement.setInt(1, bug_id);
            resultSet = attachPrepStatement.executeQuery();
            while (resultSet.next()) {
                String fileName = resultSet.getString("attachment_title");
                if (fileName.lastIndexOf('\\') > -1) {
                    fileName = fileName.substring(fileName.lastIndexOf('\\') + 1);
                }

                if (fileName.lastIndexOf('/') > -1) {
                    fileName = fileName.substring(fileName.lastIndexOf('/') + 1);
                }

                log("Importing attachment for bug " + bug_id + ".");

                byte[] fileBytes;
                // Previously wrapped in a try/catch that fell back to the attach_data table:
                // try
                // {
                fileBytes = resultSet.getBytes("attachment_data");
                // }
                // catch (final SQLException e)
                // {
                //     final PreparedStatement ps = conn.prepareStatement("select thedata from attach_data where id = ?");
                //     ps.setInt(1, resultSet.getInt("attach_id"));
                //     final ResultSet attachmentRS = ps.executeQuery();
                //     attachmentRS.next();
                //     fileBytes = attachmentRS.getBytes("thedata");
                //     attachmentRS.close();
                //     resultSet.close();
                //     return;
                // }

                final int submitterId = resultSet.getInt("user_id");
                final Attachment attachment = attachmentManager.createAttachment(issue, getUser(submitterId),
                        resultSet.getString("mimetype"), fileName, new Long(fileBytes.length), null,
                        resultSet.getTimestamp("created_ts"));

                // UtilDateTime.nowTimestamp());

                //we need to set the created date back to when it was created in the original system.
                attachment.getGenericValue().set("created", resultSet.getTimestamp("created_ts"));
                attachment.store();

                CoreFactory.getGenericDelegator().storeAll(EasyList.build(issue));
                cacheManager.flush(CacheManager.ISSUE_CACHE, issue);

                final File realAttachFile = AttachmentUtils.getAttachmentFile(attachment);
                final BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(realAttachFile));
                out.write(fileBytes, 0, fileBytes.length);
                out.close();
            }
        } catch (final SQLException e) {
            log("Error on importing attachments for bug " + bug_id + ". Error:" + e.getMessage());
        } finally {
            if (resultSet != null) //prevent nullpointer - JRA-6154
            {
                resultSet.close();
            }
        }
    } else {
        log("Attachments will not be imported from phpBB since attachements are disabled in JIRA.");
    }
}

From source file:org.apache.tajo.storage.jdbc.JdbcScanner.java

protected void convertTuple(ResultSet resultSet, VTuple tuple) {
    try {
        for (int column_idx = 0; column_idx < targets.length; column_idx++) {
            final Column c = targets[column_idx];
            final int resultIdx = column_idx + 1;

            switch (c.getDataType().getType()) {
            case INT1:
            case INT2:
                tuple.put(column_idx, DatumFactory.createInt2(resultSet.getShort(resultIdx)));
                break;
            case INT4:
                tuple.put(column_idx, DatumFactory.createInt4(resultSet.getInt(resultIdx)));
                break;
            case INT8:
                tuple.put(column_idx, DatumFactory.createInt8(resultSet.getLong(resultIdx)));
                break;
            case FLOAT4:
                tuple.put(column_idx, DatumFactory.createFloat4(resultSet.getFloat(resultIdx)));
                break;
            case FLOAT8:
                tuple.put(column_idx, DatumFactory.createFloat8(resultSet.getDouble(resultIdx)));
                break;
            case CHAR:
                tuple.put(column_idx, DatumFactory.createText(resultSet.getString(resultIdx)));
                break;
            case VARCHAR:
            case TEXT:
                // TODO - trim is unnecessary in many cases, so we can use it for certain cases
                tuple.put(column_idx, DatumFactory.createText(resultSet.getString(resultIdx).trim()));
                break;
            case DATE:
                final Date date = resultSet.getDate(resultIdx);
                tuple.put(column_idx,
                        DatumFactory.createDate(1900 + date.getYear(), 1 + date.getMonth(), date.getDate()));
                break;
            case TIME:
                final Time time = resultSet.getTime(resultIdx);
                tuple.put(column_idx, new TimeDatum(
                        DateTimeUtil.toTime(time.getHours(), time.getMinutes(), time.getSeconds(), 0)));
                break;
            case TIMESTAMP:
                tuple.put(column_idx, DatumFactory
                        .createTimestampDatumWithJavaMillis(resultSet.getTimestamp(resultIdx).getTime()));
                break;
            case BINARY:
            case VARBINARY:
            case BLOB:
                tuple.put(column_idx, DatumFactory.createBlob(resultSet.getBytes(resultIdx)));
                break;
            default:
                throw new TajoInternalError(new UnsupportedDataTypeException(c.getDataType().getType().name()));
            }
        }
    } catch (SQLException s) {
        throw new TajoInternalError(s);
    }
}

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

public TopNData getTopNData(int resourceId, long time) {
    Statement stmt = null;
    Connection conn = safeGetConnection();
    StringBuilder builder = new StringBuilder();
    try {
        builder.append("SELECT data FROM TOPNDATA TOP WHERE TOP.resourceid='").append(resourceId)
                .append("' AND TOP.time = '").append(new java.sql.Timestamp(time)).append("'");

        stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(builder.toString());
        while (rs.next()) {
            TopNData data = new TopNData();
            data.setData(rs.getBytes("data"));
            data.setResourceId(resourceId);
            data.setTime(new Date(time));
            return data;
        }

    } catch (SQLException e) {
        log.error("Problem fetching TOP data", e);
    } finally {
        DBUtil.closeStatement(LOG_CTX, stmt);
    }
    return null;
}

From source file:org.idempiere.adinterface.ModelADServiceImpl.java

public WindowTabDataDocument queryData(ModelCRUDRequestDocument req) {
    boolean connected = getCompiereService().isConnected();

    boolean manageTrx = this.manageTrx;
    Trx trx = null;
    try {
        if (!connected)
            getCompiereService().connect();

        CompiereService m_cs = getCompiereService();
        WindowTabDataDocument ret = WindowTabDataDocument.Factory.newInstance();
        WindowTabData resp = ret.addNewWindowTabData();
        ModelCRUD modelCRUD = req.getModelCRUDRequest().getModelCRUD();
        String serviceType = modelCRUD.getServiceType();

        ADLoginRequest reqlogin = req.getModelCRUDRequest().getADLoginRequest();
        String err = login(reqlogin, webServiceName, "queryData", serviceType);
        if (err != null && err.length() > 0) {
            resp.setError(err);
            return ret;
        }

        // Validate parameters vs service type
        validateCRUD(modelCRUD);

        Properties ctx = m_cs.getCtx();
        String tableName = modelCRUD.getTableName();
        Map<String, Object> reqCtx = getRequestCtx();
        MWebServiceType m_webservicetype = getWebServiceType();
        // get the PO for the tablename and record ID
        MTable table = MTable.get(ctx, tableName);
        if (table == null)
            throw new IdempiereServiceFault(
                    "Web service type " + m_webservicetype.getValue() + ": table " + tableName + " not found",
                    new QName("queryData"));

        int roleid = reqlogin.getRoleID();
        MRole role = new MRole(ctx, roleid, null);

        // start a trx
        String trxName = localTrxName;

        if (trxName == null) {
            trxName = Trx.createTrxName("ws_modelQueryData");
            manageTrx = true;
        }
        trx = Trx.get(trxName, true);

        StringBuilder sqlBuilder = new StringBuilder(
                role.addAccessSQL("SELECT * FROM " + tableName, tableName, true, true));

        ArrayList<Object> sqlParaList = new ArrayList<Object>();
        PO holderPo = table.getPO(0, trxName);
        POInfo poinfo = POInfo.getPOInfo(ctx, table.getAD_Table_ID());

        if (modelCRUD.getDataRow() != null) {
            DataRow dr = modelCRUD.getDataRow();
            DataField fields[] = dr.getFieldArray();
            StandardResponseDocument stdRet = StandardResponseDocument.Factory.newInstance();
            StandardResponse stdResp = stdRet.addNewStandardResponse();

            StandardResponseDocument retResp = invokeWSValidator(m_webservicetype,
                    IWSValidator.TIMING_BEFORE_PARSE, holderPo, fields, trx, reqCtx, stdResp, stdRet);
            if (retResp != null) {
                throw new IdempiereServiceFault(retResp.getStandardResponse().getError(),
                        new QName("queryData"));
            }

            retResp = scanFields(fields, m_webservicetype, holderPo, poinfo, trx, stdResp, stdRet);

            if (retResp != null) {
                throw new IdempiereServiceFault(retResp.getStandardResponse().getError(),
                        new QName("queryData"));
            }

            for (DataField field : modelCRUD.getDataRow().getFieldArray()) {
                if (m_webservicetype.isInputColumnNameAllowed(field.getColumn())) {

                    // Jan Thielemann Solution for query using the sentence like
                    X_WS_WebServiceFieldInput inputField = m_webservicetype.getFieldInput(field.getColumn());
                    I_AD_Column col = inputField.getAD_Column();
                    String sqlType = DisplayType.getSQLDataType(col.getAD_Reference_ID(), col.getColumnName(),
                            col.getFieldLength());
                    if (sqlType.contains("CHAR"))
                        sqlBuilder.append(" AND ").append(field.getColumn()).append(" LIKE ?");
                    else
                        sqlBuilder.append(" AND ").append(field.getColumn()).append("=?");

                    sqlParaList.add(holderPo.get_Value(field.getColumn()));
                    // End Jan Thielemann Solution for query using the sentence like                   
                } else if (m_webservicetype.getFieldInput(field.getColumn()) == null) {
                    //If not even ctx variable column
                    throw new IdempiereServiceFault("Web service type " + m_webservicetype.getValue()
                            + ": input column " + field.getColumn() + " not allowed", new QName("queryData"));
                }
            }
        }

        if (modelCRUD.getFilter() != null && modelCRUD.getFilter().length() > 0) {
            String sql = parseSQL(" WHERE " + modelCRUD.getFilter(), sqlParaList, holderPo, poinfo, reqCtx);
            sqlBuilder.append(" AND ").append(sql.substring(6));
        }

        int cnt = 0;
        int rowCnt = 0;
        int offset = modelCRUD.getOffset();
        int limit = modelCRUD.getLimit();

        PreparedStatement pstmtquery = null;
        ResultSet rsquery = null;
        try {
            pstmtquery = DB.prepareStatement(sqlBuilder.toString(), trxName);
            DB.setParameters(pstmtquery, sqlParaList);

            rsquery = pstmtquery.executeQuery();
            // Angelo Dabala' (genied): create just one DataSet, moved outside of the while loop
            DataSet ds = resp.addNewDataSet();
            while (rsquery.next()) {
                cnt++;
                if ((offset >= cnt) || (limit > 0 && offset + limit < cnt))
                    continue;
                rowCnt++;
                DataRow dr = ds.addNewDataRow();
                for (int i = 0; i < poinfo.getColumnCount(); i++) {
                    String columnName = poinfo.getColumnName(i);
                    if (m_webservicetype.isOutputColumnNameAllowed(columnName)) {
                        DataField dfid = dr.addNewField();
                        dfid.setColumn(columnName);
                        if (rsquery.getObject(columnName) instanceof byte[])
                            dfid.setVal(new String(Base64.encodeBase64(rsquery.getBytes(columnName))));
                        else
                            dfid.setVal(rsquery.getString(columnName));
                    }
                }
            }
        } catch (Exception e) {
            log.log(Level.SEVERE, e.getLocalizedMessage(), e);
            throw new IdempiereServiceFault(e);
        } finally {
            DB.close(rsquery, pstmtquery);
            rsquery = null;
            pstmtquery = null;
        }

        resp.setSuccess(true);
        resp.setRowCount(rowCnt);
        resp.setNumRows(rowCnt);
        resp.setTotalRows(cnt);
        resp.setStartRow(offset);

        return ret;
    } finally {
        if (manageTrx && trx != null)
            trx.close();

        if (!connected)
            getCompiereService().disconnect();
    }
}

From source file:org.apache.nifi.processors.standard.util.JdbcCommon.java

public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream, String recordName,
        ResultSetRowCallback callback, final int maxRows, boolean convertNames)
        throws SQLException, IOException {
    final Schema schema = createSchema(rs, recordName, convertNames);
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ResultSetMetaData meta = rs.getMetaData();
        final int nrOfColumns = meta.getColumnCount();
        long nrOfRows = 0;
        while (rs.next()) {
            if (callback != null) {
                callback.processRow(rs);
            }
            for (int i = 1; i <= nrOfColumns; i++) {
                final int javaSqlType = meta.getColumnType(i);

                // Need to handle CLOB and BLOB before getObject() is called, due to ResultSet's maximum portability statement
                if (javaSqlType == CLOB) {
                    Clob clob = rs.getClob(i);
                    if (clob != null) {
                        long numChars = clob.length();
                        char[] buffer = new char[(int) numChars];
                        InputStream is = clob.getAsciiStream();
                        int index = 0;
                        int c = is.read();
                        while (c >= 0) { // read until EOF (-1); a zero char is valid data
                            buffer[index++] = (char) c;
                            c = is.read();
                        }
                        rec.put(i - 1, new String(buffer));
                        clob.free();
                    } else {
                        rec.put(i - 1, null);
                    }
                    continue;
                }

                if (javaSqlType == BLOB) {
                    Blob blob = rs.getBlob(i);
                    if (blob != null) {
                        long numChars = blob.length();
                        byte[] buffer = new byte[(int) numChars];
                        InputStream is = blob.getBinaryStream();
                        int index = 0;
                        int c = is.read();
                        while (c >= 0) { // read until EOF (-1); a zero byte is valid data
                            buffer[index++] = (byte) c;
                            c = is.read();
                        }
                        ByteBuffer bb = ByteBuffer.wrap(buffer);
                        rec.put(i - 1, bb);
                        blob.free();
                    } else {
                        rec.put(i - 1, null);
                    }
                    continue;
                }

                final Object value = rs.getObject(i);

                if (value == null) {
                    rec.put(i - 1, null);

                } else if (javaSqlType == BINARY || javaSqlType == VARBINARY || javaSqlType == LONGVARBINARY
                        || javaSqlType == ARRAY) {
                    // byte arrays require slightly different handling
                    byte[] bytes = rs.getBytes(i);
                    ByteBuffer bb = ByteBuffer.wrap(bytes);
                    rec.put(i - 1, bb);

                } else if (value instanceof Byte) {
                    // tinyint(1) type is returned by JDBC driver as java.sql.Types.TINYINT
                    // But value is returned by JDBC as java.lang.Byte
                    // (at least H2 JDBC works this way)
                    // direct put to avro record results:
                    // org.apache.avro.AvroRuntimeException: Unknown datum type java.lang.Byte
                    rec.put(i - 1, ((Byte) value).intValue());
                } else if (value instanceof Short) {
                    //MS SQL returns TINYINT as a Java Short, which Avro doesn't understand.
                    rec.put(i - 1, ((Short) value).intValue());
                } else if (value instanceof BigDecimal) {
                    // Avro can't handle BigDecimal as a number - it will throw an AvroRuntimeException such as: "Unknown datum type: java.math.BigDecimal: 38"
                    rec.put(i - 1, value.toString());

                } else if (value instanceof BigInteger) {
                    // Check the precision of the BIGINT. Some databases allow arbitrary precision (> 19), but Avro won't handle that.
                    // If the SQL type is BIGINT and the precision is between 0 and 19 (inclusive), the BigInteger is likely a
                    // long (and the schema says it will be), so try to get its value as a long.
                    // Otherwise, Avro can't handle BigInteger as a number - it will throw an AvroRuntimeException
                    // such as: "Unknown datum type: java.math.BigInteger: 38". In this case the schema is expecting a string.
                    if (javaSqlType == BIGINT) {
                        int precision = meta.getPrecision(i);
                        if (precision < 0 || precision > MAX_DIGITS_IN_BIGINT) {
                            rec.put(i - 1, value.toString());
                        } else {
                            try {
                                rec.put(i - 1, ((BigInteger) value).longValueExact());
                            } catch (ArithmeticException ae) {
                                // Since the value won't fit in a long, convert it to a string
                                rec.put(i - 1, value.toString());
                            }
                        }
                    } else {
                        rec.put(i - 1, value.toString());
                    }

                } else if (value instanceof Number || value instanceof Boolean) {
                    if (javaSqlType == BIGINT) {
                        int precision = meta.getPrecision(i);
                        if (precision < 0 || precision > MAX_DIGITS_IN_BIGINT) {
                            rec.put(i - 1, value.toString());
                        } else {
                            rec.put(i - 1, value);
                        }
                    } else {
                        rec.put(i - 1, value);
                    }

                } else {
                    // The different types that we support are numbers (int, long, double, float),
                    // as well as boolean values and Strings. Since Avro doesn't provide
                    // timestamp types, we want to convert those to Strings. So we will cast anything other
                    // than numbers or booleans to strings by using the toString() method.
                    rec.put(i - 1, value.toString());
                }
            }
            dataFileWriter.append(rec);
            nrOfRows += 1;

            if (maxRows > 0 && nrOfRows == maxRows)
                break;
        }

        return nrOfRows;
    }
}

From source file:be.dataminded.nifi.plugins.util.JdbcCommon.java

public static long convertToAvroStream(final ResultSet rs, final OutputStream outStream, String recordName,
        ResultSetRowCallback callback, final int maxRows, boolean convertNames)
        throws SQLException, IOException {
    final Schema schema = createSchema(rs, recordName, convertNames);
    final GenericRecord rec = new GenericData.Record(schema);

    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    try (final DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, outStream);

        final ResultSetMetaData meta = rs.getMetaData();
        final int nrOfColumns = meta.getColumnCount();
        long nrOfRows = 0;
        while (rs.next()) {
            if (callback != null) {
                callback.processRow(rs);
            }
            for (int i = 1; i <= nrOfColumns; i++) {
                final int javaSqlType = meta.getColumnType(i);

                // Need to handle CLOB and BLOB before getObject() is called, due to ResultSet's maximum portability statement
                if (javaSqlType == CLOB) {
                    Clob clob = rs.getClob(i);
                    if (clob != null) {
                        long numChars = clob.length();
                        char[] buffer = new char[(int) numChars];
                        InputStream is = clob.getAsciiStream();
                        int index = 0;
                        int c = is.read();
                        while (c >= 0) { // read until EOF (-1); a zero char is valid data
                            buffer[index++] = (char) c;
                            c = is.read();
                        }
                        rec.put(i - 1, new String(buffer));
                        clob.free();
                    } else {
                        rec.put(i - 1, null);
                    }
                    continue;
                }

                if (javaSqlType == BLOB) {
                    Blob blob = rs.getBlob(i);
                    if (blob != null) {
                        long numChars = blob.length();
                        byte[] buffer = new byte[(int) numChars];
                        InputStream is = blob.getBinaryStream();
                        int index = 0;
                        int c = is.read();
                        while (c >= 0) { // read until EOF (-1); a zero byte is valid data
                            buffer[index++] = (byte) c;
                            c = is.read();
                        }
                        ByteBuffer bb = ByteBuffer.wrap(buffer);
                        rec.put(i - 1, bb);
                        blob.free();
                    } else {
                        rec.put(i - 1, null);
                    }
                    continue;
                }

                final Object value = rs.getObject(i);

                if (value == null) {
                    rec.put(i - 1, null);

                } else if (javaSqlType == BINARY || javaSqlType == VARBINARY || javaSqlType == LONGVARBINARY
                        || javaSqlType == ARRAY) {
                    // byte arrays require slightly different handling
                    byte[] bytes = rs.getBytes(i);
                    ByteBuffer bb = ByteBuffer.wrap(bytes);
                    rec.put(i - 1, bb);

                } else if (value instanceof Byte) {
                    // tinyint(1) type is returned by JDBC driver as java.sql.Types.TINYINT
                    // But value is returned by JDBC as java.lang.Byte
                    // (at least H2 JDBC works this way)
                    // direct put to avro record results:
                    // org.apache.avro.AvroRuntimeException: Unknown datum type java.lang.Byte
                    rec.put(i - 1, ((Byte) value).intValue());
                } else if (value instanceof Short) {
                    //MS SQL returns TINYINT as a Java Short, which Avro doesn't understand.
                    rec.put(i - 1, ((Short) value).intValue());
                } else if (value instanceof BigDecimal) {
                    // Avro can't handle BigDecimal as a number - it will throw an AvroRuntimeException such as: "Unknown datum type: java.math.BigDecimal: 38"
                    try {
                        int scale = meta.getScale(i);
                        BigDecimal bigDecimal = ((BigDecimal) value);
                        if (scale == 0) {
                            if (meta.getPrecision(i) < 10) {
                                rec.put(i - 1, bigDecimal.intValue());
                            } else {
                                rec.put(i - 1, bigDecimal.longValue());
                            }
                        } else {
                            rec.put(i - 1, bigDecimal.doubleValue());
                        }
                    } catch (Exception e) {
                        rec.put(i - 1, value.toString());
                    }
                } else if (value instanceof BigInteger) {
                    // Check the precision of the BIGINT. Some databases allow arbitrary precision (> 19), but Avro won't handle that.
                    // If the SQL type is BIGINT and the precision is between 0 and 19 (inclusive), the BigInteger is likely a
                    // long (and the schema says it will be), so try to get its value as a long.
                    // Otherwise, Avro can't handle BigInteger as a number - it will throw an AvroRuntimeException
                    // such as: "Unknown datum type: java.math.BigInteger: 38". In this case the schema is expecting a string.
                    if (javaSqlType == BIGINT) {
                        int precision = meta.getPrecision(i);
                        if (precision < 0 || precision > MAX_DIGITS_IN_BIGINT) {
                            rec.put(i - 1, value.toString());
                        } else {
                            try {
                                rec.put(i - 1, ((BigInteger) value).longValueExact());
                            } catch (ArithmeticException ae) {
                                // Since the value won't fit in a long, convert it to a string
                                rec.put(i - 1, value.toString());
                            }
                        }
                    } else {
                        rec.put(i - 1, value.toString());
                    }

                } else if (value instanceof Number || value instanceof Boolean) {
                    if (javaSqlType == BIGINT) {
                        int precision = meta.getPrecision(i);
                        if (precision < 0 || precision > MAX_DIGITS_IN_BIGINT) {
                            rec.put(i - 1, value.toString());
                        } else {
                            rec.put(i - 1, value);
                        }
                    } else {
                        rec.put(i - 1, value);
                    }

                } else {
                    // The different types that we support are numbers (int, long, double, float),
                    // as well as boolean values and Strings. Since Avro doesn't provide
                    // timestamp types, we want to convert those to Strings. So we will cast anything other
                    // than numbers or booleans to strings by using the toString() method.
                    rec.put(i - 1, value.toString());
                }
            }
            dataFileWriter.append(rec);
            nrOfRows += 1;

            if (maxRows > 0 && nrOfRows == maxRows)
                break;
        }

        return nrOfRows;
    }
}