Example usage for java.sql PreparedStatement setBinaryStream

Introduction

This page lists example usages of the java.sql PreparedStatement setBinaryStream method.

Prototype

void setBinaryStream(int parameterIndex, java.io.InputStream x, long length) throws SQLException;

Document

Sets the designated parameter to the given input stream, which will have the specified number of bytes.
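
Before the real-world examples below, a minimal sketch of the call; the DOCS table and its ID and DATA columns are hypothetical names used only for illustration. The long-valued length overload lets a JDBC 4.0 driver stream the content at execution time instead of buffering it in memory:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class SetBinaryStreamExample {

    // Streams a file into a binary column, passing the exact byte count
    // as a long; the driver reads the stream when the statement executes.
    public static void insertFile(Connection conn, int id, Path file)
            throws SQLException, IOException {
        String sql = "INSERT INTO DOCS (ID, DATA) VALUES (?, ?)";
        try (InputStream in = Files.newInputStream(file);
                PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setInt(1, id);
            ps.setBinaryStream(2, in, Files.size(file));
            ps.executeUpdate();
        }
    }
}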

Usage

From source file:org.wso2.carbon.repository.core.jdbc.dao.JDBCResourceVersionDAO.java

private void versionContent(int contentID) throws RepositoryException {
    if (!isContentHistoryExist(contentID)) {
        InputStream contentStream = resourceDAO.getContentStream(contentID);

        if (contentStream == null) {
            // create an empty input stream
            contentStream = new ByteArrayInputStream(RepositoryUtils.encodeString(""));
        }

        // copy the content to the content_history table
        JDBCDatabaseTransaction.ManagedRegistryConnection conn = JDBCDatabaseTransaction.getConnection();
        PreparedStatement ps = null;

        try {
            String sql = "INSERT INTO REG_CONTENT_HISTORY (REG_CONTENT_ID, REG_CONTENT_DATA, REG_TENANT_ID) VALUES (?, ?, ?)";

            int size = contentStream.available();
            ps = conn.prepareStatement(sql);
            ps.setInt(1, contentID);
            ps.setBinaryStream(2, contentStream, size);
            ps.setInt(3, CurrentContext.getTenantId());
            ps.executeUpdate();

        } catch (Exception ex) {
            String msg = "Failed to put the content into history with the content id " + contentID + ". "
                    + ex.getMessage();

            if (isContentHistoryExist(contentID)) {
                log.error("Concurrent Modification: " + msg, ex);
                throw new RepositoryConcurrentModificationException(msg, ex);
            }

            log.error(msg, ex);
            throw new RepositoryDBException(msg, ex);
        } finally {
            try {
                if (ps != null) {
                    ps.close();
                }
            } catch (SQLException ex) {
                String msg = InternalConstants.RESULT_SET_PREPARED_STATEMENT_CLOSE_ERROR;
                log.error(msg, ex);
            }
        }
    }
}

From source file:org.wso2.carbon.registry.core.jdbc.dao.JDBCResourceVersionDAO.java

private void versionContent(int contentID) throws RegistryException {

    if (!isContentHistoryExist(contentID)) {
        InputStream contentStream = resourceDAO.getContentStream(contentID);
        if (contentStream == null) {
            // create an empty input stream
            contentStream = new ByteArrayInputStream(RegistryUtils.encodeString(""));
        }
        // copy the content to the content_history table
        JDBCDatabaseTransaction.ManagedRegistryConnection conn = JDBCDatabaseTransaction.getConnection();
        PreparedStatement ps = null;
        try {

            String sql = "INSERT INTO REG_CONTENT_HISTORY (REG_CONTENT_ID, REG_CONTENT_DATA, "
                    + "REG_TENANT_ID) VALUES (?, ?, ?)";

            int size = contentStream.available();
            ps = conn.prepareStatement(sql);
            ps.setInt(1, contentID);
            ps.setBinaryStream(2, contentStream, size);
            ps.setInt(3, CurrentSession.getTenantId());
            ps.executeUpdate();

        } catch (Exception ex) {
            String msg = "Failed to put the content into history with the content id " + contentID + ". "
                    + ex.getMessage();
            if (isContentHistoryExist(contentID)) {
                log.error("Concurrent Modification: " + msg, ex);
                throw new ConcurrentModificationException(msg, ex);
            }
            log.error(msg, ex);
            throw new RegistryException(msg, ex);
        } finally {
            try {
                if (ps != null) {
                    ps.close();
                }
            } catch (SQLException ex) {
                String msg = RegistryConstants.RESULT_SET_PREPARED_STATEMENT_CLOSE_ERROR;
                log.error(msg, ex);
            }
        }
    }
}
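
A caution on both versionContent() examples above: InputStream.available() returns an estimate of how many bytes can be read without blocking, not the total stream length, so using it as the length argument is only reliable for in-memory streams such as ByteArrayInputStream. One safer pattern, sketched below under the assumption that the content fits comfortably in memory, is to buffer the stream first so the length passed to setBinaryStream is exact:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.PreparedStatement;
import java.sql.SQLException;

final class StreamBinding {

    // Copies the stream into a byte[] so the declared length is exact.
    static void bindFully(PreparedStatement ps, int index, InputStream in)
            throws IOException, SQLException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        byte[] chunk = new byte[8192];
        int n;
        while ((n = in.read(chunk)) != -1) {
            buffer.write(chunk, 0, n);
        }
        byte[] bytes = buffer.toByteArray();
        ps.setBinaryStream(index, new ByteArrayInputStream(bytes), bytes.length);
    }
}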

From source file:org.cloudgraph.rdb.filter.RDBStatementExecutor.java

@Override
public void executeInsert(PlasmaType type, StringBuilder sql, Map<String, PropertyPair> values) {
    PreparedStatement statement = null;
    List<InputStream> streams = null;
    try {

        if (log.isDebugEnabled()) {
            log.debug("execute: " + sql.toString());
            StringBuilder paramBuf = createParamDebug(values);
            log.debug("params: " + paramBuf.toString());
        }

        statement = con.prepareStatement(sql.toString());

        for (PropertyPair pair : values.values()) {
            PlasmaProperty valueProp = pair.getProp();
            if (pair.getValueProp() != null)
                valueProp = pair.getValueProp();
            int jdbcType = converter.toJDBCDataType(valueProp, pair.getValue());
            Object jdbcValue = converter.toJDBCDataValue(valueProp, pair.getValue());
            if (jdbcType != Types.BLOB && jdbcType != Types.VARBINARY) {
                statement.setObject(pair.getColumn(), jdbcValue, jdbcType);
            } else {
                byte[] bytes = (byte[]) jdbcValue;
                long len = bytes.length;
                ByteArrayInputStream is = new ByteArrayInputStream(bytes);
                statement.setBinaryStream(pair.getColumn(), is, len);
                if (streams == null)
                    streams = new ArrayList<InputStream>();
                streams.add(is);
            }
        }

        statement.execute();
    } catch (Throwable t) {
        throw new DataAccessException(t);
    } finally {
        try {
            if (statement != null)
                statement.close();
        } catch (SQLException e) {
            log.error(e.getMessage(), e);
        }
        if (streams != null)
            try {
                for (InputStream stream : streams)
                    stream.close();
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }

    }
}
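
A side note on the BLOB branch, which also applies to the other RDBStatementExecutor methods in this list: when the value is already a byte[], PreparedStatement.setBytes is an equivalent binding that needs no stream list or cleanup; setBinaryStream pays off when the data is large or arrives incrementally. A minimal sketch of the alternative:

import java.sql.PreparedStatement;
import java.sql.SQLException;

final class ByteColumnBinding {

    // Equivalent to wrapping the array in a ByteArrayInputStream,
    // but with no stream to track or close afterwards.
    static void bind(PreparedStatement statement, int column, byte[] bytes)
            throws SQLException {
        statement.setBytes(column, bytes);
    }
}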

From source file:org.cloudgraph.rdb.filter.RDBStatementExecutor.java

@Override
public void execute(PlasmaType type, StringBuilder sql, Map<String, PropertyPair> values) {
    PreparedStatement statement = null;
    List<InputStream> streams = null;
    try {
        if (log.isDebugEnabled()) {
            log.debug("execute: " + sql.toString());
            StringBuilder paramBuf = createParamDebug(values);
            log.debug("params: " + paramBuf.toString());
        }
        statement = con.prepareStatement(sql.toString());
        for (PropertyPair pair : values.values()) {
            PlasmaProperty valueProp = pair.getProp();
            if (pair.getValueProp() != null)
                valueProp = pair.getValueProp();

            int jdbcType = converter.toJDBCDataType(valueProp, pair.getValue());
            Object jdbcValue = converter.toJDBCDataValue(valueProp, pair.getValue());
            if (jdbcType != Types.BLOB && jdbcType != Types.VARBINARY) {
                statement.setObject(pair.getColumn(), jdbcValue, jdbcType);
            } else {
                byte[] bytes = (byte[]) jdbcValue;
                long len = bytes.length;
                ByteArrayInputStream is = new ByteArrayInputStream(bytes);
                statement.setBinaryStream(pair.getColumn(), is, len);
                if (streams == null)
                    streams = new ArrayList<InputStream>();
                streams.add(is);
            }

            if (pair.getOldValue() != null) {
                Object jdbcOldValue = converter.toJDBCDataValue(valueProp, pair.getOldValue());
                if (jdbcType != Types.BLOB && jdbcType != Types.VARBINARY) {
                    statement.setObject(pair.getOldValueColumn(), jdbcOldValue, jdbcType);
                } else {
                    byte[] bytes = (byte[]) jdbcOldValue;
                    long len = bytes.length;
                    ByteArrayInputStream is = new ByteArrayInputStream(bytes);
                    statement.setBinaryStream(pair.getOldValueColumn(), is, len);
                    if (streams == null)
                        streams = new ArrayList<InputStream>();
                    streams.add(is);
                }
            }
        }
        statement.executeUpdate();
    } catch (Throwable t) {
        throw new DataAccessException(t);
    } finally {
        try {
            if (statement != null)
                statement.close();
        } catch (SQLException e) {
            log.error(e.getMessage(), e);
        }
        if (streams != null)
            try {
                for (InputStream stream : streams)
                    stream.close();
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
    }
}

From source file:org.kawanfw.test.api.client.InsertAndUpdateBlobTest.java

/**
 * Insert a blob
 * 
 * @throws Exception
 *             if any Exception occurs
 */
public void insertLoopPrepStatement(Connection connection, int numberToInsert, File blobFile) throws Exception {

    // We can now use our Remote JDBC Connection as a regular Connection!
    connection.setAutoCommit(false);

    // We will do all our remote insert in a SQL Transaction
    try {

        String sql = "insert into orderlog values ( ?, ?, ?, ?, ?, ?, ?, ?, ? )";

        // Create a new Prepared Statement
        PreparedStatement prepStatement = null;

        MessageDisplayer.display("");
        MessageDisplayer.display("Inserting " + numberToInsert + " orderlog...");

        for (int customerId = 1; customerId < numberToInsert + 1; customerId++) {
            int i = 1;
            long theTime = new java.util.Date().getTime();

            // We will insert a Blob (the image of the product).
            // The transfer will be done in streaming both on the client
            // and on the Servlet Server: we can upload/download very big
            // files.

            // InputStream in = new BufferedInputStream(new
            // FileInputStream(blobFile));
            InputStream in = new FileInputStream(blobFile);

            prepStatement = connection.prepareStatement(sql);

            prepStatement.setInt(i++, customerId);
            prepStatement.setInt(i++, customerId);
            prepStatement.setString(i++, "Item Description No " + customerId);
            prepStatement.setBigDecimal(i++, new BigDecimal(customerId));
            prepStatement.setDate(i++, new java.sql.Date(theTime));
            prepStatement.setTimestamp(i++, new Timestamp(theTime));

            prepStatement.setBinaryStream(i++, in, (int) blobFile.length());

            // prepStatement.setBoolean(i++, false);
            SqlUtil sqlUtil = new SqlUtil(connection);
            if (sqlUtil.isIngres() || sqlUtil.isPostgreSQL()) {
                prepStatement.setInt(i++, 0);
            } else {
                prepStatement.setBoolean(i++, false);
            }

            prepStatement.setInt(i++, customerId);

            // SystemOutHandle.display("Before executeUpdate...");
            prepStatement.executeUpdate();
            prepStatement.close();

            in.close();

        }

        // We do either everything in a single transaction or nothing
        connection.commit(); // Commit is propagated on Server
        MessageDisplayer.display("Remote Commit Done on AceQL Server!");
    } catch (Exception e) {
        connection.rollback();
        throw e;
    } finally {
        connection.setAutoCommit(true);
    }

}

From source file:org.kawanfw.test.api.client.InsertAndUpdateBlobTestPsqlOID.java

/**
 * Insert a blob
 * 
 * @throws Exception
 *             if any Exception occurs
 */
public void insertLoopPrepStatement(Connection connection, int numberToInsert, File blobFile) throws Exception {

    // We can now use our Remote JDBC Connection as a regular Connection!
    connection.setAutoCommit(false);

    // We will do all our remote insert in a SQL Transaction
    try {

        String sql = "insert into orderlog_2 values ( ?, ?, ?, ?, ?, ?, ?, ?, ? )";

        // Create a new Prepared Statement
        PreparedStatement prepStatement = null;

        MessageDisplayer.display("");
        MessageDisplayer.display("Inserting " + numberToInsert + " orderlog_2...");

        for (int customerId = 1; customerId < numberToInsert + 1; customerId++) {
            int i = 1;
            long theTime = new java.util.Date().getTime();

            // We will insert a Blob (the image of the product).
            // The transfer will be done in streaming both on the client
            // and on the Servlet Server: we can upload/download very big
            // files.

            // InputStream in = new BufferedInputStream(new
            // FileInputStream(blobFile));
            InputStream in = new FileInputStream(blobFile);

            prepStatement = connection.prepareStatement(sql);

            prepStatement.setInt(i++, customerId);
            prepStatement.setInt(i++, customerId);
            prepStatement.setString(i++, "Item Description No " + customerId);
            prepStatement.setBigDecimal(i++, new BigDecimal(customerId));
            prepStatement.setDate(i++, new java.sql.Date(theTime));
            prepStatement.setTimestamp(i++, new Timestamp(theTime));

            prepStatement.setBinaryStream(i++, in, (int) blobFile.length());

            // prepStatement.setBoolean(i++, false);
            SqlUtil sqlUtil = new SqlUtil(connection);
            if (sqlUtil.isIngres() || sqlUtil.isPostgreSQL()) {
                prepStatement.setInt(i++, 0);
            } else {
                prepStatement.setBoolean(i++, false);
            }

            prepStatement.setInt(i++, customerId);

            // SystemOutHandle.display("Before executeUpdate...");
            prepStatement.executeUpdate();
            prepStatement.close();

            in.close();

        }

        // We do either everything in a single transaction or nothing
        connection.commit(); // Commit is propagated on Server
        MessageDisplayer.display("Remote Commit Done on AceQL Server!");
    } catch (Exception e) {
        connection.rollback();
        throw e;
    } finally {
        connection.setAutoCommit(true);
    }

}
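
Both insert loops above close the stream and statement by hand and leak them if executeUpdate throws. On Java 7+ the same loop can be written with try-with-resources; a condensed sketch, with a hypothetical three-column orderlog_min table standing in for the full orderlog schema:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

final class BlobInsertLoop {

    static void insert(Connection connection, int rows, File blobFile)
            throws SQLException, IOException {
        String sql = "insert into orderlog_min values ( ?, ?, ? )";
        for (int id = 1; id <= rows; id++) {
            // Both resources are closed even if executeUpdate throws.
            try (InputStream in = new FileInputStream(blobFile);
                    PreparedStatement ps = connection.prepareStatement(sql)) {
                ps.setInt(1, id);
                ps.setString(2, "Item Description No " + id);
                // long overload; no int cast needed on JDBC 4.0+ drivers
                ps.setBinaryStream(3, in, blobFile.length());
                ps.executeUpdate();
            }
        }
    }
}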

From source file:org.cloudgraph.rdb.filter.RDBStatementExecutor.java

@Override
public List<PropertyPair> executeInsertWithGeneratedKeys(PlasmaType type, StringBuilder sql,
        Map<String, PropertyPair> values) {
    List<PropertyPair> resultKeys = new ArrayList<PropertyPair>();
    PreparedStatement statement = null;
    List<InputStream> streams = null;
    ResultSet generatedKeys = null;
    try {

        if (log.isDebugEnabled()) {
            log.debug("execute: " + sql.toString());
            StringBuilder paramBuf = createParamDebug(values);
            log.debug("params: " + paramBuf.toString());
        }

        statement = con.prepareStatement(sql.toString(), PreparedStatement.RETURN_GENERATED_KEYS);

        for (PropertyPair pair : values.values()) {
            PlasmaProperty valueProp = pair.getProp();
            if (pair.getValueProp() != null)
                valueProp = pair.getValueProp();
            int jdbcType = converter.toJDBCDataType(valueProp, pair.getValue());
            Object jdbcValue = converter.toJDBCDataValue(valueProp, pair.getValue());
            if (jdbcType != Types.BLOB && jdbcType != Types.VARBINARY) {
                statement.setObject(pair.getColumn(), jdbcValue, jdbcType);
            } else {
                byte[] bytes = (byte[]) jdbcValue;
                long len = bytes.length;
                ByteArrayInputStream is = new ByteArrayInputStream(bytes);
                statement.setBinaryStream(pair.getColumn(), is, len);
                if (streams == null)
                    streams = new ArrayList<InputStream>();
                streams.add(is);
            }
        }

        statement.execute();
        generatedKeys = statement.getGeneratedKeys();
        ResultSetMetaData rsMeta = generatedKeys.getMetaData();
        int numcols = rsMeta.getColumnCount();
        if (log.isDebugEnabled())
            log.debug("returned " + numcols + " keys");

        if (generatedKeys.next()) {
            // FIXME; without metadata describing which properties
            // are actually a sequence, there is guess work
            // involved in matching the values returned
            // automatically from PreparedStatement as they
            // are anonymous in terms of the column names
            // making it impossible to match them to a metadata
            // property.
            List<Property> pkPropList = type.findProperties(KeyType.primary);
            if (pkPropList == null || pkPropList.size() == 0)
                throw new DataAccessException("no pri-key properties found for type '" + type.getName() + "'");
            if (pkPropList.size() > 1)
                throw new DataAccessException("multiple pri-key properties found for type '" + type.getName()
                        + "' - cannot map to generated keys");
            PlasmaProperty prop = (PlasmaProperty) pkPropList.get(0);
            // FIXME: need to find properties per column by physical name
            // alias
            // in case where multiple generated pri-keys
            for (int i = 1; i <= numcols; i++) {
                String columnName = rsMeta.getColumnName(i);
                if (log.isDebugEnabled())
                    log.debug("returned key column '" + columnName + "'");
                int columnType = rsMeta.getColumnType(i);
                Object value = converter.fromJDBCDataType(generatedKeys, i, columnType, prop);
                PropertyPair pair = new PropertyPair((PlasmaProperty) prop, value);
                resultKeys.add(pair);
            }
        }
    } catch (Throwable t) {
        throw new DataAccessException(t);
    } finally {
        try {
            if (statement != null)
                statement.close();
        } catch (SQLException e) {
            log.error(e.getMessage(), e);
        }
        if (streams != null)
            try {
                for (InputStream stream : streams)
                    stream.close();
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
    }

    return resultKeys;
}

From source file:org.apache.jackrabbit.core.fs.db.DatabaseFileSystem.java

/**
 * Executes the given SQL statement with the specified parameters.
 * If a <code>SQLException</code> is encountered <i>one</i> attempt is made
 * to re-establish the database connection and re-execute the statement.
 *
 * @param sql    statement to execute
 * @param params parameters to set
 * @return the <code>Statement</code> object that had been executed
 * @throws SQLException if an error occurs
 */
protected Statement executeStmt(String sql, Object[] params) throws SQLException {
    int trials = 2;
    while (true) {
        PreparedStatement stmt = (PreparedStatement) preparedStatements.get(sql);
        try {
            for (int i = 0; i < params.length; i++) {
                if (params[i] instanceof SizedInputStream) {
                    SizedInputStream in = (SizedInputStream) params[i];
                    stmt.setBinaryStream(i + 1, in, (int) in.getSize());
                } else {
                    stmt.setObject(i + 1, params[i]);
                }
            }
            stmt.execute();
            resetStatement(stmt);
            return stmt;
        } catch (SQLException se) {
            if (--trials == 0) {
                // no more trials, re-throw
                throw se;
            }
            log.warn("execute failed, about to reconnect...", se);

            // try to reconnect
            if (reestablishConnection()) {
                // reconnect succeeded; check whether it's possible to
                // re-execute the prepared stmt with the given parameters
                for (int i = 0; i < params.length; i++) {
                    if (params[i] instanceof SizedInputStream) {
                        SizedInputStream in = (SizedInputStream) params[i];
                        if (in.isConsumed()) {
                            // we're unable to re-execute the prepared stmt
                        // since an InputStream parameter has already
                            // been 'consumed';
                            // re-throw previous SQLException
                            throw se;
                        }
                    }
                }

                // try again to execute the statement
                continue;
            } else {
                // reconnect failed, re-throw previous SQLException
                throw se;
            }
        }
    }
}
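
The isConsumed() check above exists because a half-read InputStream parameter cannot be replayed on the retry. Where no such wrapper is available, a similar guard can be built from mark/reset; a sketch, assuming the payload fits within the chosen mark limit:

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

final class ReplayableStreams {

    // Wrap before first use: mark() records the current position so the
    // stream can be rewound for a second execution attempt, provided no
    // more than 'limit' bytes were read in the failed attempt.
    static BufferedInputStream replayable(InputStream in, int limit) {
        BufferedInputStream buffered = new BufferedInputStream(in);
        buffered.mark(limit);
        return buffered;
    }

    // Call before re-executing; false means the stream cannot be rewound
    // and the retry must be abandoned, as in the isConsumed() case above.
    static boolean rewind(BufferedInputStream in) {
        try {
            in.reset();
            return true;
        } catch (IOException e) {
            return false;
        }
    }
}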

From source file:org.wso2.carbon.user.core.tenant.JDBCTenantManager.java

public int addTenant(org.wso2.carbon.user.api.Tenant tenant) throws UserStoreException {
    // if tenant id present in tenant bean, we create the tenant with that tenant id.
    if (tenant.getId() > 0) {
        return addTenantWithGivenId(tenant);
    }

    Connection dbConnection = null;
    PreparedStatement prepStmt = null;
    ResultSet result = null;
    int id = 0;
    try {
        dbConnection = getDBConnection();
        String sqlStmt = TenantConstants.ADD_TENANT_SQL;

        String dbProductName = dbConnection.getMetaData().getDatabaseProductName();
        prepStmt = dbConnection.prepareStatement(sqlStmt,
                new String[] { DBUtils.getConvertedAutoGeneratedColumnName(dbProductName, "UM_ID") });
        prepStmt.setString(1, tenant.getDomain().toLowerCase());
        prepStmt.setString(2, tenant.getEmail());
        Date createdTime = tenant.getCreatedDate();
        long createdTimeMs;
        if (createdTime == null) {
            createdTimeMs = System.currentTimeMillis();
        } else {
            createdTimeMs = createdTime.getTime();
        }
        prepStmt.setTimestamp(3, new Timestamp(createdTimeMs));
        String realmConfigString = RealmConfigXMLProcessor
                .serialize((RealmConfiguration) tenant.getRealmConfig()).toString();
        InputStream is = new ByteArrayInputStream(realmConfigString.getBytes());
        prepStmt.setBinaryStream(4, is, is.available());

        prepStmt.executeUpdate();

        result = prepStmt.getGeneratedKeys();
        if (result.next()) {
            id = result.getInt(1);
        }
        dbConnection.commit();
    } catch (Exception e) {

        DatabaseUtil.rollBack(dbConnection);

        String msg = "Error in adding tenant with " + "tenant domain: " + tenant.getDomain().toLowerCase()
                + ".";
        if (log.isDebugEnabled()) {
            log.debug(msg, e);
        }
        throw new UserStoreException(msg, e);
    } finally {
        DatabaseUtil.closeAllConnections(dbConnection, result, prepStmt);
    }
    return id;
}
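
Unlike the registry examples earlier, the available() call here is dependable: ByteArrayInputStream.available() returns the exact number of remaining bytes. The binding can also skip the stream entirely; a brief sketch, adding an explicit charset since the original getBytes() call depends on the platform default encoding:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.sql.PreparedStatement;
import java.sql.SQLException;

final class RealmConfigBinding {

    // Equivalent to the stream binding above, with deterministic encoding.
    static void bind(PreparedStatement prepStmt, int parameterIndex, String realmConfig)
            throws SQLException {
        byte[] bytes = realmConfig.getBytes(StandardCharsets.UTF_8);
        prepStmt.setBinaryStream(parameterIndex, new ByteArrayInputStream(bytes), bytes.length);
    }
}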

From source file:org.kawanfw.sql.servlet.sql.ServerPreparedStatementParameters.java

/**
 * Set the binary stream using the underlying Blob file uploaded by the
 * client side
 * 
 * @param preparedStatement
 *            The Prepared Statement to execute
 * @param parameterIndex
 *            the parameter index
 * @param paramValue
 *            the parameter value (the file name)
 * @throws SQLException
 * @throws IOException
 */
private void setBinaryStream(PreparedStatement preparedStatement, int parameterIndex, String paramValue)
        throws SQLException, IOException {
    // Extract the Blob file from the parameter

    debug("before getFileFromParameter()");
    File blobFile = getFileFromParameter(paramValue);
    blobsOrClobs.add(blobFile);

    debug("before new BufferedInputStream(new FileInputStream(blobFile))");

    // Then update the prepared statement binary stream and we are done!
    InputStream in = new BufferedInputStream(new FileInputStream(blobFile));
    long theLength = blobFile.length();

    debug("before preparedStatement.setBinaryStream()");

    Connection connection = preparedStatement.getConnection();
    String sql = statementHolder.getSqlOrder();

    // Test if we are in PostgreSQL with OID column for large file
    if (PostgreSqlUtil.isPostgreSqlStatementWithOID(connection, sql)) {

        debug("column is OID! " + parameterIndex);
        PostgreSqlUtil.setPostgreSqlParameterWithLargeObject(preparedStatement, parameterIndex, in, connection);

    } else {
        // We cast theLength, because the long version may not be
        // implemented by
        // the driver
        debug("column is NOT OID " + parameterIndex);
        preparedStatement.setBinaryStream(parameterIndex, in, (int) theLength);
    }

    this.inList.add(in);

    debug("after preparedStatement.setBinaryStream()");

}
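
The int cast in the final branch guards against drivers that predate the JDBC 4.0 long overload. A hedged compatibility wrapper that tries the long form first and falls back; the AbstractMethodError catch is defensive, for drivers compiled against pre-4.0 interfaces:

import java.io.InputStream;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;

final class BinaryStreamCompat {

    // Prefers setBinaryStream(int, InputStream, long) and falls back to
    // the int-length variant when the driver does not implement it.
    static void set(PreparedStatement ps, int index, InputStream in, long length)
            throws SQLException {
        try {
            ps.setBinaryStream(index, in, length);
        } catch (SQLFeatureNotSupportedException | AbstractMethodError e) {
            if (length > Integer.MAX_VALUE) {
                throw new SQLException("content too large for the int-length overload", e);
            }
            ps.setBinaryStream(index, in, (int) length);
        }
    }
}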