Example usage for java.sql PreparedStatement getConnection

List of usage examples for java.sql PreparedStatement getConnection

Introduction

On this page you can find example usage for java.sql PreparedStatement getConnection.

Prototype

Connection getConnection() throws SQLException;

Source Link

Document

Retrieves the Connection object that produced this Statement object.

Usage

From source file:de.innovationgate.webgate.api.jdbc.custom.JDBCSource.java

/**
 * Inserts a single row into the given table.
 *
 * @param folder the target table name
 * @param key    a Map of key column names to values; an entry whose value is
 *               null marks the key column whose value the database generates
 * @param bean   a Map of column names to the values to insert
 * @return true if the row was inserted
 * @throws WGBackendException if the insert fails, if generated-key retrieval is
 *                            unsupported or returns no key, or on any SQL error
 */
public boolean insertContent(String folder, Object key, Object bean) throws WGBackendException {

    ResultSet resultSet = null;
    try {

        // Gather all columns to set as values, including those keys that are set.
        // Find out the key to generate, if present (marked by a null value).
        Map allColumns = new HashMap();
        Map valuesMap = (Map) bean;
        allColumns.putAll(valuesMap);
        String keyToGenerate = null;
        Map keyMap = (Map) key;
        Iterator keys = keyMap.keySet().iterator();
        while (keys.hasNext()) {
            String keyName = (String) keys.next();
            Object value = keyMap.get(keyName);
            if (value != null) {
                allColumns.put(keyName, value);
            } else {
                keyToGenerate = keyName;
            }
        }

        // Execute Statement
        PreparedStatement stmt = getInsertStatement(folder, allColumns);
        int rows = stmt.executeUpdate();
        if (rows != 1) {
            throw new WGBackendException("Insert failed. Wrong number of inserted rows returned: " + rows);
        }

        if (keyToGenerate != null) {
            // Try to retrieve generated key and store it at the bean and the key map
            if (!stmt.getConnection().getMetaData().supportsGetGeneratedKeys()) {
                throw new WGBackendException(
                        "Row was inserted but JDBC Driver does not support returning of generated keys. Usage of a table with generated key is not possible with this driver.");
            }
            ResultSet generatedKeys = stmt.getGeneratedKeys();
            try {
                // next() instead of first(): generated-key result sets are typically
                // TYPE_FORWARD_ONLY, where first() throws SQLException
                if (!generatedKeys.next()) {
                    throw new WGBackendException("Row was inserted but the driver returned no generated key");
                }
                Object generatedKey = generatedKeys.getObject(1);
                valuesMap.put(keyToGenerate, generatedKey);
                keyMap.put(keyToGenerate, generatedKey);
            } finally {
                generatedKeys.close();
            }
        }

        // Commit on both paths; the original committed only when a key was
        // generated, leaving plain inserts uncommitted with auto-commit off
        if (getConnection().getAutoCommit() == false) {
            getConnection().commit();
        }

        return true;

    } catch (SQLException e) {
        try {
            if (getConnection().getAutoCommit() == false) {
                getConnection().rollback();
            }
        } catch (SQLException e1) {
            // Log the rollback failure itself (original logged the wrong exception, e)
            Logger.getLogger(LOGGER_NAME).error("Error rolling back content insertion", e1);
        }
        throw new WGBackendException("Error inserting row", e);
    } finally {
        closeResultSet(resultSet);
    }

}

From source file:com.mmnaseri.dragonfly.data.impl.DefaultDataAccess.java

/**
 * Disposes of a prepared statement together with the connection that owns it.
 *
 * @param preparedStatement the statement to close
 * @throws UnsuccessfulOperationError if cleanup fails with a SQL error
 */
private void cleanUpStatement(PreparedStatement preparedStatement) {
    try {
        // Grab the owning connection before closing the statement, since the
        // statement is our only handle to it.
        final Connection owner = preparedStatement.getConnection();
        closeStatement(preparedStatement);
        closeConnection(owner);
    } catch (SQLException e) {
        throw new UnsuccessfulOperationError("Failed to clean up", e);
    }
}

From source file:com.mmnaseri.dragonfly.data.impl.DefaultDataAccess.java

/**
 * Ends batch mode for the current thread, executes every queued batch
 * operation, and returns the flattened list of per-statement update counts.
 *
 * <p>For INSERT batches, generated keys are read back and assigned to the
 * entities that were deferred while the batch was being built. Each statement
 * and its connection are cleaned up after execution.
 *
 * @return the update counts of all executed batch operations, in order
 * @throws NoBatchOperationError         if no batch is currently active
 * @throws BatchOperationExecutionError  if executing a batch or reading
 *                                       generated keys fails
 */
private synchronized List<Integer> endBatch() {
    if (!isInBatchMode()) {
        throw new NoBatchOperationError();
    }
    // Tear down the thread-local batch state before executing anything,
    // so a failure below still leaves us out of batch mode.
    localCounts.get().clear();
    final List<BatchOperationDescriptor> descriptors = batchOperation.get();
    batchOperation.remove();
    batch.set(false);
    final ArrayList<Integer> result = new ArrayList<Integer>();
    if (descriptors == null) {
        return result;
    }
    log.info("There are " + descriptors.size() + " operation stack(s) to perform");
    // Consume descriptors front-to-back, removing as we go
    while (!descriptors.isEmpty()) {
        final BatchOperationDescriptor descriptor = descriptors.get(0);
        descriptors.remove(0);
        final int[] batchResult;
        log.info("Executing batch operation for statement: " + descriptor.getSql());
        final PreparedStatement preparedStatement = descriptor.getPreparedStatement();
        final Connection connection;
        try {
            connection = preparedStatement.getConnection();
            long time = System.nanoTime();
            batchResult = preparedStatement.executeBatch();
            // Commit per operation stack; presumably auto-commit is off on
            // these connections — TODO confirm against session setup
            connection.commit();
            log.info(batchResult.length + " operation(s) completed successfully in "
                    + (System.nanoTime() - time) + "ns");
        } catch (SQLException e) {
            throw new BatchOperationExecutionError("Failed to execute operation batch", e);
        }
        if (StatementType.getStatementType(descriptor.getSql()).equals(StatementType.INSERT)) {
            try {
                // Pair each generated key with the entity deferred for this
                // batch, in insertion order
                final List<Object> deferredEntities = deferredKeys.get();
                final ResultSet generatedKeys = preparedStatement.getGeneratedKeys();
                while (generatedKeys.next()) {
                    final Object entity = deferredEntities.get(0);
                    deferredEntities.remove(0);
                    final EntityHandler<Object> entityHandler = entityHandlerContext.getHandler(entity);
                    entityHandler.setKey(entity, session.getDatabaseDialect().retrieveKey(generatedKeys));
                }
            } catch (SQLException e) {
                throw new BatchOperationExecutionError("Failed to retrieve generated keys", e);
            }
        }
        for (int i : batchResult) {
            result.add(i);
        }
        cleanUpStatement(preparedStatement);
    }
    return result;
}

From source file:cc.tooyoung.common.db.JdbcTemplate.java

/**
 * Executes the given SQL as a JDBC batch update, binding parameters for each
 * row via the supplied setter. Falls back to single-statement execution when
 * the driver does not support batch updates.
 *
 * @param sql the SQL statement to execute
 * @param pss binds values for each row; may implement
 *            InterruptibleBatchPreparedStatementSetter to end the batch early
 * @return the per-row update counts
 * @throws DataAccessException on any data access failure
 */
@SuppressWarnings("unchecked")
public int[] batchUpdate(String sql, final BatchPreparedStatementSetter pss) throws DataAccessException {
    if (ApiLogger.isTraceEnabled()) {
        ApiLogger.trace("Executing SQL batch update [" + sql + "]");
    }

    return (int[]) execute(sql, new PreparedStatementCallback() {
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
            try {
                int batchSize = pss.getBatchSize();
                // An interruptible setter may signal exhaustion before batchSize is reached
                InterruptibleBatchPreparedStatementSetter ipss = (pss instanceof InterruptibleBatchPreparedStatementSetter
                        ? (InterruptibleBatchPreparedStatementSetter) pss
                        : null);
                if (JdbcUtils.supportsBatchUpdates(ps.getConnection())) {
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        ps.addBatch();
                    }
                    return ps.executeBatch();
                } else {
                    // Driver cannot batch: execute one row at a time.
                    // Generic List + valueOf replace raw List + deprecated new Integer(...)
                    List<Integer> rowsAffected = new ArrayList<Integer>();
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        rowsAffected.add(Integer.valueOf(ps.executeUpdate()));
                    }
                    int[] rowsAffectedArray = new int[rowsAffected.size()];
                    for (int i = 0; i < rowsAffectedArray.length; i++) {
                        rowsAffectedArray[i] = rowsAffected.get(i).intValue();
                    }
                    return rowsAffectedArray;
                }
            } finally {
                // Let the setter release any parameter resources (e.g. LOB streams)
                if (pss instanceof ParameterDisposer) {
                    ((ParameterDisposer) pss).cleanupParameters();
                }
            }
        }
    }, true);
}

From source file:lib.JdbcTemplate.java

/**
 * Executes the given SQL once per element of {@code batchArgs}, flushing to
 * the database in sub-batches of {@code batchSize} rows, or row-by-row when
 * the JDBC driver does not support batch updates.
 *
 * @param sql       the SQL statement to execute
 * @param batchArgs the argument objects, one per row
 * @param batchSize number of rows collected before each executeBatch() flush
 * @param pss       binds one argument object's values onto the statement
 * @return one int[] of update counts per flush (or per row in the fallback)
 * @throws DataAccessException on any data access failure
 */
@Override
public <T> int[][] batchUpdate(String sql, final Collection<T> batchArgs, final int batchSize,
        final ParameterizedPreparedStatementSetter<T> pss) throws DataAccessException {

    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update [" + sql + "] with a batch size of " + batchSize);
    }
    return execute(sql, new PreparedStatementCallback<int[][]>() {
        @Override
        public int[][] doInPreparedStatement(PreparedStatement ps) throws SQLException {
            List<int[]> rowsAffected = new ArrayList<int[]>();
            try {
                boolean batchSupported = true;
                if (!JdbcUtils.supportsBatchUpdates(ps.getConnection())) {
                    batchSupported = false;
                    logger.warn(
                            "JDBC Driver does not support Batch updates; resorting to single statement execution");
                }
                int n = 0;
                for (T obj : batchArgs) {
                    pss.setValues(ps, obj);
                    n++;
                    if (batchSupported) {
                        ps.addBatch();
                        // Flush when a full sub-batch is collected, or when the
                        // final (possibly partial) item has been added
                        if (n % batchSize == 0 || n == batchArgs.size()) {
                            if (logger.isDebugEnabled()) {
                                // 1-based index of the sub-batch being sent
                                int batchIdx = (n % batchSize == 0) ? n / batchSize : (n / batchSize) + 1;
                                // number of items contained in this sub-batch
                                int items = n - ((n % batchSize == 0) ? n / batchSize - 1 : (n / batchSize))
                                        * batchSize;
                                logger.debug(
                                        "Sending SQL batch update #" + batchIdx + " with " + items + " items");
                            }
                            rowsAffected.add(ps.executeBatch());
                        }
                    } else {
                        int i = ps.executeUpdate();
                        rowsAffected.add(new int[] { i });
                    }
                }
                int[][] result = new int[rowsAffected.size()][];
                for (int i = 0; i < result.length; i++) {
                    result[i] = rowsAffected.get(i);
                }
                return result;
            } finally {
                // Let the setter release any parameter resources it holds
                if (pss instanceof ParameterDisposer) {
                    ((ParameterDisposer) pss).cleanupParameters();
                }
            }
        }
    });
}

From source file:lib.JdbcTemplate.java

/**
 * Executes the given SQL as a JDBC batch update, binding parameters for each
 * row via the supplied setter. Falls back to single-statement execution when
 * the driver does not support batch updates.
 *
 * @param sql the SQL statement to execute
 * @param pss binds values for each row; may implement
 *            InterruptibleBatchPreparedStatementSetter to end the batch early
 * @return the per-row update counts
 * @throws DataAccessException on any data access failure
 */
@Override
public int[] batchUpdate(String sql, final BatchPreparedStatementSetter pss) throws DataAccessException {
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update [" + sql + "]");
    }

    return execute(sql, new PreparedStatementCallback<int[]>() {
        @Override
        public int[] doInPreparedStatement(PreparedStatement ps) throws SQLException {
            try {
                final int size = pss.getBatchSize();
                final InterruptibleBatchPreparedStatementSetter interruptible;
                if (pss instanceof InterruptibleBatchPreparedStatementSetter) {
                    interruptible = (InterruptibleBatchPreparedStatementSetter) pss;
                } else {
                    interruptible = null;
                }
                if (!JdbcUtils.supportsBatchUpdates(ps.getConnection())) {
                    // Driver cannot batch: execute row by row and collect counts.
                    List<Integer> counts = new ArrayList<Integer>();
                    for (int row = 0; row < size; row++) {
                        pss.setValues(ps, row);
                        if (interruptible != null && interruptible.isBatchExhausted(row)) {
                            break;
                        }
                        counts.add(ps.executeUpdate());
                    }
                    int[] countArray = new int[counts.size()];
                    int pos = 0;
                    for (Integer count : counts) {
                        countArray[pos++] = count;
                    }
                    return countArray;
                }
                // Batch-capable driver: queue every row, then execute once.
                for (int row = 0; row < size; row++) {
                    pss.setValues(ps, row);
                    if (interruptible != null && interruptible.isBatchExhausted(row)) {
                        break;
                    }
                    ps.addBatch();
                }
                return ps.executeBatch();
            } finally {
                // Let the setter release any parameter resources it holds
                if (pss instanceof ParameterDisposer) {
                    ((ParameterDisposer) pss).cleanupParameters();
                }
            }
        }
    });
}

From source file:fr.aliacom.obm.common.calendar.CalendarDaoJdbcImpl.java

/**
 * Binds all Event fields onto a prepared INSERT/UPDATE statement, starting at
 * parameter index {@code i}.
 *
 * <p>The binding order is fixed and must match the column order of the SQL
 * the caller prepared — do not reorder these setters.
 *
 * @param ps the statement to bind onto
 * @param ev the event whose fields are bound
 * @param at the access token supplying domain id and origin
 * @param i  the first parameter index to use (1-based, JDBC convention)
 * @return the next unused parameter index after the last bound field
 * @throws SQLException if any binding or the category lookup fails
 */
private int fillEventStatement(PreparedStatement ps, Event ev, AccessToken at, int i) throws SQLException {
    int idx = i;
    ps.setString(idx++, ev.getExtId().getExtId());
    ps.setString(idx++, ev.getTimezoneName());
    ps.setObject(idx++, obmHelper.getDBCP().getJdbcObject(ObmHelper.VOPACITY, ev.getOpacity().toString()));
    ps.setString(idx++, ev.getTitle());
    ps.setString(idx++, ev.getLocation());
    // Resolve the category name to its id on the statement's own connection
    Integer cat = catIdFromString(ps.getConnection(), ev.getCategory(), at.getDomain().getId());
    if (cat != null) {
        ps.setInt(idx++, cat);
    } else {
        ps.setNull(idx++, Types.INTEGER);
    }
    ps.setInt(idx++, RFC2445.getPriorityOrDefault(ev.getPriority()));
    ps.setInt(idx++, ev.getPrivacy().toInteger());
    if (ev.getStartDate() != null) {
        ps.setTimestamp(idx++, new Timestamp(ev.getStartDate().getTime()));
    } else {
        ps.setNull(idx++, Types.DATE);
    }
    ps.setInt(idx++, ev.getDuration());
    ps.setBoolean(idx++, ev.isAllday());
    // Recurrence fields: kind, frequency, serialized day mask, optional end date
    EventRecurrence r = ev.getRecurrence();
    ps.setString(idx++, r.getKind().toString());
    ps.setInt(idx++, r.getFrequence());
    ps.setString(idx++, new RecurrenceDaysSerializer().serialize(r.getDays()));
    if (r.getEnd() != null) {
        ps.setTimestamp(idx++, new Timestamp(r.getEnd().getTime()));
    } else {
        ps.setNull(idx++, Types.DATE);
    }
    ps.setNull(idx++, Types.VARCHAR); // color
    ps.setNull(idx++, Types.DATE); // FIXME completed
    ps.setNull(idx++, Types.VARCHAR); // FIXME url
    ps.setString(idx++, ev.getDescription());
    ps.setInt(idx++, at.getDomain().getId());
    ps.setString(idx++, at.getOrigin());
    ps.setObject(idx++, obmHelper.getDBCP().getJdbcObject(ObmHelper.VCOMPONENT, ev.getType().toString()));
    ps.setInt(idx++, ev.getSequence());
    return idx;
}

From source file:org.apache.ddlutils.platform.PlatformImplBase.java

/**
 * Performs the batch for the given statement, and checks that the specified
 * amount of rows have been changed.
 *
 * @param statement The prepared statement
 * @param numRows   The number of rows that should change
 * @param table     The changed table
 * @throws DatabaseOperationException if executing the batch fails
 */
private void executeBatch(PreparedStatement statement, int numRows, Table table)
        throws DatabaseOperationException {
    if (statement != null) {
        try {
            Connection connection = statement.getConnection();

            beforeInsert(connection, table);

            int[] results = statement.executeBatch();

            closeStatement(statement);
            afterInsert(connection, table);

            // Sum the per-row counts; drivers may report SUCCESS_NO_INFO or
            // EXECUTE_FAILED instead of a count, in which case no sum exists
            boolean hasSum = true;
            int sum = 0;

            for (int idx = 0; (results != null) && (idx < results.length); idx++) {
                if (results[idx] < 0) {
                    hasSum = false;
                    if (results[idx] == Statement.EXECUTE_FAILED) {
                        _log.warn("The batch insertion of row " + idx + " into table " + table.getName()
                                + " failed but the driver is able to continue processing");
                    } else if (results[idx] != Statement.SUCCESS_NO_INFO) {
                        _log.warn("The batch insertion of row " + idx + " into table " + table.getName()
                                + " returned an undefined status value " + results[idx]);
                    }
                } else {
                    sum += results[idx];
                }
            }
            if (hasSum && (sum != numRows)) {
                _log.warn("Attempted to insert " + numRows + " rows into table " + table.getName()
                        + " but changed " + sum + " rows");
            }
        } catch (SQLException ex) {
            // Don't leak the statement on the error path (harmless if it was
            // already closed above — closing a closed statement is a no-op)
            closeStatement(statement);

            SQLException cause = ex;
            if (ex instanceof BatchUpdateException) {
                // Prefer the chained exception, which carries the real reason,
                // but fall back to the BatchUpdateException itself when the
                // driver supplies no next exception (original threw with a
                // null cause in that case)
                SQLException nextEx = ((BatchUpdateException) ex).getNextException();

                if (nextEx != null) {
                    cause = nextEx;
                }
            }
            throw new DatabaseOperationException("Error while inserting into the database", cause);
        }
    }
}

From source file:org.apache.hive.jdbc.TestJdbcDriver2.java

/**
 * Verifies that Statement/PreparedStatement report the Connection that created
 * them, that ResultSets report their creating statement, and that ResultSets
 * produced by DatabaseMetaData queries have no parent Statement at all.
 */
@Test
public void testParentReferences() throws Exception {
    /* Test parent references from Statement */
    Statement stmt = this.con.createStatement();
    ResultSet resultSet = stmt.executeQuery("SELECT * FROM " + dataTypeTableName);

    assertTrue(stmt.getConnection() == this.con);
    assertTrue(resultSet.getStatement() == stmt);

    resultSet.close();
    stmt.close();

    /* Test parent references from PreparedStatement */
    PreparedStatement prepared = this.con.prepareStatement("SELECT * FROM " + dataTypeTableName);
    resultSet = prepared.executeQuery();

    assertTrue(prepared.getConnection() == this.con);
    assertTrue(resultSet.getStatement() == prepared);

    resultSet.close();
    prepared.close();

    /* Test DatabaseMetaData queries which do not have a parent Statement */
    DatabaseMetaData md = this.con.getMetaData();

    assertTrue(md.getConnection() == this.con);

    assertNoParentStatement(md.getCatalogs());
    assertNoParentStatement(md.getColumns(null, null, null, null));
    assertNoParentStatement(md.getFunctions(null, null, null));
    assertNoParentStatement(md.getImportedKeys(null, null, null));
    assertNoParentStatement(md.getPrimaryKeys(null, null, null));
    assertNoParentStatement(md.getProcedureColumns(null, null, null, null));
    assertNoParentStatement(md.getProcedures(null, null, null));
    assertNoParentStatement(md.getSchemas());
    assertNoParentStatement(md.getTableTypes());
    assertNoParentStatement(md.getTables(null, null, null, null));
    assertNoParentStatement(md.getTypeInfo());
}

/** Asserts the metadata result set has no parent Statement, then closes it. */
private static void assertNoParentStatement(ResultSet metaDataResult) throws SQLException {
    assertNull(metaDataResult.getStatement());
    metaDataResult.close();
}

From source file:org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper.java

/**
 * Wraps any input-stream parameters in temporary blobs and frees these again after the statement
 * has been executed./*from   w ww  . j  av a2s.  c om*/
 * 
 * {@inheritDoc}
 */
@Override
protected PreparedStatement execute(PreparedStatement stmt, Object[] params) throws SQLException {
    List<Blob> tmpBlobs = new ArrayList<Blob>();
    try {
        for (int i = 0; params != null && i < params.length; i++) {
            Object p = params[i];
            if (p instanceof StreamWrapper) {
                StreamWrapper wrapper = (StreamWrapper) p;
                Blob tmp = createTemporaryBlob(stmt.getConnection(), wrapper.getStream());
                tmpBlobs.add(tmp);
                stmt.setBlob(i + 1, tmp);
            } else if (p instanceof InputStream) {
                Blob tmp = createTemporaryBlob(stmt.getConnection(), (InputStream) p);
                tmpBlobs.add(tmp);
                stmt.setBlob(i + 1, tmp);
            } else {
                stmt.setObject(i + 1, p);
            }
        }
        stmt.execute();
        return stmt;
    } catch (Exception e) {
        throw new SQLException(e.getMessage());
    } finally {
        for (Blob blob : tmpBlobs) {
            try {
                freeTemporaryBlob(blob);
            } catch (Exception e) {
                log.warn("Could not close temporary blob", e);
            }
        }
    }
}