Example usage for java.sql Statement RETURN_GENERATED_KEYS

List of usage examples for java.sql Statement RETURN_GENERATED_KEYS

Introduction

On this page you can find example usages of java.sql Statement RETURN_GENERATED_KEYS.

Prototype

int RETURN_GENERATED_KEYS

To view the source code for java.sql Statement RETURN_GENERATED_KEYS, click the Source Link below.

Click Source Link

Document

The constant indicating that generated keys should be made available for retrieval.

Usage

From source file:org.etudes.component.app.jforum.JforumDataServiceImpl.java

/**
 * Creates a new forum row in jforum_forums and returns the generated forum id.
 *
 * The forum is appended at the end of the current display ordering
 * (MAX(forum_order) + 1). Key generation is vendor specific: Oracle uses the
 * jforum_forums_seq sequence, MySQL uses JDBC generated keys.
 *
 * @param connection open JDBC connection; the caller manages commit/close
 * @param forumName display name of the forum
 * @param catgId id of the category the forum belongs to
 * @param forumDescription forum description text
 * @param forumType forum type code
 * @param accessType forum access type code
 * @param forumModerated whether the forum is moderated (not persisted by this method)
 * @param gradeType grading mode; values other than GRADE_BY_FORUM/GRADE_BY_TOPIC
 *        are stored as GRADE_DISABLED
 * @param startDate optional start date (nullable)
 * @param endDate optional end date (nullable); when null, lock_end_date is forced to 0
 * @param lockEndDate lock-at-end-date flag, only used when endDate is non-null
 * @return the generated forum id, or -1 if it could not be determined
 * @throws SQLException on any database error (logged, then rethrown)
 */
private int createForum(Connection connection, String forumName, int catgId, String forumDescription,
        int forumType, int accessType, boolean forumModerated, int gradeType, Date startDate, Date endDate,
        int lockEndDate) throws SQLException {
    if (logger.isDebugEnabled())
        logger.debug("creating forum with forumName :" + forumName + " for categoryId : " + catgId);
    if (logger.isDebugEnabled())
        logger.debug("Entering createForum......");

    int forumId = -1;

    try {
        // New forums go to the end of the display order.
        String forumGetMaxOrder = "SELECT MAX(forum_order) FROM jforum_forums";
        PreparedStatement p = connection.prepareStatement(forumGetMaxOrder);
        ResultSet rs = p.executeQuery();
        int order = 1;
        if (rs.next()) {
            order = rs.getInt(1) + 1;
        }

        rs.close();
        p.close();

        String forumAddNew = null;

        if (sqlService.getVendor().equals("oracle")) {
            forumAddNew = "INSERT INTO jforum_forums (forum_id, categories_id, forum_name, "
                    + "forum_desc, forum_order, forum_type, forum_access_type, forum_grade_type, start_date, end_date, lock_end_date) "
                    + "VALUES (jforum_forums_seq.nextval, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
            p = connection.prepareStatement(forumAddNew);
            p.setInt(1, catgId);
            p.setString(2, forumName);
            p.setString(3, forumDescription);
            p.setInt(4, order);
            p.setInt(5, forumType);
            p.setInt(6, accessType);
            if (gradeType == GRADE_BY_FORUM || gradeType == GRADE_BY_TOPIC)
                p.setInt(7, gradeType);
            else
                p.setInt(7, GRADE_DISABLED);

            if (startDate == null) {
                p.setTimestamp(8, null);
            } else {
                p.setTimestamp(8, new Timestamp(startDate.getTime()));
            }

            if (endDate == null) {
                p.setTimestamp(9, null);
                p.setInt(10, 0);
            } else {
                p.setTimestamp(9, new Timestamp(endDate.getTime()));
                p.setInt(10, lockEndDate);
            }

            p.executeUpdate();

            p.close();

            // Oracle has no getGeneratedKeys() path for sequences here; read
            // the sequence's current value in the same session instead.
            String forumLastGeneratedForumId = "SELECT jforum_forums_seq.currval FROM DUAL";
            p = connection.prepareStatement(forumLastGeneratedForumId);
            rs = p.executeQuery();

            if (rs.next()) {
                forumId = rs.getInt(1);
            }

            rs.close();
            p.close();
        } else if (sqlService.getVendor().equalsIgnoreCase("mysql")) {
            forumAddNew = "INSERT INTO jforum_forums (categories_id, forum_name, forum_desc, "
                    + "forum_order, forum_type, forum_access_type, forum_grade_type, start_date, end_date, lock_end_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
            p = connection.prepareStatement(forumAddNew, Statement.RETURN_GENERATED_KEYS);
            p.setInt(1, catgId);
            p.setString(2, forumName);
            p.setString(3, forumDescription);
            p.setInt(4, order);
            p.setInt(5, forumType);
            p.setInt(6, accessType);
            if (gradeType == GRADE_BY_FORUM || gradeType == GRADE_BY_TOPIC)
                p.setInt(7, gradeType);
            else
                p.setInt(7, GRADE_DISABLED);

            if (startDate == null) {
                p.setTimestamp(8, null);
            } else {
                p.setTimestamp(8, new Timestamp(startDate.getTime()));
            }

            if (endDate == null) {
                p.setTimestamp(9, null);
                p.setInt(10, 0);
            } else {
                p.setTimestamp(9, new Timestamp(endDate.getTime()));
                p.setInt(10, lockEndDate);
            }

            p.executeUpdate();

            rs = p.getGeneratedKeys();
            if (rs.next()) {
                forumId = rs.getInt(1);
            }
            // FIX: close outside the if — previously the statement and result
            // set were only closed when a generated key was returned, leaking
            // them otherwise.
            rs.close();
            p.close();
        }

        if (logger.isDebugEnabled())
            logger.debug("Exiting createForum......");
    } catch (SQLException e) {
        // Log once through the class logger (printStackTrace removed as a
        // redundant duplicate) and rethrow for the caller to handle.
        if (logger.isErrorEnabled())
            logger.error("createForum():Error while creating forum : " + e.toString());
        throw e;
    }
    return forumId;
}

From source file:org.apache.sqoop.repository.derby.DerbyRepositoryHandler.java

/**
 * Pre-register HDFS Connector so that config upgrade will work.
 * NOTE: This should be used only in the upgrade path.
 *
 * @param conn repository connection
 * @return the persistence id of the HDFS connector (existing or newly
 *         inserted), or -1 if no HDFS connector config was found
 */
@Deprecated
protected long registerHdfsConnector(Connection conn) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Begin HDFS Connector pre-loading.");
    }

    List<URL> connectorConfigs = ConnectorManagerUtils.getConnectorConfigs();

    if (LOG.isInfoEnabled()) {
        LOG.info("Connector configs: " + connectorConfigs);
    }

    ConnectorHandler handler = null;
    for (URL url : connectorConfigs) {
        handler = new ConnectorHandler(url);

        // Already registered: reuse the existing persistence id.
        if (handler.getConnectorConfigurable().getPersistenceId() != -1) {
            return handler.getConnectorConfigurable().getPersistenceId();
        }
        PreparedStatement baseConnectorStmt = null;
        if (handler.getUniqueName().equals(CONNECTOR_HDFS)) {
            try {
                baseConnectorStmt = conn.prepareStatement(
                        STMT_INSERT_INTO_CONNECTOR_WITHOUT_SUPPORTED_DIRECTIONS,
                        Statement.RETURN_GENERATED_KEYS);
                baseConnectorStmt.setString(1, handler.getConnectorConfigurable().getUniqueName());
                baseConnectorStmt.setString(2, handler.getConnectorConfigurable().getClassName());
                baseConnectorStmt.setString(3, "0");
                if (baseConnectorStmt.executeUpdate() == 1) {
                    ResultSet rsetConnectorId = baseConnectorStmt.getGeneratedKeys();
                    if (rsetConnectorId.next()) {
                        if (LOG.isInfoEnabled()) {
                            LOG.info("HDFS Connector pre-loaded: " + rsetConnectorId.getLong(1));
                        }
                        return rsetConnectorId.getLong(1);
                    }
                }
            } catch (SQLException e) {
                // FIX: preserve the original SQLException as the cause instead
                // of discarding it (the sibling handlers all chain the cause).
                throw new SqoopException(DerbyRepoError.DERBYREPO_0013, e);
            } finally {
                // Closing the statement also closes its generated-keys result set.
                closeStatements(baseConnectorStmt);
            }

            break;
        }
    }

    return -1L;
}

From source file:org.dcache.chimera.FsSqlDriver.java

/**
 *
 *  creates a new id for a tag and stores it into t_tags_inodes table.
 *
 * @param uid/*  w w  w.j  a  va  2  s .  c  om*/
 * @param gid
 * @param mode
 * @param value
 * @return
 */
long createTagInode(int uid, int gid, int mode, byte[] value) {
    final String CREATE_TAG_INODE_WITH_VALUE = "INSERT INTO t_tags_inodes (imode, inlink, iuid, igid, isize, "
            + "ictime, iatime, imtime, ivalue) VALUES (?,1,?,?,?,?,?,?,?)";

    Timestamp now = new Timestamp(System.currentTimeMillis());
    KeyHolder keyHolder = new GeneratedKeyHolder();
    int rc = _jdbc.update(con -> {
        PreparedStatement ps = con.prepareStatement(CREATE_TAG_INODE_WITH_VALUE,
                Statement.RETURN_GENERATED_KEYS);
        ps.setInt(1, mode | UnixPermission.S_IFREG);
        ps.setInt(2, uid);
        ps.setInt(3, gid);
        ps.setLong(4, value.length);
        ps.setTimestamp(5, now);
        ps.setTimestamp(6, now);
        ps.setTimestamp(7, now);
        ps.setBinaryStream(8, new ByteArrayInputStream(value), value.length);
        return ps;
    }, keyHolder);
    if (rc != 1) {
        throw new JdbcUpdateAffectedIncorrectNumberOfRowsException(CREATE_TAG_INODE_WITH_VALUE, 1, rc);
    }
    return (Long) keyHolder.getKeys().get("itagid");
}

From source file:org.apache.sqoop.repository.derby.DerbyRepositoryHandler.java

/**
 * Create an HDFS connection (used only in version 2). Intended to be used
 * when moving the HDFS connector out of the sqoop driver to its own connector.
 *
 * NOTE: Should be used only in the upgrade path!
 *
 * @param conn repository connection
 * @param connectorId id of the connector the new connection references
 * @return the generated connection id
 */
@Deprecated
private Long createHdfsConnection(Connection conn, Long connectorId) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Creating HDFS link.");
    }

    PreparedStatement insertStmt = null;
    try {
        insertStmt = conn.prepareStatement(STMT_INSERT_CONNECTION, Statement.RETURN_GENERATED_KEYS);
        insertStmt.setString(1, CONNECTOR_HDFS);
        insertStmt.setLong(2, connectorId);
        insertStmt.setBoolean(3, true);
        insertStmt.setNull(4, Types.VARCHAR);
        insertStmt.setTimestamp(5, new Timestamp(System.currentTimeMillis()));
        insertStmt.setNull(6, Types.VARCHAR);
        insertStmt.setTimestamp(7, new Timestamp(System.currentTimeMillis()));

        int rowCount = insertStmt.executeUpdate();
        if (rowCount != 1) {
            throw new SqoopException(DerbyRepoError.DERBYREPO_0003, Integer.toString(rowCount));
        }

        ResultSet generatedIds = insertStmt.getGeneratedKeys();
        if (!generatedIds.next()) {
            throw new SqoopException(DerbyRepoError.DERBYREPO_0004);
        }

        if (LOG.isTraceEnabled()) {
            LOG.trace("Created HDFS connection.");
        }

        return generatedIds.getLong(1);
    } catch (SQLException ex) {
        throw new SqoopException(DerbyRepoError.DERBYREPO_0005, ex);
    } finally {
        // Closing the statement also releases its generated-keys result set.
        closeStatements(insertStmt);
    }
}

From source file:org.skfiy.typhon.spi.pvp.PvpProvider.java

/**
 * Persists a war report as JSON in t_pvp_report and returns the generated row id.
 *
 * @param warReport report to serialize and store
 * @return the generated report id, or 0 if the insert affected no rows or no
 *         key was returned
 * @throws DbException wrapping any SQLException (the transaction is rolled back)
 */
private int saveWarReport(WarReport warReport) {
    Connection conn = null;
    PreparedStatement ps = null;
    ResultSet rs = null;
    int id = 0;

    try {
        conn = connectionProvider.getConnection();
        ps = conn.prepareStatement("insert into t_pvp_report(data,creationTime) values(?,?)",
                Statement.RETURN_GENERATED_KEYS);
        ps.setString(1, JSON.toJSONString(warReport));
        ps.setLong(2, System.currentTimeMillis());

        if (ps.executeUpdate() > 0) {
            rs = ps.getGeneratedKeys();
            // FIX: check rs.next() before reading — previously the return
            // value was ignored and rs.getInt(1) would throw on an empty
            // generated-keys result set.
            if (rs.next()) {
                id = rs.getInt(1);
            }
        }
        DbUtils.commitQuietly(conn);

    } catch (SQLException e) {
        DbUtils.rollbackQuietly(conn);
        throw new DbException(e);
    } finally {
        DbUtils.closeQuietly(conn, ps, rs);
    }
    return id;
}

From source file:org.openconcerto.sql.model.SQLDataSource.java

/**
 * Executes the query with the given statement. Warning: this method cannot
 * close the statement, since it returns the ResultSet directly.
 *
 * @param query the query to execute.
 * @param stmt the statement.
 * @return the result of the query; should never be null according to the spec but Derby don't
 *         care.
 * @throws SQLException if an error occurs while executing the query.
 * @throws RTInterruptedException if the current thread is interrupted before or during execution.
 */
private ResultSet execute(String query, Statement stmt) throws SQLException, RTInterruptedException {
    // System.err.println("\n" + count + "*** " + query + "\n");

    if (State.DEBUG)
        State.INSTANCE.beginRequest(query);

    // test before calling JDBC methods and creating threads
    boolean interrupted = false;
    if (QUERY_TUNING > 0) {
        // artificial delay used for tuning/testing; a sleep interrupt counts
        // as an interruption of this request
        try {
            Thread.sleep(QUERY_TUNING);
        } catch (InterruptedException e1) {
            interrupted = true;
        }
    } else {
        interrupted = Thread.currentThread().isInterrupted();
    }
    if (interrupted) {
        throw new RTInterruptedException("request interrupted : " + query);
    }

    final long t1 = System.currentTimeMillis();
    ResultSet rs = null;
    try {
        // MAYBE something a bit more formal than prefix matching
        // (crude dispatch: update-style statements run synchronously,
        // everything else goes through the interruptible executor thread)
        if (query.startsWith("INSERT") || query.startsWith("UPDATE") || query.startsWith("DELETE")
                || query.startsWith("CREATE") || query.startsWith("ALTER") || query.startsWith("DROP")
                || query.startsWith("SET")) {
            // MS SQL doesn't support UPDATE
            // only ask for generated keys on INSERT, and only when the driver
            // reports support for them
            final boolean returnGenK = query.startsWith("INSERT")
                    && stmt.getConnection().getMetaData().supportsGetGeneratedKeys();
            stmt.executeUpdate(query,
                    returnGenK ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS);
            rs = returnGenK ? stmt.getGeneratedKeys() : null;
        } else {
            // TODO use only one ExecutorThread per Connection
            final ExecutorThread thr = new ExecutorThread(stmt, query);
            // start the execution
            thr.start();
            // wait until it either finishes or we are interrupted
            try {
                rs = thr.getRs();
            } catch (InterruptedException e) {
                thr.stopQuery();
                throw new InterruptedQuery("request interrupted : " + query, e, thr);
            }
        }
    } finally {
        if (State.DEBUG)
            State.INSTANCE.endRequest(query);
    }
    long t2 = System.currentTimeMillis();
    // obviously very long queries tend to last longer, that's normal so don't warn
    if (t2 - t1 > 1000 && query.length() < 1000) {
        System.err.println("Warning:" + (t2 - t1) + "ms pour :" + query);
    }

    count++;
    return rs;
}

From source file:fi.helsinki.cs.iot.kahvihub.database.sqliteJdbc.IotHubDatabaseSqliteJDBCImpl.java

/**
 * Inserts a new entry (feed id, current timestamp, JSON payload) for the given
 * feed and returns the stored entry re-read through getFeedEntry().
 *
 * @param feed owning feed; must not be null
 * @param data entry payload, stored via data.toString()
 * @return the persisted FeedEntry, or null if feed is null, the database is
 *         not open, or any SQL error occurs (errors are printed, not thrown)
 */
@Override
public FeedEntry addFeedEntry(Feed feed, JSONObject data) {
    if (feed == null) {
        Log.e(TAG, "Cannot add entry from a NULL feed");
        return null;
    }
    FeedEntry entry = null;
    try {
        checkOpenness();
        final String query = "INSERT INTO " + IotHubDataHandler.TABLE_FEED_ENTRY + " ( "
                + IotHubDataHandler.KEY_FEED_ENTRY_FEED_ID + "," + IotHubDataHandler.KEY_FEED_ENTRY_TIMESTAMP
                + "," + IotHubDataHandler.KEY_FEED_ENTRY_DATA + ") VALUES (?,?,?)";
        // FIX: try-with-resources so the statement and result set are closed
        // on every path — previously an exception leaked both.
        try (PreparedStatement ps = connection.prepareStatement(query, Statement.RETURN_GENERATED_KEYS)) {
            ps.setLong(1, feed.getId());
            ps.setLong(2, new Date().getTime());
            ps.setString(3, data.toString());
            ps.executeUpdate();
            try (ResultSet genKeysFeed = ps.getGeneratedKeys()) {
                if (genKeysFeed.next()) {
                    long insertIdEntry = genKeysFeed.getLong(1);
                    entry = getFeedEntry(insertIdEntry);
                }
            }
        }
    } catch (SQLException | IotHubDatabaseException e) {
        e.printStackTrace();
        return null;
    }
    return entry;
}

From source file:org.apache.sqoop.repository.derby.DerbyRepositoryHandler.java

/**
 * We are creating the LINK FORM for HDFS; later the schema will be renamed to
 * LINK CONFIG.
 *
 * NOTE: Should be used only in the upgrade path!
 *
 * @param conn repository connection
 * @param connectorId id of the connector the form belongs to
 * @return the generated form id
 */
@Deprecated
private Long createHdfsLinkForm(Connection conn, Long connectorId) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Creating HDFS link.");
    }

    PreparedStatement formStmt = null;
    try {
        final short formIndex = 0;
        formStmt = conn.prepareStatement(STMT_INSERT_INTO_FORM, Statement.RETURN_GENERATED_KEYS);
        formStmt.setLong(1, connectorId);
        formStmt.setString(2, "linkConfig");
        // it could also be set to the deprecated "CONNECTION"
        formStmt.setString(3, MConfigType.LINK.name());
        formStmt.setShort(4, formIndex);

        int updatedRows = formStmt.executeUpdate();
        if (updatedRows != 1) {
            throw new SqoopException(DerbyRepoError.DERBYREPO_0012, Integer.toString(updatedRows));
        }

        ResultSet generatedIds = formStmt.getGeneratedKeys();
        if (!generatedIds.next()) {
            throw new SqoopException(DerbyRepoError.DERBYREPO_0013);
        }

        if (LOG.isTraceEnabled()) {
            LOG.trace("Created HDFS connector link FORM.");
        }
        return generatedIds.getLong(1);
    } catch (SQLException ex) {
        throw new SqoopException(DerbyRepoError.DERBYREPO_0019, ex);
    } finally {
        closeStatements(formStmt);
    }
}

From source file:org.apache.sqoop.repository.common.CommonRepositoryHandler.java

/**
 * Inserts the given connector into the configurable table, persists its
 * supported directions, and returns the generated connector id.
 *
 * @param mc connector metadata to persist
 * @param conn repository connection
 * @return the generated connector id
 */
private long insertAndGetConnectorId(MConnector mc, Connection conn) {
    PreparedStatement insertStmt = null;
    try {
        insertStmt = conn.prepareStatement(crudQueries.getStmtInsertIntoConfigurable(),
                Statement.RETURN_GENERATED_KEYS);
        insertStmt.setString(1, mc.getUniqueName());
        insertStmt.setString(2, mc.getClassName());
        insertStmt.setString(3, mc.getVersion());
        insertStmt.setString(4, mc.getType().name());

        int insertedRows = insertStmt.executeUpdate();
        if (insertedRows != 1) {
            throw new SqoopException(CommonRepositoryError.COMMON_0009, Integer.toString(insertedRows));
        }

        ResultSet generatedIds = insertStmt.getGeneratedKeys();
        if (!generatedIds.next()) {
            throw new SqoopException(CommonRepositoryError.COMMON_0010);
        }
        long connectorId = generatedIds.getLong(1);
        // connector configurable also have directions
        insertConnectorDirections(connectorId, mc.getSupportedDirections(), conn);
        return connectorId;
    } catch (SQLException ex) {
        throw new SqoopException(CommonRepositoryError.COMMON_0011, mc.toString(), ex);
    } finally {
        closeStatements(insertStmt);
    }
}

From source file:edu.ku.brc.specify.conversion.AgentConverter.java

/**
 * Duplicates an address row (located by newAddrId) and attaches the copy to
 * the given agent. Best effort: database errors are logged and swallowed, not
 * propagated to the caller.
 *
 * @param newDBConnArg connection to the new database used for the insert/update
 * @param oldAddrId original address id (used only for logging)
 * @param newAddrId id of the address row to duplicate
 * @param newAgentId agent that will own the duplicated address
 * @throws SQLException declared for API compatibility; errors are currently
 *         caught and logged rather than thrown
 */
protected void duplicateAddress(final Connection newDBConnArg, final Integer oldAddrId, final Integer newAddrId,
        final Integer newAgentId) throws SQLException {
    log.info(String.format("Duplicating oldAddrId[%d]    newAddrId[%d] to newAgentId[%d]", oldAddrId, newAddrId,
            newAgentId));

    String addFieldNames = DisciplineDuplicator.getFieldNameList(newDBConn, "address");
    // Interpolated values are numeric ids, so String.format here carries no
    // SQL-injection risk from user input.
    String insertSQL = String.format("INSERT INTO address (%s) (SELECT %s FROM address WHERE AddressID = %d)",
            addFieldNames, addFieldNames, newAddrId);
    try {
        Statement updateStatement = newDBConnArg.createStatement();
        updateStatement.executeUpdate(insertSQL, Statement.RETURN_GENERATED_KEYS);

        // Re-point the freshly inserted copy at the new agent.
        int insertedAddrID = BasicSQLUtils.getInsertedId(updateStatement);
        String sql = String.format("UPDATE address SET AgentID=%d WHERE AddressID = %d", newAgentId,
                insertedAddrID);
        updateStatement.executeUpdate(sql);
        updateStatement.close();

    } catch (Exception ex) {
        // FIX: record the failure through the class logger instead of
        // printStackTrace(); the original best-effort behavior (swallow and
        // continue) is preserved.
        log.error("duplicateAddress() failed for: " + insertSQL, ex);
    }
}