Example usage for java.sql Connection rollback

Introduction

This page collects example usages of java.sql.Connection.rollback() drawn from open-source projects. Note that rollback() should be called only while auto-commit mode is disabled, which is why the examples below pair it with setAutoCommit(false).

Prototype

void rollback() throws SQLException;

Document

Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object.
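
Before the project examples, here is a minimal sketch of the pattern they all follow: disable auto-commit, do the work, commit on success, and roll back on failure. The dataSource, table, and parameters here are hypothetical placeholders, not taken from any example below.

// Minimal rollback sketch (hypothetical dataSource, table, and parameters).
try (Connection con = dataSource.getConnection()) {
    con.setAutoCommit(false); // start an explicit transaction
    try (PreparedStatement ps = con.prepareStatement(
            "UPDATE account SET balance = balance - ? WHERE id = ?")) {
        ps.setBigDecimal(1, amount);
        ps.setLong(2, accountId);
        ps.executeUpdate();
        con.commit(); // make the change permanent
    } catch (SQLException e) {
        con.rollback(); // undo everything since the last commit
        throw e;
    }
}

Because Connection implements AutoCloseable, the try-with-resources form above releases the connection even if the rollback itself throws.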

Usage

From source file:com.oic.event.SetProfile.java

@Override
public void ActionEvent(JSONObject json, WebSocketListener webSocket) {
    JSONObject responseJSON = new JSONObject();
    responseJSON.put("method", "setprofile");
    if (!validation(json, webSocket)) {
        return;
    }
    Connection con = DatabaseConnection.getConnection();
    PreparedStatement ps;
    try {
        con.setAutoCommit(false);
        String sql = "UPDATE user SET studentnumber = ?, name = ?, avatarid = ?, grade = ?, sex = ?, birth = ?, comment = ? "
                + "WHERE userid = ?";
        ps = con.prepareStatement(sql);
        ps.setString(1, json.get("studentid").toString());
        ps.setString(2, json.get("username").toString());
        ps.setInt(3, Integer.parseInt(json.get("avatarid").toString()));
        ps.setInt(4, Integer.parseInt(json.get("grade").toString()));
        ps.setInt(5, Integer.parseInt(json.get("gender").toString()));
        ps.setDate(6, toDate(json.get("birthday").toString()));
        ps.setString(7, json.get("comment").toString());
        ps.setLong(8, webSocket.getCharacter().getUserId());
        ps.executeUpdate();
        ps.close();

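        // update the privacy settings for the same user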
        sql = "UPDATE setting SET privategrade = ?, privatesex = ?, privatebirth =? WHERE userid = ?";
        ps = con.prepareStatement(sql);
        ps.setInt(1, Integer.parseInt(json.get("vgrade").toString()));
        ps.setInt(2, Integer.parseInt(json.get("vgender").toString()));
        ps.setInt(3, Integer.parseInt(json.get("vbirthday").toString()));
        ps.setLong(4, webSocket.getCharacter().getUserId());

        ps.executeUpdate();
        ps.close();

        con.commit();

        //TODO 
        responseJSON.put("status", 0);
    } catch (Exception e) {
        try {
            con.rollback();
        } catch (SQLException sq) {
            LOG.warning("[setProfile] Error rolling back.");
        }
        e.printStackTrace();
        responseJSON.put("status", 1);
    } finally {
        try {
            con.setAutoCommit(true);
        } catch (SQLException ex) {
            Logger.getLogger(SetProfile.class.getName()).log(Level.WARNING,
                    "Error going back to AutoCommit mode", ex);
        }
    }

    webSocket.sendJson(responseJSON);
}

From source file:mom.trd.opentheso.bdd.helper.RelationsHelper.java

/**
 * Deletes a related-term (RT) relation between two concepts.
 *
 * @param ds
 * @param idConcept1
 * @param idThesaurus
 * @param idConcept2
 * @param idUser
 * @return boolean
 */
public boolean deleteRelationRT(HikariDataSource ds, String idConcept1, String idThesaurus, String idConcept2,
        int idUser) {

    Connection conn;
    Statement stmt;
    boolean status = false;

    try {
        // Get connection from pool
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        try {
            stmt = conn.createStatement();
            try {

                if (!new RelationsHelper().addRelationHistorique(conn, idConcept1, idThesaurus, idConcept2,
                        "RT", idUser, "DEL")) {
                    conn.rollback();
                    conn.close();
                    return false;
                }
                if (!new RelationsHelper().addRelationHistorique(conn, idConcept2, idThesaurus, idConcept1,
                        "RT", idUser, "DEL")) {
                    conn.rollback();
                    conn.close();
                    return false;
                }

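                // remove the RT relation in both directions (concept1 -> concept2, then concept2 -> concept1)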
                String query = "delete from hierarchical_relationship" + " where id_concept1 ='" + idConcept1
                        + "'" + " and id_thesaurus = '" + idThesaurus + "'" + " and role = 'RT'"
                        + " and id_concept2 = '" + idConcept2 + "'";

                stmt.executeUpdate(query);
                query = "delete from hierarchical_relationship" + " where id_concept1 ='" + idConcept2 + "'"
                        + " and id_thesaurus = '" + idThesaurus + "'" + " and role = 'RT'"
                        + " and id_concept2 = '" + idConcept1 + "'";

                stmt.executeUpdate(query);
                status = true;
                conn.commit();
            } finally {
                stmt.close();
            }
        } finally {
            conn.close();
        }
    } catch (SQLException sqle) {
        // Log exception
        log.error("Error while deleting relation RT of Concept : " + idConcept1, sqle);
    }
    return status;
}

From source file:es.juntadeandalucia.panelGestion.negocio.utiles.JDBCConnector.java

public String getPrimaryKey(String tableName, String schemaName) throws Exception {
    Exception error = null;

    String primaryKey = null;

    Connection connection = null;

    try {
        DataSource dataSource = poolDataSources.get(schemaId);
        connection = dataSource.getConnection();
        connection.setAutoCommit(false);

        // get primary keys
        DatabaseMetaData dataBaseMetaData = connection.getMetaData();
        ResultSet rs = dataBaseMetaData.getPrimaryKeys(null, schemaName, tableName);
        if (rs.next()) {
            primaryKey = rs.getString("column_name");
        }
        rs.close();
    } catch (SQLException e) {
        error = e;
    } finally {
        if (connection != null) {
            try {
                if (error != null) {
                    connection.rollback();
                }
            } catch (SQLException se) {
                log.warn("Se produjo un error al manejar la conexin: ".concat(se.getLocalizedMessage()));
            }
            try {
                connection.close();
            } catch (SQLException se) {
                log.warn("Se produjo un error al intentar cerrar la conexin: "
                        .concat(se.getLocalizedMessage()));
            }
        }
    }
    if (error != null) {
        throw error;
    }
    return primaryKey;
}

From source file:mom.trd.opentheso.bdd.helper.RelationsHelper.java

/**
 * Adds a broader-term (BT) relation to a concept, together with the
 * inverse narrower-term (NT) relation.
 *
 * @param conn
 * @param idConceptNT
 * @param idThesaurus
 * @param idConceptBT
 * @param idUser
 * @return boolean
 */
public boolean addRelationBT(Connection conn, String idConceptNT, String idThesaurus, String idConceptBT,
        int idUser) {

    Statement stmt;
    boolean status = false;

    try {
        try {
            conn.setAutoCommit(false);
            stmt = conn.createStatement();
            try {
                if (!new RelationsHelper().addRelationHistorique(conn, idConceptNT, idThesaurus, idConceptBT,
                        "BT", idUser, "ADD")) {
                    conn.rollback();
                    conn.close();
                    return false;
                }
                if (!new RelationsHelper().addRelationHistorique(conn, idConceptBT, idThesaurus, idConceptNT,
                        "NT", idUser, "ADD")) {
                    conn.rollback();
                    conn.close();
                    return false;
                }

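                // insert the BT relation and its inverse NT relation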
                String query = "Insert into hierarchical_relationship"
                        + "(id_concept1, id_thesaurus, role, id_concept2)" + " values (" + "'" + idConceptNT
                        + "'" + ",'" + idThesaurus + "'" + ",'BT'" + ",'" + idConceptBT + "')";

                stmt.executeUpdate(query);
                query = "Insert into hierarchical_relationship"
                        + "(id_concept1, id_thesaurus, role, id_concept2)" + " values (" + "'" + idConceptBT
                        + "'" + ",'" + idThesaurus + "'" + ",'NT'" + ",'" + idConceptNT + "')";
                stmt.executeUpdate(query);
                status = true;
                // commit is left to the caller; setAutoCommit(false) above means nothing is persisted until then
            } finally {
                stmt.close();
            }
        } finally {
            // closing the connection is likewise left to the caller
        }
    } catch (SQLException sqle) {
        // Ignore duplicate-key violations (SQLState 23505); log anything else
        if (!sqle.getSQLState().equalsIgnoreCase("23505")) {
            log.error("Error while adding relation BT of Concept : " + idConceptNT, sqle);
        }
    }
    return status;
}

From source file:com.netflix.metacat.usermetadata.mysql.MySqlTagService.java

/**
 * Tags the given table with the given <code>tags</code>.
 * @param name table name
 * @param tags list of tags
 * @param updateUserMetadata whether to also store the tags in the user metadata
 * @return the complete list of tags associated with the table
 */
@Override
public Set<String> setTableTags(final QualifiedName name, final Set<String> tags,
        final boolean updateUserMetadata) {
    addTags(tags);
    try {
        final Connection conn = getDataSource().getConnection();
        try {
            final TagItem tagItem = findOrCreateTagItemByName(name.toString(), conn);
            Set<String> inserts = Sets.newHashSet();
            Set<String> deletes = Sets.newHashSet();
            Set<String> values = tagItem.getValues();
            if (values == null || values.isEmpty()) {
                inserts = tags;
            } else {
                inserts = Sets.difference(tags, values).immutableCopy();
                deletes = Sets.difference(values, tags).immutableCopy();
            }
            values = tags;
            if (!inserts.isEmpty()) {
                insertTagItemTags(tagItem.getId(), inserts, conn);
            }
            if (!deletes.isEmpty()) {
                removeTagItemTags(tagItem.getId(), deletes, conn);
            }
            if (updateUserMetadata) {
                // Set the tags in user metadata
                final Map<String, Set<String>> data = Maps.newHashMap();
                data.put(NAME_TAGS, values);
                userMetadataService.saveDefinitionMetadata(name, "admin",
                        Optional.of(metacatJson.toJsonObject(data)), true);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        final String message = String.format("Failed to set tags for name %s", name);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
    return tags;
}

From source file:com.che.software.testato.domain.dao.jdbc.impl.HierarchyDAO.java

/**
 * Creates a hierarchy from a bean of criteria.
 *
 * @author Clement HELIOU (clement.heliou@che-software.com).
 * @param creationBean the bean of criterions.
 * @since July, 2011.
 * @throws HierarchyCreationDAOException if an error occurs during the
 *         creation.
 */
@Override
public void createHierarchyFromBean(HierarchyCreation creationBean) throws HierarchyCreationDAOException {
    LOGGER.debug("createHierarchyFromBean().");
    Connection connection = null;
    try {
        connection = getDataSource().getConnection();
        connection.setAutoCommit(false);
        getQueryRunner().update(connection,
                "INSERT INTO hirearchy(hierarchy_id, creation_date, last_update_date, user_id, high_level_goal) VALUES(nextval('hierarchy_seq'), NOW(), NOW(), ?, ?) ",
                new Object[] { creationBean.getUser().getUserId(), creationBean.getHighLevelGoal() });
        Integer createdHierarchyId = (Integer) getQueryRunner().query(connection,
                "SELECT MAX(hierarchy_id)::int AS hierarchy_id FROM hirearchy ",
                new ScalarHandler("hierarchy_id"));
        getQueryRunner().update(connection,
                "INSERT INTO hierarchy_version(version_id, hierarchy_id) VALUES(?, ?) ",
                new Object[] { creationBean.getVersion().getVersionId(), createdHierarchyId });
        getQueryRunner().update(connection,
                "INSERT INTO action_plan(action_plan_id) VALUES(nextval('action_plan_seq')) ");
        Integer createdActionPlanId = (Integer) getQueryRunner().query(connection,
                "SELECT MAX(action_plan_id)::int AS action_plan_id FROM action_plan ",
                new ScalarHandler("action_plan_id"));
        getQueryRunner().update(connection,
                "INSERT INTO hierarchy_action_plan(hierarchy_id, action_plan_id) VALUES(?, ?) ",
                new Object[] { createdHierarchyId, createdActionPlanId });
        connection.commit();
    } catch (SQLException e) {
        try {
            connection.rollback();
        } catch (SQLException e1) {
            throw new HierarchyCreationDAOException(e1);
        }
        throw new HierarchyCreationDAOException(e);
    } finally {
        if (null != connection) {
            try {
                connection.close();
            } catch (SQLException e) {
                throw new HierarchyCreationDAOException(e);
            }
        }
    }
}

From source file:com.amazon.carbonado.repo.jdbc.JDBCRepository.java

/**
 * @param name name to give repository instance
 * @param isMaster when true, storables in this repository must manage
 * version properties and sequence properties
 * @param dataSource provides JDBC database connections
 * @param catalog optional catalog to search for tables -- actual meaning
 * is database independent
 * @param schema optional schema to search for tables -- actual meaning
 * is database independent
 * @param forceStoredSequence tells the repository to use a stored sequence
 * even if the database supports native sequences
 */
@SuppressWarnings("unchecked")
JDBCRepository(AtomicReference<Repository> rootRef, String name, boolean isMaster,
        Iterable<TriggerFactory> triggerFactories, DataSource dataSource, boolean dataSourceClose,
        String catalog, String schema, Integer fetchSize, Map<String, Boolean> autoVersioningMap,
        Map<String, Boolean> suppressReloadMap, String sequenceSelectStatement, boolean forceStoredSequence,
        boolean primaryKeyCheckDisabled, SchemaResolver resolver) throws RepositoryException {
    super(name);
    if (dataSource == null) {
        throw new IllegalArgumentException("DataSource cannot be null");
    }
    mIsMaster = isMaster;
    mTriggerFactories = triggerFactories;
    mRootRef = rootRef;
    mDataSource = dataSource;
    mDataSourceClose = dataSourceClose;
    mCatalog = catalog;
    mSchema = schema;
    mFetchSize = fetchSize;
    mPrimaryKeyCheckDisabled = primaryKeyCheckDisabled;

    mAutoVersioningMap = autoVersioningMap;
    mSuppressReloadMap = suppressReloadMap;

    mResolver = resolver;

    mOpenConnections = new IdentityHashMap<Connection, Object>();
    mOpenConnectionsLock = new ReentrantLock(true);

    // Temporarily set to generic one, in case there's a problem during initialization.
    mExceptionTransformer = new JDBCExceptionTransformer();

    mTxnMgr = new JDBCTransactionManager(this);

    getLog().info("Opening repository \"" + getName() + '"');

    // Test connectivity and get some info on transaction isolation levels.
    Connection con = getConnection();
    try {
        DatabaseMetaData md = con.getMetaData();
        if (md == null || !md.supportsTransactions()) {
            throw new RepositoryException("Database does not support transactions");
        }

        mDatabaseProductName = md.getDatabaseProductName();

        boolean supportsSavepoints;
        try {
            supportsSavepoints = md.supportsSavepoints();
        } catch (AbstractMethodError e) {
            supportsSavepoints = false;
        }

        if (supportsSavepoints) {
            con.setAutoCommit(false);
            // Some JDBC drivers (HSQLDB) lie about their savepoint support.
            try {
                con.setSavepoint();
            } catch (SQLException e) {
                mLog.warn("JDBC driver for " + mDatabaseProductName + " reports supporting savepoints, but it "
                        + "doesn't appear to work: " + e);
                supportsSavepoints = false;
            } finally {
                con.rollback();
                con.setAutoCommit(true);
            }
        }

        mSupportsSavepoints = supportsSavepoints;
        mSupportsSelectForUpdate = md.supportsSelectForUpdate();
        mSupportsScrollInsensitiveReadOnly = md.supportsResultSetConcurrency(ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_READ_ONLY);

        mJdbcDefaultIsolationLevel = md.getDefaultTransactionIsolation();
        mDefaultIsolationLevel = mapIsolationLevelFromJdbc(mJdbcDefaultIsolationLevel);

        mReadUncommittedLevel = selectIsolationLevel(md, IsolationLevel.READ_UNCOMMITTED);
        mReadCommittedLevel = selectIsolationLevel(md, IsolationLevel.READ_COMMITTED);
        mRepeatableReadLevel = selectIsolationLevel(md, IsolationLevel.REPEATABLE_READ);
        mSerializableLevel = selectIsolationLevel(md, IsolationLevel.SERIALIZABLE);
    } catch (SQLException e) {
        throw toRepositoryException(e);
    } finally {
        try {
            closeConnection(con);
        } catch (SQLException e) {
            // Don't care.
        }
    }

    mSupportStrategy = JDBCSupportStrategy.createStrategy(this);
    if (forceStoredSequence) {
        mSupportStrategy.setSequenceSelectStatement(null);
    } else if (sequenceSelectStatement != null && sequenceSelectStatement.length() > 0) {
        mSupportStrategy.setSequenceSelectStatement(sequenceSelectStatement);
    }
    mSupportStrategy.setForceStoredSequence(forceStoredSequence);
    mExceptionTransformer = mSupportStrategy.createExceptionTransformer();

    getLog().info("Opened repository \"" + getName() + '"');

    setAutoShutdownEnabled(true);
}

From source file:org.sakaiproject.genericdao.springjdbc.JdbcGenericDao.java

/**
 * Allows a developer to do a manual release of the connection.
 * This causes a new connection to be obtained the next time a method is
 * executed; anything not yet committed on this connection is rolled back and
 * lost. Normally, if you are using a DataSource which pools connections, you
 * do not need to worry about this too much; just do it at the end of your
 * work unit.
 *
 * @return true if the connection was closed
 */
public boolean closeConnection() {
    boolean success = false;
    try {
        Connection conn = getConnection();
        try {
            conn.rollback();
        } catch (Exception e) {
            // oh well, keep going
        }
        DataSourceUtils.doReleaseConnection(conn, getDataSource());
        success = true;
    } catch (CannotGetJdbcConnectionException e) {
        logWarn("Could not close connection successfully: " + e.getMessage());
    } catch (SQLException e) {
        logWarn("Could not close connection successfully: " + e.getMessage());
    }
    return success;
}

From source file:eagle.storage.jdbc.entity.impl.JdbcEntityWriterImpl.java

@Override
public List<String> write(List<E> entities) throws Exception {
    List<String> keys = new ArrayList<String>();
    if (LOG.isDebugEnabled())
        LOG.debug("Writing " + entities.size() + " entities");
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    Connection connection = ConnectionManagerFactory.getInstance().getConnection();
    // set auto-commit to false and commit by hand for 3x~5x better performance
    connection.setAutoCommit(false);

    try {
        TorqueStatementPeerImpl<E> peer = connectionManager
                .getStatementExecutor(this.jdbcEntityDefinition.getJdbcTableName());
        for (E entity : entities) {
            entity.setEncodedRowkey(peer.getPrimaryKeyBuilder().build(entity));
            ColumnValues columnValues = JdbcEntitySerDeserHelper.buildColumnValues(entity,
                    this.jdbcEntityDefinition);

            // TODO: implement batch insert for better performance
            ObjectKey key = peer.delegate().doInsert(columnValues, connection);

            try {
                if (key != null) {
                    keys.add((String) key.getValue());
                } else {
                    keys.add(entity.getEncodedRowkey());
                }
            } catch (ClassCastException ex) {
                throw new RuntimeException(
                        "Key is not in type of String (VARCHAR) , but JdbcType (java.sql.Types): "
                                + key.getJdbcType() + ", value: " + key.getValue(),
                        ex);
            }
        }

        // Commit here rather than in finally: if any single entity throws, all writes are given up to guarantee consistency
        if (LOG.isDebugEnabled()) {
            LOG.debug("Committing writing");
        }
        connection.commit();
    } catch (Exception ex) {
        LOG.error("Failed to write records, rolling back", ex);
        connection.rollback();
        throw ex;
    } finally {
        stopWatch.stop();
        if (LOG.isDebugEnabled())
            LOG.debug("Closing connection");
        connection.close();
    }

    LOG.info(String.format("Wrote %s records in %s ms (table: %s)", keys.size(), stopWatch.getTime(),
            this.jdbcEntityDefinition.getJdbcTableName()));
    return keys;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnDbUtil.java

public static void prepDb() throws Exception {
    // This is a bogus hack because it copies the contents of the SQL file
    // intended for creating derby databases, and thus will inexorably get
    // out of date with it.  I'm open to any suggestions on how to make this
    // read the file in a build-friendly way.

    Connection conn = null;
    Statement stmt = null;
    try {
        conn = getConnection();
        stmt = conn.createStatement();
        stmt.execute("CREATE TABLE TXNS (" + "  TXN_ID bigint PRIMARY KEY," + "  TXN_STATE char(1) NOT NULL,"
                + "  TXN_STARTED bigint NOT NULL," + "  TXN_LAST_HEARTBEAT bigint NOT NULL,"
                + "  TXN_USER varchar(128) NOT NULL," + "  TXN_HOST varchar(128) NOT NULL)");

        stmt.execute("CREATE TABLE TXN_COMPONENTS (" + "  TC_TXNID bigint REFERENCES TXNS (TXN_ID),"
                + "  TC_DATABASE varchar(128) NOT NULL," + "  TC_TABLE varchar(128),"
                + "  TC_PARTITION varchar(767))");
        stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + "  CTC_TXNID bigint,"
                + "  CTC_DATABASE varchar(128) NOT NULL," + "  CTC_TABLE varchar(128),"
                + "  CTC_PARTITION varchar(767))");
        stmt.execute("CREATE TABLE NEXT_TXN_ID (" + "  NTXN_NEXT bigint NOT NULL)");
        stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
        stmt.execute("CREATE TABLE HIVE_LOCKS (" + " HL_LOCK_EXT_ID bigint NOT NULL,"
                + " HL_LOCK_INT_ID bigint NOT NULL," + " HL_TXNID bigint," + " HL_DB varchar(128) NOT NULL,"
                + " HL_TABLE varchar(128)," + " HL_PARTITION varchar(767)," + " HL_LOCK_STATE char(1) NOT NULL,"
                + " HL_LOCK_TYPE char(1) NOT NULL," + " HL_LAST_HEARTBEAT bigint NOT NULL,"
                + " HL_ACQUIRED_AT bigint," + " HL_USER varchar(128) NOT NULL,"
                + " HL_HOST varchar(128) NOT NULL," + " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
        stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");

        stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
        stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");

        stmt.execute("CREATE TABLE COMPACTION_QUEUE (" + " CQ_ID bigint PRIMARY KEY,"
                + " CQ_DATABASE varchar(128) NOT NULL," + " CQ_TABLE varchar(128) NOT NULL,"
                + " CQ_PARTITION varchar(767)," + " CQ_STATE char(1) NOT NULL," + " CQ_TYPE char(1) NOT NULL,"
                + " CQ_WORKER_ID varchar(128)," + " CQ_START bigint," + " CQ_RUN_AS varchar(128))");

        stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
        stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");

        conn.commit();
    } catch (SQLException e) {
        // This might be a deadlock; if so, retry.
        // Guard against an NPE in case getConnection() itself failed.
        if (conn != null) {
            conn.rollback();
        }
        if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
            LOG.warn("Caught deadlock, retrying db creation");
            prepDb();
        } else {
            throw e;
        }
    } finally {
        deadlockCnt = 0;
        closeResources(conn, stmt, null);
    }
}