Example usage for java.sql Connection.rollback()

A list of usage examples for the java.sql Connection.rollback() method.

Introduction

On this page you can find example usages of java.sql Connection.rollback().

Prototype

void rollback() throws SQLException;

Document

Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object.
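A rollback() call only has an effect when auto-commit is disabled, and it belongs in the failure path of an explicit transaction: disable auto-commit, do the work, commit() on success, rollback() on error, and close the connection in a finally block. The following minimal sketch shows that pattern; the connection URL, method name, table, and column names are hypothetical:

public void debitAccount(String url, long accountId, java.math.BigDecimal amount) throws SQLException {
    Connection conn = null;
    try {
        conn = DriverManager.getConnection(url);
        conn.setAutoCommit(false); // begin an explicit transaction
        try (PreparedStatement ps = conn.prepareStatement(
                "UPDATE accounts SET balance = balance - ? WHERE id = ?")) {
            ps.setBigDecimal(1, amount);
            ps.setLong(2, accountId);
            ps.executeUpdate();
        }
        conn.commit(); // make the changes permanent
    } catch (SQLException ex) {
        if (conn != null) {
            try {
                conn.rollback(); // undo everything since the last commit
            } catch (SQLException suppressed) {
                ex.addSuppressed(suppressed); // keep the original failure primary
            }
        }
        throw ex;
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignore) {
                // best-effort close
            }
        }
    }
}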

Usage

From source file:edu.clemson.cs.nestbed.server.adaptation.sql.ProgramSqlAdapter.java

public Program createNewProgram(int projectID, String name, String description) throws AdaptationException {
    Program program = null;
    Connection connection = null;
    Statement statement = null;
    ResultSet resultSet = null;

    try {
        connection = DriverManager.getConnection(CONN_STR);
        connection.setAutoCommit(false);
        statement = connection.createStatement();

        String query = "INSERT INTO Programs(projectID, name, " + "description, sourcePath) VALUES ( "
                + projectID + ", " + "'" + name + "', " + "'" + description + "', " + "'" + "[unknown]" + "')";

        log.debug("SQL Query:\n" + query);
        statement.executeUpdate(query);

        query = "SELECT * FROM Programs WHERE " + " projectID   =  " + projectID + "  AND " + " name        = '"
                + name + "' AND " + " description = '" + description + "'";

        resultSet = statement.executeQuery(query);

        if (!resultSet.next()) {
            connection.rollback();
            String msg = "Attempt to create program failed";
            log.error(msg);
            throw new AdaptationException(msg);
        }

        program = getProgram(resultSet);
        connection.commit();
    } catch (SQLException ex) {
        try {
            connection.rollback();
        } catch (Exception e) {
        }

        String msg = "SQLException in createNewProgram";
        log.error(msg, ex);
        throw new AdaptationException(msg, ex);
    } finally {
        try {
            resultSet.close();
        } catch (Exception ex) {
        }
        try {
            statement.close();
        } catch (Exception ex) {
        }
        try {
            connection.close();
        } catch (Exception ex) {
        }
    }

    return program;
}
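Two details in this example are worth flagging: rollback failures in the catch block are swallowed silently, and the SQL is assembled by string concatenation, so a name or description containing a quote character breaks the statement and opens the door to SQL injection. Below is a hedged sketch of the same INSERT using a PreparedStatement, reading back the generated key instead of re-SELECTing the row; it assumes the Programs table has an auto-generated key column:

String insert = "INSERT INTO Programs(projectID, name, description, sourcePath) VALUES (?, ?, ?, ?)";
try (PreparedStatement ps = connection.prepareStatement(insert, Statement.RETURN_GENERATED_KEYS)) {
    ps.setInt(1, projectID);
    ps.setString(2, name);
    ps.setString(3, description);
    ps.setString(4, "[unknown]");
    ps.executeUpdate();
    try (ResultSet keys = ps.getGeneratedKeys()) {
        if (!keys.next()) {
            connection.rollback(); // nothing was inserted; undo the transaction
            throw new AdaptationException("Attempt to create program failed");
        }
    }
}
connection.commit();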

From source file:net.codjo.dataprocess.server.treatmenthelper.TreatmentHelper.java

public static void initRepository(Connection con, List<RepositoryDescriptor> repositoryDescList)
        throws Exception {
    try {
        con.setAutoCommit(false);

        LOG.info("Ajout des rfrentiels de traitement suivants :");
        for (RepositoryDescriptor repositoryDesc : repositoryDescList) {
            deleteRepository(con, repositoryDesc.getRepositoryId());
            insertAllRepositoryContent(con, repositoryDesc.getRepositoryId(),
                    repositoryDesc.getRepositoryName(), repositoryDesc.getRepositoryPath());
            insertRepository(con, repositoryDesc.getRepositoryId(), repositoryDesc.getRepositoryName());
        }
        List<TreatmentFragment> treatmentFragmentList = checkIntegrityRepositoryContent(con);
        if (!treatmentFragmentList.isEmpty()) {
            String message = " est trop long ! : ";
            int maxLength = maxLengthTreatmentId(treatmentFragmentList) + message.length() + LENGTH;
            StringBuilder errorMessage = new StringBuilder();
            errorMessage.append("\n").append(StringUtils.repeat("#", maxLength));
            errorMessage.append("\n").append(StringUtils.repeat("+", maxLength));
            for (TreatmentFragment treatmentFragment : treatmentFragmentList) {
                errorMessage.append("\n").append(treatmentFragment.getTreatmentId()).append(message)
                        .append(treatmentFragment.getContentFragment());
            }
            errorMessage.append("\n").append(StringUtils.repeat("+", maxLength));
            errorMessage.append("\n").append(StringUtils.repeat("#", maxLength));
            throw new TreatmentException(errorMessage.toString());
        } else {
            con.commit();
            LOG.info("Ajout termin avec succs !");
        }
    } catch (Exception ex) {
        con.rollback();
        LOG.error("\nErreur durant l'ajout des rfrentiels de traitement.\n!!! Rollback effectu !!!\n", ex);
        throw ex;
    } finally {
        con.setAutoCommit(true);
    }
}
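The finally block restores auto-commit before the connection goes back to the caller, so a pooled connection is always returned in a predictable state; the catch block rolls back and rethrows, leaving the repository untouched when any step fails.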

From source file:org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will remove an entry from the queue after
 * it has been compacted.
 * @param info info on the compaction entry to remove
 */
public void markCleaned(CompactionInfo info) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "delete from COMPACTION_QUEUE where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to delete compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }

            // Remove entries from completed_txn_components as well, so we don't start looking there
            // again.
            s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = '" + info.dbname + "' and "
                    + "ctc_table = '" + info.tableName + "'";
            if (info.partName != null) {
                s += " and ctc_partition = '" + info.partName + "'";
            }
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) < 1) {
                LOG.error("Expected to remove at least one row from completed_txn_components when "
                        + "marking compaction entry as clean!");
            }

            s = "select txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '"
                    + TXN_ABORTED + "' and tc_database = '" + info.dbname + "' and tc_table = '"
                    + info.tableName + "'";
            if (info.partName != null)
                s += " and tc_partition = '" + info.partName + "'";
            LOG.debug("Going to execute update <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            Set<Long> txnids = new HashSet<Long>();
            while (rs.next())
                txnids.add(rs.getLong(1));
            if (txnids.size() > 0) {

                // Remove entries from txn_components, as there may be aborted txn components
                StringBuilder buf = new StringBuilder();
                buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
                boolean first = true;
                for (long id : txnids) {
                    if (first)
                        first = false;
                    else
                        buf.append(", ");
                    buf.append(id);
                }

                buf.append(") and tc_database = '");
                buf.append(info.dbname);
                buf.append("' and tc_table = '");
                buf.append(info.tableName);
                buf.append("'");
                if (info.partName != null) {
                    buf.append(" and tc_partition = '");
                    buf.append(info.partName);
                    buf.append("'");
                }
                LOG.debug("Going to execute update <" + buf.toString() + ">");
                int rc = stmt.executeUpdate(buf.toString());
                LOG.debug("Removed " + rc + " records from txn_components");

                // Don't bother cleaning from the txns table.  A separate call will do that.  We don't
                // know here which txns still have components from other tables or partitions in the
                // table, so we don't know which ones we can and cannot clean.
            }

            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to delete from compaction queue " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "markCleaned(" + info + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        markCleaned(info);
    }
}
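Note the retry structure here: rollbackDBConn(dbConn) is the handler's wrapper around Connection.rollback(), and checkRetryable evidently throws RetryException for transient failures, which the outer catch turns into a recursive markCleaned(info) call; non-retryable failures surface as MetaException instead.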

From source file:com.funambol.json.coredb.dao.DBManager.java

public boolean deleteItem(String table, Map<String, String> item, String... keys) throws Exception {
    if (ds == null)
        throw new Exception("Data source is null.");

    Connection connection = null;
    Statement stmt = null;
    ResultSet rsltSet = null;
    ResultSetMetaData metadata = null;

    try {
        connection = ds.getConnection();
    } catch (SQLException ex) {
        release(connection, stmt, rsltSet);
        throw new Exception("An error occurred retrieving connection.", ex);
    }

    if (connection == null) {
        throw new Exception("Connection is null.");
    }

    try {
        stmt = connection.createStatement();
    } catch (SQLException ex) {
        release(connection, stmt, rsltSet);
        throw new Exception("An error occurred creating statement.", ex);
    }

    String query = prepareDeleteQuery(table, item, keys);

    try {
        int affectedRows = stmt.executeUpdate(query);
        if (affectedRows != 1) {
            connection.rollback();
            return false;
        }

        connection.commit();
        return true;
    } catch (SQLException ex) {
        release(connection, stmt, rsltSet);
        throw new Exception("An error occurred executing query [" + query + "].", ex);
    } finally {
        release(connection, stmt, rsltSet);
    }
}
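Note that this method never calls setAutoCommit(false) itself, so its commit() and rollback() calls only take effect if the data source hands out connections with auto-commit already disabled. Also, on the executeUpdate error path release(...) runs twice, once in the catch and once in finally, so that helper presumably tolerates already-closed resources.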

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Override
public void deleteDefinitionMetadatas(@Nonnull final List<QualifiedName> names) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<QualifiedName>> subLists = Lists.partition(names,
                    config.getUserMetadataMaxInClauseItems());
            for (List<QualifiedName> subNames : subLists) {
                _deleteDefinitionMetadatas(conn, subNames);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException(
                String.format("Failed deleting the definition metadata for %s", names), e);
    }
}
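Partitioning the names into sublists capped at config.getUserMetadataMaxInClauseItems() presumably keeps each generated IN clause under the configured limit, while the single commit() after the loop makes the deletion atomic: a SQLException in any batch rolls back every batch.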

From source file:org.ulyssis.ipp.processor.Processor.java

private void registerInitialTags() {
    Snapshot oldSnapshot = this.snapshot;
    Connection connection = null;
    try {
        connection = Database.createConnection(EnumSet.of(READ_WRITE));
        for (Team team : Config.getCurrentConfig().getTeams()) {
            for (TagId tag : team.getTags()) {
                AddTagEvent e = new AddTagEvent(Instant.EPOCH, tag, team.getTeamNb());
                e.save(connection);
                this.snapshot = e.apply(this.snapshot);
                this.snapshot.save(connection);
            }
        }
        connection.commit();
    } catch (SQLException e) {
        LOG.error("An error occurred when registering initial tags!", e);
        this.snapshot = oldSnapshot;
        try {
            if (connection != null) {
                connection.rollback();
            }
        } catch (SQLException e2) {
            LOG.error("Error in rollback after previous error", e2);
        }
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException e) {
                LOG.error("Error while closing connection", e);
            }
        }
    }
}
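The in-memory snapshot is captured in oldSnapshot up front and restored before the database rollback, so application state and database state stay consistent when the transaction fails partway through the loop.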

From source file:dao.ColMessageDaoDb.java

public void addColMessage(String tid, String mid, String message, String topic, String userId, String userLogin,
        String collabrumId, boolean personalBlog, String fontSize, String fontFace, String fontColor,
        String moodId, String bgColor) throws BaseDaoException {

    if (RegexStrUtil.isNull(tid) || RegexStrUtil.isNull(mid) || RegexStrUtil.isNull(userId)) {
        throw new BaseDaoException("params are null");
    }

    if (RegexStrUtil.isNull(message) && RegexStrUtil.isNull(topic)) {
        throw new BaseDaoException("message & topic are null");
    }

    /**
     *  check if this is personal blog. if not, check the permission - diaryAdmin or Organizer
     */
    if (!personalBlog) {
        if (!isOrganizer(collabrumId, userLogin, userId) && !isColMember(collabrumId, userId)) {
            throw new BaseDaoException("permission denied as this user is not a member of collabrum " + userId);
        }
    }

    /**
     *  Get scalability datasource for collmessages partitioned on tid
     */
    String sourceName = scalabilityManager.getWriteScalability(tid);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, addColMessage() " + sourceName + " userId = " + userId);
    }
    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        addQuery.run(conn, tid, mid, message, topic, userId);
        addAttrQuery.run(conn, tid, "LAST_INSERT_ID()", fontSize, fontFace, fontColor, moodId, bgColor);
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("connection close exception", e2);
            }
            throw new BaseDaoException("error occured while rollingback entries from ColmessageDaoDb", e1);
        }
    }
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException("commit exception", e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException("connection close exception", e4);
    }

    /**
     *  delete collabrum messages
     */
    StringBuffer sb = new StringBuffer(collabrumId);
    sb.append("-");
    sb.append(tid);
    String key = sb.toString();
    Fqn fqn = cacheUtil.fqn(DbConstants.COLMSGS);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }

    fqn = cacheUtil.fqn(DbConstants.COLTOPIC);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }

    fqn = cacheUtil.fqn(DbConstants.COLTOPICS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }

    fqn = cacheUtil.fqn(DbConstants.COLLABRUM);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
}
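Be careful with the error handling in this example: when the rollback in the catch block succeeds, nothing is rethrown, so control falls through to the next try block and commit() still runs on the rolled-back transaction while the original failure is silently lost. Rethrowing after a successful rollback, as the other examples on this page do, avoids that.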

From source file:org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will mark an entry in the queue as compacted
 * and put it in the ready to clean state.
 * @param info info on the compaction entry to mark as compacted.
 */
public void markCompacted(CompactionInfo info) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_state = '" + READY_FOR_CLEANING + "', "
                    + "cq_worker_id = null where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to update compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to update compaction queue " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "markCompacted(" + info + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        markCompacted(info);
    }
}

From source file:och.comp.db.base.universal.UniversalQueries.java

private List<Integer> tryUpdate(BaseUpdateOp... updates) throws SQLException {

    List<Integer> out = new ArrayList<>();
    if (isEmpty(updates))
        return out;

    Connection conn = getSingleOrNewConnection(ds);
    conn.setAutoCommit(false);

    try {

        for (BaseUpdateOp update : updates) {
            Integer opResult;
            if (update instanceof UpdateRows) {
                opResult = updateRows((UpdateRows) update, conn);
            } else if (update instanceof CreateRow) {
                opResult = createRow((CreateRow) update, conn);
            } else if (update instanceof DeleteRows) {
                opResult = deleteRows((DeleteRows) update, conn);
            } else {
                opResult = 0;
            }
            out.add(opResult);
        }

        conn.commit();
        return out;

    } catch (SQLException e) {
        saveRealRollbackException(e);
        conn.rollback();
        throw e;

    } finally {
        try {
            conn.setAutoCommit(true);
            conn.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
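saveRealRollbackException(e) records the original failure before rollback() runs, guarding against the rollback itself throwing and masking the root cause; on Java 7+ the same effect can be achieved by attaching the secondary failure with e.addSuppressed(...).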

From source file:eagle.storage.jdbc.entity.impl.JdbcEntityUpdaterImpl.java

@Override
public int update(List<E> entities) throws Exception {
    ConnectionManager cm = ConnectionManagerFactory.getInstance();
    TorqueStatementPeerImpl<E> peer = cm.getStatementExecutor(this.jdbcEntityDefinition.getJdbcTableName());
    Connection connection = cm.getConnection();
    connection.setAutoCommit(false);

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    int num = 0;
    try {
        for (E entity : entities) {
            String primaryKey = entity.getEncodedRowkey();
            PrimaryKeyCriteriaBuilder pkBuilder = new PrimaryKeyCriteriaBuilder(Arrays.asList(primaryKey),
                    this.jdbcEntityDefinition.getJdbcTableName());
            Criteria selectCriteria = pkBuilder.build();
            if (LOG.isDebugEnabled())
                LOG.debug("Updating by query: " + SqlBuilder.buildQuery(selectCriteria).getDisplayString());
            ColumnValues columnValues = JdbcEntitySerDeserHelper.buildColumnValues(entity,
                    this.jdbcEntityDefinition);
            num += peer.delegate().doUpdate(selectCriteria, columnValues, connection);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Committing updates");
        connection.commit();
    } catch (Exception ex) {
        LOG.error("Failed to update, rolling back", ex);
        connection.rollback();
        throw ex;
    } finally {
        stopWatch.stop();
        if (LOG.isDebugEnabled())
            LOG.debug("Closing connection");
        connection.close();
    }
    LOG.info(String.format("Updated %s records in %s ms", num, stopWatch.getTime()));
    return num;
}
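Because every entity is updated on a single connection with auto-commit disabled, a failure on any row rolls back the whole batch: callers see either all updates applied or none.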