Example usage for java.sql Connection rollback

List of usage examples for java.sql Connection rollback

Introduction

In this page you can find the example usage for java.sql Connection rollback.

Prototype

void rollback() throws SQLException;

Source Link

Document

Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object.

Usage

From source file:com.moss.schematrax.SchemaUpdater.java

/**
 * Creates {@code schema} from scratch on the given connection and then brings it up
 * to {@code schemaVersion}.
 *
 * Steps: (1) create the SCHEMA_UPDATES bookkeeping table if missing, (2) walk back
 * from the requested version to find the newest creation script supported by
 * {@code databaseType} and run it, (3) mark every update consolidated into that
 * script as already applied, (4) run registered injectors, and (5) run the normal
 * update routine up to the requested version.
 *
 * @param databaseType  target database flavor (affects SQL dialect choices)
 * @param sqlConnection open connection; when transaction management is enabled the
 *                      work is committed here and rolled back on failure
 * @param schema        name of the schema the objects are created in
 * @param schemaVersion the version the schema should end up at
 * @throws SchematraxException wrapping any failure (including rollback failures)
 */
public synchronized void createSchema(DatabaseType databaseType, Connection sqlConnection, String schema,
        String schemaVersion) throws SchematraxException {

    try {
        /*
         * CREATE THE SCHEMA_UPDATES TABLE
         */
        if (manageTransactions)
            sqlConnection.setAutoCommit(false);
        boolean updatesTableExists = updatesTableExists(databaseType, sqlConnection, schema);

        if (!updatesTableExists) {
            Statement statement = sqlConnection.createStatement();
            // SQL Server's TIMESTAMP is not a date/time column type; it needs DATETIME.
            String timestampDataType = "TIMESTAMP";

            if (databaseType.equals(DatabaseType.DB_TYPE_SQL_SERVER)) {
                timestampDataType = "DATETIME";
            }

            String createUpdatesTableStatementText = "create table " + schema
                    + ".SCHEMA_UPDATES (id varchar(255), dateApplied " + timestampDataType + ")";
            statement.executeUpdate(createUpdatesTableStatementText);
            if (manageTransactions)
                sqlConnection.commit();
        }

        /*
         * FIND AND APPLY THE CREATION SCRIPT
         */
        SchemaVersion priorVersion = schemaData.getVersion(schemaVersion);
        SchemaUpdate creationScript = null;

        // Walk back through prior versions until one supplies a creation script usable
        // on this platform. (Fixed: short-circuit && instead of the original bitwise &.)
        while (creationScript == null && priorVersion != null) {
            try {
                SchemaUpdate versionCreateScript = priorVersion.getCreationScript();
                if (versionCreateScript != null) {
                    /*
                     * This will throw an exception if this doesn't exist for our
                     * current platform (which we can ignore since there may be an
                     * earlier script we can use as a starting point)
                     */
                    versionCreateScript.getUpdateText(databaseType);
                    creationScript = versionCreateScript;
                }
            } catch (DatabaseTypeNotSupportedException ignored) {
                // Deliberately ignored: fall through and try an earlier version's script.
            }

            priorVersion = priorVersion.getPriorVersion();
        }
        if (creationScript == null) {
            throw new MissingElementException("Could not find a schema creation script");
        }

        log.info("Executing creation script");
        applyUpdateNoCommit(sqlConnection, databaseType, schema, creationScript);
        recordUpdate(sqlConnection, schema, creationScript);

        /*
         * HANDLE CONSOLIDATED UPDATES
         */
        List<SchemaUpdate> consolidatedUpdates = getUpdatesToVersion(priorVersion);
        List<String> consolidatedUpdateIds = new ArrayList<String>();
        for (SchemaUpdate update : consolidatedUpdates) {
            consolidatedUpdateIds.add(update.getId());
            log.info("Marking consolidated update \"" + update.getId() + "\" as applied.");
            // Recorded because they are rolled into the creation script itself.
            recordUpdate(sqlConnection, schema, update);
        }

        // Apply injections
        for (Injector injector : injectors) {
            String description = "after update '" + creationScript.getId() + "', injected by "
                    + Injector.class.getSimpleName() + " " + injector.getClass().getName();
            doInjections(injector.forVersion(creationScript.getId(), consolidatedUpdateIds), databaseType,
                    sqlConnection, schema, description);
        }

        // now run the normal schema update routine to get us up to the requested version as needed
        updateSchema(databaseType, sqlConnection, schema, schemaVersion);

        if (manageTransactions)
            sqlConnection.commit();

    } catch (Exception e) {
        log.info("There was an error", e);
        try {
            if (manageTransactions)
                sqlConnection.rollback();
            throw new SchematraxException(e);
        } catch (SQLException e1) {
            // NOTE(review): a rollback failure masks the original exception here —
            // only the rollback error is propagated. Confirm that is intended.
            log.info("There was an error rolling back the transaction", e1);
            throw new SchematraxException(e1);
        }
    }

}

From source file:hoot.services.db.DbUtils.java

/**
 * Executes a batch of way records against the per-map current_ways table.
 *
 * Transaction management (setAutoCommit/commit) is expected to be handled by the
 * caller; on any failure the shared connection is rolled back here before the
 * error is rethrown.
 *
 * @param mapId              id of the map whose current_ways_&lt;mapId&gt; table is targeted
 * @param records            CurrentWays records to insert, update, or delete
 * @param recordBatchType    INSERT, UPDATE, or DELETE
 * @param conn               open JDBC connection (rolled back on error)
 * @param maxRecordBatchSize flush the batch every this many records; -1 disables
 *                           intermediate flushes
 * @throws Exception if the batch fails; the message carries the underlying error
 */
public static void batchRecordsDirectWays(final long mapId, final List<?> records,
        final RecordBatchType recordBatchType, Connection conn, int maxRecordBatchSize) throws Exception {
    PreparedStatement ps = null;
    try {
        String sql = null;
        int count = 0;

        switch (recordBatchType) {
        case INSERT:

            sql = "insert into current_ways_" + mapId
                    + " (id, changeset_id, \"timestamp\", visible, version, tags) "
                    + "values (?, ?, ?, ?, ?, ?)";

            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getId());
                ps.setLong(2, way.getChangesetId());
                ps.setTimestamp(3, way.getTimestamp());
                ps.setBoolean(4, way.getVisible());
                ps.setLong(5, way.getVersion());

                Map<String, String> tags = (Map<String, String>) way.getTags();

                // Render the tags map as a PostgreSQL hstore literal: "k"=>"v",...
                StringBuilder hstore = new StringBuilder();
                for (Map.Entry<String, String> tag : tags.entrySet()) {
                    if (hstore.length() > 0) {
                        hstore.append(",");
                    }
                    hstore.append("\"").append(tag.getKey()).append("\"=>\"").append(tag.getValue()).append("\"");
                }
                ps.setObject(6, hstore.toString(), Types.OTHER);
                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        case UPDATE:

            sql = "update current_ways_" + mapId
                    + " set changeset_id=?, visible=?, \"timestamp\"=?, version=?, tags=? " + "where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getChangesetId());
                ps.setBoolean(2, way.getVisible());
                ps.setTimestamp(3, way.getTimestamp());
                ps.setLong(4, way.getVersion());

                Map<String, String> tags = (Map<String, String>) way.getTags();

                // Render the tags map as a PostgreSQL hstore literal: "k"=>"v",...
                StringBuilder hstore = new StringBuilder();
                for (Map.Entry<String, String> tag : tags.entrySet()) {
                    if (hstore.length() > 0) {
                        hstore.append(",");
                    }
                    hstore.append("\"").append(tag.getKey()).append("\"=>\"").append(tag.getValue()).append("\"");
                }
                ps.setObject(5, hstore.toString(), Types.OTHER);

                ps.setLong(6, way.getId());

                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        case DELETE:

            sql = "delete from current_ways_" + mapId + " where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getId());

                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        default:
            // Fixed: the original threw an exception with an empty message.
            throw new Exception("Unsupported record batch type: " + recordBatchType);
        }

        // Flush whatever is left in the final partial batch.
        ps.executeBatch();
    } catch (Exception e) {
        conn.rollback();
        String msg = "Error executing batch query.";
        msg += "  " + e.getMessage();
        if (e.getCause() != null) {
            // Fixed: the original dereferenced getCause() unconditionally (NPE when null).
            msg += " Cause:" + e.getCause().toString();
        }
        // Fixed: preserve the original exception as the cause instead of discarding it.
        throw new Exception(msg, e);
    } finally {
        if (ps != null) {
            ps.close();
        }
    }
}

From source file:hoot.services.db.DbUtils.java

/**
 * Executes a batch of relation records against the per-map current_relations table.
 *
 * Transaction management (setAutoCommit/commit) is expected to be handled by the
 * caller; on any failure the shared connection is rolled back here before the
 * error is rethrown.
 *
 * @param mapId              id of the map whose current_relations_&lt;mapId&gt; table is targeted
 * @param records            CurrentRelations records to insert, update, or delete
 * @param recordBatchType    INSERT, UPDATE, or DELETE
 * @param conn               open JDBC connection (rolled back on error)
 * @param maxRecordBatchSize flush the batch every this many records; -1 disables
 *                           intermediate flushes
 * @throws Exception if the batch fails; the message carries the underlying error
 */
public static void batchRecordsDirectRelations(final long mapId, final List<?> records,
        final RecordBatchType recordBatchType, Connection conn, int maxRecordBatchSize) throws Exception {
    PreparedStatement ps = null;
    try {
        String sql = null;
        int count = 0;

        switch (recordBatchType) {
        case INSERT:

            sql = "insert into current_relations_" + mapId
                    + " (id, changeset_id, \"timestamp\", visible, version, tags) "
                    + "values (?, ?, ?, ?, ?, ?)";

            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getId());
                ps.setLong(2, rel.getChangesetId());
                ps.setTimestamp(3, rel.getTimestamp());
                ps.setBoolean(4, rel.getVisible());
                ps.setLong(5, rel.getVersion());

                Map<String, String> tags = (Map<String, String>) rel.getTags();

                // Render the tags map as a PostgreSQL hstore literal: "k"=>"v",...
                StringBuilder hstore = new StringBuilder();
                for (Map.Entry<String, String> tag : tags.entrySet()) {
                    if (hstore.length() > 0) {
                        hstore.append(",");
                    }
                    hstore.append("\"").append(tag.getKey()).append("\"=>\"").append(tag.getValue()).append("\"");
                }
                ps.setObject(6, hstore.toString(), Types.OTHER);
                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        case UPDATE:

            sql = "update current_relations_" + mapId
                    + " set changeset_id=?, visible=?, \"timestamp\"=?, version=?, tags=? " + "where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getChangesetId());
                ps.setBoolean(2, rel.getVisible());
                ps.setTimestamp(3, rel.getTimestamp());
                ps.setLong(4, rel.getVersion());

                Map<String, String> tags = (Map<String, String>) rel.getTags();

                // Render the tags map as a PostgreSQL hstore literal: "k"=>"v",...
                StringBuilder hstore = new StringBuilder();
                for (Map.Entry<String, String> tag : tags.entrySet()) {
                    if (hstore.length() > 0) {
                        hstore.append(",");
                    }
                    hstore.append("\"").append(tag.getKey()).append("\"=>\"").append(tag.getValue()).append("\"");
                }
                ps.setObject(5, hstore.toString(), Types.OTHER);

                ps.setLong(6, rel.getId());

                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        case DELETE:

            sql = "delete from current_relations_" + mapId + " where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getId());

                ps.addBatch();

                if ((maxRecordBatchSize > -1) && (++count % maxRecordBatchSize == 0)) {
                    ps.executeBatch();
                }
            }

            break;

        default:
            // Fixed: the original threw an exception with an empty message.
            throw new Exception("Unsupported record batch type: " + recordBatchType);
        }

        // Flush whatever is left in the final partial batch.
        ps.executeBatch();
    } catch (Exception e) {
        conn.rollback();
        String msg = "Error executing batch query.";
        msg += "  " + e.getMessage();
        if (e.getCause() != null) {
            // Fixed: the original dereferenced getCause() unconditionally (NPE when null).
            msg += " Cause:" + e.getCause().toString();
        }
        // Fixed: preserve the original exception as the cause instead of discarding it.
        throw new Exception(msg, e);
    } finally {
        if (ps != null) {
            ps.close();
        }
    }
}

From source file:it.fub.jardin.server.DbUtils.java

/**
 * Inserts the given records into the table backing the result set, one INSERT per
 * record, inside a single transaction on a pooled connection.
 *
 * @param resultsetId id of the result set whose backing table receives the rows
 * @param records     records whose property names/values become columns/values
 * @param username    user name recorded in the application log
 * @return the number of rows actually inserted
 * @throws HiddenException with a user-facing message when the insert fails; the
 *                         transaction is rolled back first
 */
public int setObjects(final Integer resultsetId, final List<BaseModelData> records, String username)
        throws HiddenException {

    int result = 0;
    Connection connection = this.dbConnectionHandler.getConn();
    final String sep = ",";

    String tableName = null;
    try {
        ResultSetMetaData metadata = this.dbProperties.getResultsetMetadata(connection, resultsetId);
        tableName = metadata.getTableName(1);
        connection.setAutoCommit(false);
        for (BaseModelData record : records) {
            // Build the `col`=?,`col`=?,... assignment list for this record.
            // NOTE(review): column names are concatenated into the SQL (identifiers
            // cannot be bound as parameters); they should be validated upstream
            // against the table schema before reaching this point.
            StringBuilder set = new StringBuilder();
            for (String property : record.getPropertyNames()) {
                if (set.length() > 0) {
                    set.append(sep);
                }
                set.append("`").append(property).append("`=?");
            }

            String query = "INSERT INTO `" + tableName + "` SET " + set;

            PreparedStatement ps = connection.prepareStatement(query);
            try {
                int i = 1;
                for (String property : record.getPropertyNames()) {
                    Object value = record.get(property);
                    // Empty strings are stored as SQL NULL rather than as ''.
                    if ((value != null) && (String.valueOf(value).length() > 0)) {
                        ps.setObject(i, value);
                    } else {
                        ps.setNull(i, java.sql.Types.NULL);
                    }
                    i++;
                }
                int num = ps.executeUpdate();
                if (num > 0) {
                    String toLog = "INSERT (" + ps.toString() + ")";
                    JardinLogger.debug(username, toLog);
                }
                result += num;
            } finally {
                // Fixed: the statement was never closed in the original (leak per record).
                ps.close();
            }
        }
        connection.commit();
        connection.setAutoCommit(true);
    } catch (MySQLIntegrityConstraintViolationException ex) {
        try {
            connection.rollback();
        } catch (Exception e) {
            JardinLogger.debug(username, "Errore SQL: impossibile eseguire rollback transazione");
            e.printStackTrace();
        }
        String message = ex.getLocalizedMessage();
        String newMess = "";
        // Map well-known MySQL error codes to user-facing messages.
        if (ex.getErrorCode() == 1062) {
            // 1062: duplicate primary key
            newMess = newMess.concat(ex.getErrorCode() + " - Errore!!! \n PRIMARY KEY DUPLICATA :\n" + message);
        } else if (ex.getErrorCode() == 1048) {
            // 1048: NOT NULL / integrity constraint violated
            newMess = newMess
                    .concat(ex.getErrorCode() + " - Errore!!! \n VINCOLO DI INTEGRITA' VIOLATO :\n" + message);
        } else if (ex.getErrorCode() == 1452) {
            // 1452: foreign key constraint violated
            newMess = newMess
                    .concat(ex.getErrorCode() + " - Errore!!! \n VINCOLO DI FOREIGN KEY VIOLATO :\n" + message);
        } else {
            newMess = ex.getErrorCode() + " - Errore!!! \n Problemi sui dati da salvare :\n" + message;
        }
        JardinLogger.debug(username, "Errore SQL: " + newMess);
        throw new HiddenException(newMess);

    } catch (Exception e) {
        try {
            JardinLogger.error(username, "Errore SQL: impossibile eseguire rollback transazione");
            connection.rollback();
        } catch (Exception e1) {
            e1.printStackTrace();
        }
        throw new HiddenException("Errore durante il salvataggio delle modifiche:\n" + e.getLocalizedMessage());
    } finally {
        // The pooled connection is always returned, success or failure.
        this.dbConnectionHandler.closeConn(connection);
    }
    return result;
}

From source file:mom.trd.opentheso.bdd.helper.ConceptHelper.java

/**
 * Inserts a Concept into the concept table using an already-existing idConcept
 * (import or orphan case), inside a transaction on the caller-supplied connection.
 *
 * @param conn        open JDBC connection; autoCommit is switched off here and the
 *                    transaction is committed on success — the connection itself
 *                    stays owned by the caller
 * @param concept     the concept to persist (created/modified dates default to now)
 * @param urlSite     base URL sent to the ARK service when requesting an identifier
 * @param isArkActive when true an ARK id is fetched from the ARK web service
 * @param idUser      user recorded in the concept history entry
 * @return true when the history record and insert both succeeded, false otherwise
 */
public boolean insertConceptInTableRollBack(Connection conn, Concept concept, String urlSite,
        boolean isArkActive, int idUser) {

    Statement stmt;
    boolean status = false;
    // Default missing audit dates to "now" so the non-dated INSERT branch is rarely taken.
    if (concept.getCreated() == null) {
        concept.setCreated(new java.util.Date());
    }
    if (concept.getModified() == null) {
        concept.setModified(new java.util.Date());
    }
    try {
        String query;
        try {
            // Run the history record and the insert as one transaction.
            conn.setAutoCommit(false);
            stmt = conn.createStatement();
            try {

                /**
                 * Fetch the ARK identifier via the ARK web service, when enabled.
                 */
                String idArk = "";
                if (isArkActive) {
                    ArrayList<DcElement> dcElementsList = new ArrayList<>();
                    Ark_Client ark_Client = new Ark_Client();
                    idArk = ark_Client.getArkId(new FileUtilities().getDate(),
                            urlSite + "?idc=" + concept.getIdConcept() + "&idt=" + concept.getIdThesaurus(), "",
                            "", dcElementsList, "pcrt"); // pcrt: p = pactols, crt = DCMI code for collections
                }
                /**
                 * Record the change in the concept history table; on failure the
                 * transaction is rolled back and the connection closed.
                 */
                if (!addConceptHistorique(conn, concept, idUser)) {
                    conn.rollback();
                    conn.close();
                    return false;
                }
                // NOTE(review): values are concatenated directly into the SQL; this is safe
                // only as long as ids/status/notation never contain quotes — consider a
                // PreparedStatement.
                if (concept.getCreated() == null || concept.getModified() == null) {

                    query = "Insert into concept "
                            + "(id_concept, id_thesaurus, id_ark, status, notation, top_concept, id_group)"
                            + " values (" + "'" + concept.getIdConcept() + "'" + ",'" + concept.getIdThesaurus()
                            + "'" + ",'" + idArk + "'" + ",'" + concept.getStatus() + "'" + ",'"
                            + concept.getNotation() + "'" + "," + concept.isTopConcept() + ",'"
                            + concept.getIdGroup() + "')";
                } else {
                    query = "Insert into concept "
                            + "(id_concept, id_thesaurus, id_ark, created, modified, status, notation, top_concept, id_group)"
                            + " values (" + "'" + concept.getIdConcept() + "'" + ",'" + concept.getIdThesaurus()
                            + "'" + ",'" + idArk + "'" + ",'" + concept.getCreated() + "'" + ",'"
                            + concept.getModified() + "'" + ",'" + concept.getStatus() + "'" + ",'"
                            + concept.getNotation() + "'" + "," + concept.isTopConcept() + ",'"
                            + concept.getIdGroup() + "')";
                }

                stmt.executeUpdate(query);
                status = true;
                conn.commit();
            } finally {
                stmt.close();
            }
        } finally {
            // NOTE(review): intentionally empty — unlike insertConceptInTable(HikariDataSource, ...)
            // the caller owns this connection, so it is not closed here. autoCommit is also
            // left disabled on all paths; confirm callers restore it.
        }
    } catch (SQLException sqle) {
        // Duplicate-key violations are expected during imports and are not logged.
        // NOTE(review): no rollback is issued on this path despite the method name —
        // the open transaction is left for the caller; verify that is intended.
        if (!sqle.getMessage().contains("duplicate key value violates unique constraint")) {
            log.error("Error while adding Concept : " + concept.getIdConcept(), sqle);
        }
    }
    return status;
}

From source file:mom.trd.opentheso.bdd.helper.ConceptHelper.java

/**
 * Inserts a Concept into the concept table using an already-existing idConcept
 * (import case). A connection is borrowed from the pool, the history record and
 * the insert run in one transaction, and the connection is closed in all cases.
 *
 * @param ds          connection pool the working connection is borrowed from
 * @param concept     the concept to persist (created/modified dates default to now)
 * @param urlSite     base URL sent to the ARK service when requesting an identifier
 * @param isArkActive when true an ARK id is fetched and stored on the concept;
 *                    otherwise the concept's ARK id is cleared
 * @param idUser      user recorded in the concept history entry
 * @return true when the history record and insert both succeeded, false otherwise
 */
public boolean insertConceptInTable(HikariDataSource ds, Concept concept, String urlSite, boolean isArkActive,
        int idUser) {

    Connection conn;
    Statement stmt;
    boolean status = false;
    // Default missing audit dates to "now" so the non-dated INSERT branch is rarely taken.
    if (concept.getCreated() == null) {
        concept.setCreated(new java.util.Date());
    }
    if (concept.getModified() == null) {
        concept.setModified(new java.util.Date());
    }
    try {
        // Get connection from pool
        conn = ds.getConnection();
        String query;
        try {
            // Run the history record and the insert as one transaction.
            conn.setAutoCommit(false);
            stmt = conn.createStatement();
            try {

                /**
                 * Fetch the ARK identifier via the ARK web service, when enabled.
                 */
                String idArk = "";
                if (isArkActive) {
                    ArrayList<DcElement> dcElementsList = new ArrayList<>();
                    Ark_Client ark_Client = new Ark_Client();
                    idArk = ark_Client.getArkId(new FileUtilities().getDate(),
                            urlSite + "?idc=" + concept.getIdConcept() + "&idt=" + concept.getIdThesaurus(), "",
                            "", dcElementsList, "pcrt"); // pcrt: p = pactols, crt = DCMI code for collections
                    concept.setIdArk(idArk);
                } else {
                    concept.setIdArk("");
                }

                /**
                 * Record the change in the concept history table; on failure the
                 * transaction is rolled back and the connection closed.
                 */
                if (!addConceptHistorique(conn, concept, idUser)) {
                    conn.rollback();
                    conn.close();
                    return false;
                }

                // NOTE(review): values are concatenated directly into the SQL; this is safe
                // only as long as ids/status/notation never contain quotes — consider a
                // PreparedStatement.
                if (concept.getCreated() == null || concept.getModified() == null) {
                    query = "Insert into concept "
                            + "(id_concept, id_thesaurus, id_ark, status, notation, top_concept, id_group)"
                            + " values (" + "'" + concept.getIdConcept() + "'" + ",'" + concept.getIdThesaurus()
                            + "'" + ",'" + concept.getIdArk() + "'" + ",'" + concept.getStatus() + "'" + ",'"
                            + concept.getNotation() + "'" + "," + concept.isTopConcept() + ",'"
                            + concept.getIdGroup() + "')";
                } else {
                    query = "Insert into concept "
                            + "(id_concept, id_thesaurus, id_ark, created, modified, status, notation, top_concept, id_group)"
                            + " values (" + "'" + concept.getIdConcept() + "'" + ",'" + concept.getIdThesaurus()
                            + "'" + ",'" + concept.getIdArk() + "'" + ",'" + concept.getCreated() + "'" + ",'"
                            + concept.getModified() + "'" + ",'" + concept.getStatus() + "'" + ",'"
                            + concept.getNotation() + "'" + "," + concept.isTopConcept() + ",'"
                            + concept.getIdGroup() + "')";
                }

                stmt.executeUpdate(query);
                status = true;
                conn.commit();
            } finally {
                stmt.close();
            }
        } finally {
            // Always return the pooled connection.
            conn.close();
        }
    } catch (SQLException sqle) {
        // Duplicate-key violations are expected during imports and are not logged.
        // NOTE(review): no explicit rollback on this path — the close in the finally
        // discards the open transaction; confirm the pool treats that as a rollback.
        if (!sqle.getMessage().contains("duplicate key value violates unique constraint")) {
            log.error("Error while adding Concept : " + concept.getIdConcept(), sqle);
        }
    }
    return status;
}

From source file:com.concursive.connect.web.modules.documents.dao.FileItem.java

/**
 * Description of the Method/*  w ww. j  a  va 2 s  .  co m*/
 *
 * @param db Description of Parameter
 * @return Description of the Returned Value
 * @throws SQLException Description of Exception
 */
public boolean insert(Connection db) throws SQLException {
    if (!isValid()) {
        LOG.debug("Object validation failed");
        return false;
    }

    boolean result = false;
    boolean doCommit = false;
    try {
        if (doCommit = db.getAutoCommit()) {
            db.setAutoCommit(false);
        }
        StringBuffer sql = new StringBuffer();
        sql.append("INSERT INTO project_files "
                + "(folder_id, subject, client_filename, filename, version, size, ");
        sql.append("enabled, downloads, ");
        if (entered != null) {
            sql.append("entered, ");
        }
        if (modified != null) {
            sql.append("modified, ");
        }
        sql.append(" link_module_id, link_item_id, "
                + " enteredby, modifiedby, default_file, image_width, image_height, comment, featured_file) "
                + "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ");
        if (entered != null) {
            sql.append("?, ");
        }
        if (modified != null) {
            sql.append("?, ");
        }
        sql.append("?, ?, ?, ?, ?, ?, ?, ?, ?) ");

        int i = 0;
        PreparedStatement pst = db.prepareStatement(sql.toString());
        if (folderId > 0) {
            pst.setInt(++i, folderId);
        } else {
            pst.setNull(++i, java.sql.Types.INTEGER);
        }
        pst.setString(++i, subject);
        pst.setString(++i, clientFilename);
        pst.setString(++i, filename);
        pst.setDouble(++i, version);
        pst.setInt(++i, size);
        pst.setBoolean(++i, enabled);
        pst.setInt(++i, downloads);
        if (entered != null) {
            pst.setTimestamp(++i, entered);
        }
        if (modified != null) {
            pst.setTimestamp(++i, modified);
        }
        pst.setInt(++i, linkModuleId);
        pst.setInt(++i, linkItemId);
        pst.setInt(++i, enteredBy);
        pst.setInt(++i, modifiedBy);
        pst.setBoolean(++i, defaultFile);
        pst.setInt(++i, imageWidth);
        pst.setInt(++i, imageHeight);
        pst.setString(++i, comment);
        pst.setBoolean(++i, featuredFile);
        pst.execute();
        pst.close();
        id = DatabaseUtils.getCurrVal(db, "project_files_item_id_seq", -1);
        // New default item
        if (defaultFile) {
            updateDefaultRecord(db, linkModuleId, linkItemId, id);
        }
        // Insert the version information
        if (doVersionInsert) {
            FileItemVersion thisVersion = new FileItemVersion();
            thisVersion.setId(this.getId());
            thisVersion.setSubject(subject);
            thisVersion.setClientFilename(clientFilename);
            thisVersion.setFilename(filename);
            thisVersion.setVersion(version);
            thisVersion.setSize(size);
            thisVersion.setEnteredBy(enteredBy);
            thisVersion.setModifiedBy(modifiedBy);
            thisVersion.setImageWidth(imageWidth);
            thisVersion.setImageHeight(imageHeight);
            thisVersion.setComment(comment);
            thisVersion.insert(db);
        }
        logUpload(db);
        if (doCommit) {
            db.commit();
        }
        result = true;
    } catch (Exception e) {
        e.printStackTrace(System.out);
        if (doCommit) {
            db.rollback();
        }
        throw new SQLException(e.getMessage());
    } finally {
        if (doCommit) {
            db.setAutoCommit(true);
        }
    }
    return result;
}

From source file:com.concursive.connect.web.modules.documents.dao.FileItem.java

/**
 * Description of the Method/*w  ww  .j a v  a 2  s  .  c o  m*/
 *
 * @param db           Description of Parameter
 * @param baseFilePath Description of Parameter
 * @return Description of the Returned Value
 * @throws SQLException Description of Exception
 */
public boolean delete(Connection db, String baseFilePath) throws SQLException {
    if (id == -1) {
        throw new SQLException("Id not specified");
    }
    this.buildVersionList(db);
    // Need to delete the actual files
    for (FileItemVersion thisVersion : versionList) {
        // For each version, delete the main record
        String filePath = baseFilePath + getDatePath(thisVersion.getEntered());
        File fileToDelete = new File(filePath + thisVersion.getFilename());
        if (!fileToDelete.delete()) {
            LOG.error("File not found -- could not delete file: " + fileToDelete.getPath());
        }
        // Delete the thumbnails for this version
        ThumbnailList thumbnailList = new ThumbnailList();
        thumbnailList.setItemId(thisVersion.getId());
        thumbnailList.setVersion(thisVersion.getVersion());
        thumbnailList.buildList(db);
        for (Thumbnail thisThumbnail : thumbnailList) {
            File thumbnailToDelete = new File(filePath + thisThumbnail.getFilename());
            if (!thumbnailToDelete.delete()) {
                LOG.error("File thumbnail not found -- could not delete file: " + fileToDelete.getPath());
            }
        }
    }
    boolean result = false;
    boolean commit = db.getAutoCommit();
    try {
        if (commit) {
            db.setAutoCommit(false);
        }
        //Delete the ratings
        Rating.delete(db, this.getId(), TABLE, PRIMARY_KEY);

        // Delete the log of downloads
        int i = 0;
        PreparedStatement pst = db
                .prepareStatement("DELETE FROM project_files_download " + "WHERE item_id = ? ");
        pst.setInt(++i, this.getId());
        pst.execute();
        pst.close();
        // Delete the thumbnail
        i = 0;
        pst = db.prepareStatement("DELETE FROM project_files_thumbnail " + "WHERE item_id = ? ");
        pst.setInt(++i, this.getId());
        pst.execute();
        pst.close();
        // Delete all of the versions
        i = 0;
        pst = db.prepareStatement("DELETE FROM project_files_version " + "WHERE item_id = ? ");
        pst.setInt(++i, this.getId());
        pst.execute();
        pst.close();
        // Delete the master record
        i = 0;
        pst = db.prepareStatement("DELETE FROM project_files " + "WHERE item_id = ? ");
        pst.setInt(++i, this.getId());
        pst.execute();
        pst.close();
        if (linkModuleId == Constants.PROJECT_IMAGE_FILES) {
            if (this.getDefaultFile()) {
                Project project = ProjectUtils.loadProject(linkItemId);
                // Enable the next image
                FileItemList files = new FileItemList();
                files.setLinkModuleId(Constants.PROJECT_IMAGE_FILES);
                files.setLinkItemId(linkItemId);
                files.setIgnoreId(this.getId());
                files.buildList(db);
                if (files.size() > 0) {
                    project.setLogoId(files.get(0).getId());
                } else {
                    project.setLogoId(-1);
                }
                project.updateLogoId(db);
            }
        }
        if (commit) {
            db.commit();
        }
        if (linkModuleId == Constants.PROJECT_IMAGE_FILES) {
            CacheUtils.invalidateValue(Constants.SYSTEM_PROJECT_CACHE, linkItemId);
        }
        result = true;
    } catch (Exception e) {
        if (commit) {
            db.rollback();
        }
        throw new SQLException(e.getMessage());
    } finally {
        if (commit) {
            db.setAutoCommit(true);
        }
    }
    return result;
}

From source file:com.concursive.connect.web.modules.login.dao.User.java

/**
 * Description of the Method/*from   ww w . j a v a 2s . c om*/
 *
 * @param db        The database connection
 * @param ipAddress The ip address requesting the user to be added
 * @param prefs     The application prefs
 * @return true if the record was added successfully
 * @throws SQLException Database exception
 */
public boolean insert(Connection db, String ipAddress, ApplicationPrefs prefs) throws SQLException {
    boolean commit = db.getAutoCommit();
    try {
        if (commit) {
            db.setAutoCommit(false);
        }
        // Insert the user
        PreparedStatement pst = db.prepareStatement("INSERT INTO users "
                + "(instance_id, group_id, department_id, first_name, last_name, username, password, temporary_password, "
                + "company, email, enteredby, modifiedby, enabled, start_page, access_personal, access_enterprise, "
                + "access_admin, access_inbox, access_resources, expiration, registered, "
                + "account_size, access_invite, access_add_projects, terms, timezone, currency, language"
                + (entered != null ? ", entered" : "") + (modified != null ? ", modified" : "") + ") "
                + "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?"
                + (entered != null ? ", ?" : "") + (modified != null ? ", ?" : "") + ") ");
        int i = 0;
        DatabaseUtils.setInt(pst, ++i, instanceId);
        pst.setInt(++i, groupId);
        pst.setInt(++i, departmentId);
        pst.setString(++i, firstName);
        pst.setString(++i, lastName);
        pst.setString(++i, username);
        pst.setString(++i, password);
        pst.setString(++i, temporaryPassword);
        pst.setString(++i, company);
        pst.setString(++i, email);
        pst.setInt(++i, enteredBy);
        pst.setInt(++i, modifiedBy);
        pst.setBoolean(++i, enabled);
        pst.setInt(++i, startPage);
        pst.setBoolean(++i, true);
        pst.setBoolean(++i, true);
        pst.setBoolean(++i, accessAdmin);
        pst.setBoolean(++i, false);
        pst.setBoolean(++i, false);
        DatabaseUtils.setTimestamp(pst, ++i, expiration);
        pst.setBoolean(++i, registered);
        DatabaseUtils.setInt(pst, ++i, accountSize);
        pst.setBoolean(++i, accessInvite);
        pst.setBoolean(++i, accessAddProjects);
        pst.setBoolean(++i, terms);
        pst.setString(++i, timeZone);
        pst.setString(++i, currency);
        pst.setString(++i, language);
        if (entered != null) {
            pst.setTimestamp(++i, entered);
        }
        if (modified != null) {
            pst.setTimestamp(++i, modified);
        }
        pst.execute();
        pst.close();
        id = DatabaseUtils.getCurrVal(db, "users_user_id_seq", -1);
        // Record the IP
        if (ipAddress != null) {
            pst = db.prepareStatement("INSERT INTO user_log (user_id, ip_address) VALUES (?, ?)");
            pst.setInt(1, id);
            pst.setString(2, ipAddress);
            pst.execute();
            pst.close();
        }
        if (!isApiRestore()) {
            // Insert a corresponding user profile project
            UserUtils.addUserProfile(db, this, prefs);
            if (profileProjectId == -1) {
                LOG.error("profileProjectId did not get assigned!");
            }
        }
        if (commit) {
            db.commit();
        }
    } catch (Exception e) {
        LOG.error("adding user", e);
        if (commit) {
            db.rollback();
        }
        throw new SQLException(e.getMessage());
    } finally {
        if (commit) {
            db.setAutoCommit(true);
        }
    }
    return true;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * This enters locks into the queue in {@link #LOCK_WAITING} mode.
 *
 * Isolation Level Notes:
 * 1. We use S4U (with read_committed) to generate the next (ext) lock id.  This serializes
 * any 2 {@code enqueueLockWithRetry()} calls.
 * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc operations
 *
 * @param rqst the lock request listing each resource (db/table/partition) to lock
 * @return the still-open Connection paired with the generated external lock id;
 *         on success the connection is intentionally NOT closed here — ownership
 *         passes to the caller (see the {@code success} flag in the finally block)
 * @see #checkLockWithRetry(Connection, long, long)
 */
private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst)
        throws NoSuchTxnException, TxnAbortedException, MetaException {
    // Tracks whether the connection should survive this method (returned to caller)
    boolean success = false;
    Connection dbConn = null;
    try {
        Statement stmt = null;
        ResultSet rs = null;
        ResultSet lockHandle = null;
        try {
            // NOTE(review): lockInternal()/unlockInternal() pair appears to serialize
            // this operation within the process; confirm against their definitions.
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            long txnid = rqst.getTxnid();
            stmt = dbConn.createStatement();
            if (isValidTxn(txnid)) {
                //this also ensures that txn is still there in expected state
                lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
                if (lockHandle == null) {
                    // Txn row missing or not open: report why, or fail loudly if it
                    // "can't happen" given the checks above
                    ensureValidTxn(dbConn, txnid, stmt);
                    shouldNeverHappen(txnid);
                }
            }
            /** Get the next lock id.
             * This has to be atomic with adding entries to HIVE_LOCK entries (1st add in W state) to prevent a race.
             * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running.  1st one generates nl_next=7,
             * 2nd nl_next=8.  Then 8 goes first to insert into HIVE_LOCKS and acquires the locks.  Then 7 unblocks,
             * and add it's W locks but it won't see locks from 8 since to be 'fair' {@link #checkLock(java.sql.Connection, long)}
             * doesn't block on locks acquired later than one it's checking*/
            String s = sqlGenerator.addForUpdateClause("select nl_next from NEXT_LOCK_ID");
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                LOG.debug("Going to rollback");
                dbConn.rollback();
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, no record found in next_lock_id");
            }
            long extLockId = rs.getLong(1);
            // Advance the sequence while still holding the S4U row lock from the select above
            s = "update NEXT_LOCK_ID set nl_next = " + (extLockId + 1);
            LOG.debug("Going to execute update <" + s + ">");
            stmt.executeUpdate(s);

            if (txnid > 0) {
                List<String> rows = new ArrayList<>();
                // For each component in this lock request,
                // add an entry to the txn_components table
                for (LockComponent lc : rqst.getComponent()) {
                    if (lc.isSetIsAcid() && !lc.isIsAcid()) {
                        //we don't prevent using non-acid resources in a txn but we do lock them
                        continue;
                    }
                    boolean updateTxnComponents;
                    if (!lc.isSetOperationType()) {
                        //request came from old version of the client
                        updateTxnComponents = true;//this matches old behavior
                    } else {
                        switch (lc.getOperationType()) {
                        case INSERT:
                        case UPDATE:
                        case DELETE:
                            if (!lc.isSetIsDynamicPartitionWrite()) {
                                //must be old client talking, i.e. we don't know if it's DP so be conservative
                                updateTxnComponents = true;
                            } else {
                                /**
                                 * we know this is part of DP operation and so we'll get
                                 * {@link #addDynamicPartitions(AddDynamicPartitions)} call with the list
                                 * of partitions actually changed.
                                 */
                                updateTxnComponents = !lc.isIsDynamicPartitionWrite();
                            }
                            break;
                        case SELECT:
                            updateTxnComponents = false;
                            break;
                        case NO_TXN:
                            /*this constant is a bit of a misnomer since we now always have a txn context.  It
                             just means the operation is such that we don't care what tables/partitions it
                             affected as it doesn't trigger a compaction or conflict detection.  A better name
                             would be NON_TRANSACTIONAL.*/
                            updateTxnComponents = false;
                            break;
                        default:
                            //since we have an open transaction, only 4 values above are expected
                            throw new IllegalStateException(
                                    "Unexpected DataOperationType: " + lc.getOperationType() + " agentInfo="
                                            + rqst.getAgentInfo() + " " + JavaUtils.txnIdToString(txnid));
                        }
                    }
                    if (!updateTxnComponents) {
                        continue;
                    }
                    String dbName = lc.getDbname();
                    String tblName = lc.getTablename();
                    String partName = lc.getPartitionname();
                    rows.add(txnid + ", '" + dbName + "', " + (tblName == null ? "null" : "'" + tblName + "'")
                            + ", " + (partName == null ? "null" : "'" + partName + "'") + "," + quoteString(
                                    OpertaionType.fromDataOperationType(lc.getOperationType()).toString()));
                }
                // Batch the TXN_COMPONENTS inserts (generator may split into multiple statements)
                List<String> queries = sqlGenerator.createInsertValuesStmt(
                        "TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition, tc_operation_type)",
                        rows);
                for (String query : queries) {
                    LOG.debug("Going to execute update <" + query + ">");
                    int modCount = stmt.executeUpdate(query);
                }
            }

            // Build one HIVE_LOCKS row per component, all in LOCK_WAITING state,
            // sharing extLockId and numbered by intLockId
            List<String> rows = new ArrayList<>();
            long intLockId = 0;
            for (LockComponent lc : rqst.getComponent()) {
                if (lc.isSetOperationType() && lc.getOperationType() == DataOperationType.UNSET
                        && (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)
                                || conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEZ_TEST))) {
                    //old version of thrift client should have (lc.isSetOperationType() == false) but they do not
                    //If you add a default value to a variable, isSet() for that variable is true regardless of the where the
                    //message was created (for object variables.  It works correctly for boolean vars, e.g. LockComponent.isAcid).
                    //in test mode, upgrades are not tested, so client version and server version of thrift always matches so
                    //we see UNSET here it means something didn't set the appropriate value.
                    throw new IllegalStateException("Bug: operationType=" + lc.getOperationType()
                            + " for component " + lc + " agentInfo=" + rqst.getAgentInfo());
                }
                intLockId++;
                String dbName = lc.getDbname();
                String tblName = lc.getTablename();
                String partName = lc.getPartitionname();
                LockType lockType = lc.getType();
                // 'z' is a sentinel; the switch below covers every current LockType value
                char lockChar = 'z';
                switch (lockType) {
                case EXCLUSIVE:
                    lockChar = LOCK_EXCLUSIVE;
                    break;
                case SHARED_READ:
                    lockChar = LOCK_SHARED;
                    break;
                case SHARED_WRITE:
                    lockChar = LOCK_SEMI_SHARED;
                    break;
                }
                long now = getDbTime(dbConn);
                rows.add(extLockId + ", " + intLockId + "," + txnid + ", " + quoteString(dbName) + ", "
                        + valueOrNullLiteral(tblName) + ", " + valueOrNullLiteral(partName) + ", "
                        + quoteChar(LOCK_WAITING) + ", " + quoteChar(lockChar) + ", " +
                        //for locks associated with a txn, we always heartbeat txn and timeout based on that
                        (isValidTxn(txnid) ? 0 : now) + ", " + valueOrNullLiteral(rqst.getUser()) + ", "
                        + valueOrNullLiteral(rqst.getHostname()) + ", "
                        + valueOrNullLiteral(rqst.getAgentInfo()));// + ")";
            }
            List<String> queries = sqlGenerator
                    .createInsertValuesStmt("HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, hl_db, "
                            + "hl_table, hl_partition,hl_lock_state, hl_lock_type, "
                            + "hl_last_heartbeat, hl_user, hl_host, hl_agent_info)", rows);
            for (String query : queries) {
                LOG.debug("Going to execute update <" + query + ">");
                int modCount = stmt.executeUpdate(query);
            }
            // Commit releases the S4U locks on NEXT_LOCK_ID and the TXNS row
            dbConn.commit();
            success = true;
            return new ConnectionLockIdPair(dbConn, extLockId);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            // Throws RetryException (caught below) if the error is transient/retryable
            checkRetryable(dbConn, e, "enqueueLockWithRetry(" + rqst + ")");
            throw new MetaException(
                    "Unable to update transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(lockHandle);
            close(rs, stmt, null);
            if (!success) {
                /* This needs to return a "live" connection to be used by operation that follows it.
                Thus it only closes Connection on failure/retry. */
                closeDbConn(dbConn);
            }
            unlockInternal();
        }
    } catch (RetryException e) {
        // Transient failure flagged by checkRetryable(): retry the whole enqueue
        // on a fresh connection (recursion depth is bounded by the retry policy
        // inside checkRetryable — TODO confirm)
        return enqueueLockWithRetry(rqst);
    }
}