Example usage for java.sql Connection commit

Introduction

This page lists usage examples for the java.sql Connection.commit() method.

Prototype

void commit() throws SQLException;

Document

Makes all changes made since the previous commit/rollback permanent and releases any database locks currently held by this Connection object.
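
Before the project-specific examples below, here is a minimal sketch of the usual pattern around commit(): disable auto-commit, do the work, commit on success, and roll back on failure. The DataSource, table, and column names here are placeholders, not taken from any example on this page.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

// Hypothetical helper: dataSource and demo_table are illustrative only.
void insertInTransaction(DataSource dataSource) throws SQLException {
    try (Connection conn = dataSource.getConnection()) {
        conn.setAutoCommit(false); // start an explicit transaction
        try {
            try (PreparedStatement ps = conn
                    .prepareStatement("INSERT INTO demo_table (id, name) VALUES (?, ?)")) {
                ps.setInt(1, 1);
                ps.setString(2, "example");
                ps.executeUpdate();
            }
            conn.commit(); // make all changes since the last commit/rollback permanent
        } catch (SQLException e) {
            conn.rollback(); // discard the uncommitted changes
            throw e;
        }
    }
}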

Usage

From source file:cz.cas.lib.proarc.common.user.UserManagerSql.java

@Override
public void setUserGroups(UserProfile user, List<Group> groups, String owner, String log) {
    try {
        FedoraTransaction ftx = new FedoraTransaction(remoteStorage);
        FedoraUserDao fedoraUsers = new FedoraUserDao();
        fedoraUsers.setTransaction(ftx);
        Connection c = source.getConnection();
        boolean rollback = true;
        try {
            c.setAutoCommit(false);
            groupStorage.removeMembership(c, user.getId());
            if (!groups.isEmpty()) {
                groupStorage.addMembership(c, user.getId(), groups);
            }
            fedoraUsers.setMembership(user, groups, log);
            c.commit();
            ftx.commit();
            rollback = false;
        } finally {
            ftx.close();
            DbUtils.close(c, rollback);
        }
    } catch (Exception ex) {
        throw new IllegalStateException(ex);
    }
}

From source file:org.ulyssis.ipp.processor.Processor.java

/**
 * Restore the state from the database
 *
 * @return Whether we could restore from db, if false, we're starting from a clean slate
 */
private boolean restoreFromDb() {
    Connection connection = null;
    Snapshot oldSnapshot = this.snapshot;
    try {
        connection = Database.createConnection(EnumSet.of(READ_WRITE));
        Optional<Snapshot> snapshot = Snapshot.loadLatest(connection);
        if (snapshot.isPresent()) {
            this.snapshot = snapshot.get();
            connection.commit();
            return true;
        } else {
            List<Event> events = Event.loadAll(connection);
            Snapshot snapshotBefore = this.snapshot;
            // Instant now = Instant.now(); // TODO: Handle future events later!
            for (Event event : events) {
                if (!event.isRemoved()/* && event.getTime().isBefore(now)*/) { // TODO: Future events later!
                    this.snapshot = event.apply(this.snapshot);
                    this.snapshot.save(connection);
                }
            }
            connection.commit();
            return !Objects.equals(this.snapshot, snapshotBefore);
        }
    } catch (SQLException | IOException e) {
        LOG.error("An error occurred when restoring from database!", e);
        this.snapshot = oldSnapshot;
        try {
            if (connection != null) {
                connection.rollback();
            }
        } catch (SQLException e2) {
            LOG.error("Error in rollback after previous error", e2);
        }
        return false;
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException e) {
                LOG.error("Error while closing connection", e);
            }
        }
    }
}

From source file:com.agiletec.plugins.jpcrowdsourcing.aps.system.services.ideainstance.IdeaInstanceDAO.java

@Override
public void removeIdeaInstance(String code) {
    PreparedStatement stat = null;
    Connection conn = null;
    try {
        List<String> ideaList = this.getIdeaDAO().searchIdea(code, null, null, null, null);
        conn = this.getConnection();
        conn.setAutoCommit(false);
        this.getIdeaDAO().removeIdeas(ideaList, conn);
        this.removeIdeaInstanceGroups(code, conn);
        this.removeIdeaInstance(code, conn);
        conn.commit();
    } catch (Throwable t) {
        this.executeRollback(conn);
        _logger.error("Error deleting ideainstance", t);
        throw new RuntimeException("Error deleting ideainstance", t);
    } finally {
        this.closeDaoResources(null, stat, conn);
    }
}

From source file:com.china317.gmmp.gmmp_report_analysis.App.java

private static void IntOutNoneRecordsStoreIntoDB(Map<String, AlarmNoMark> iniOutNoneRecords,
        ApplicationContext context) {
    Connection conn = null;
    String sql = "";
    try {
        SqlMapClient sc = (SqlMapClient) context.getBean("sqlMapClientLybc");
        conn = sc.getDataSource().getConnection();
        conn.setAutoCommit(false);
        Statement st = conn.createStatement();
        Iterator<String> it = iniOutNoneRecords.keySet().iterator();
        while (it.hasNext()) {
            String key = it.next();
            AlarmNoMark pos = iniOutNoneRecords.get(key);
            sql = "insert into TAB_ALARM_NOMARK " + " (LICENCE,BEGIN_TIME,END_TIME,ROAD) " + " values (" + "'"
                    + pos.getLicense() + "'," + "'" + pos.getBeginTime() + "'," + "'" + pos.getEndTime() + "',"
                    + "'" + pos.getRoad() + "')";
            log.info(sql);
            st.addBatch(sql);
        }
        st.executeBatch();
        conn.commit();
        log.info("[insertIntoDB TAB_ALARM_NOMARK success!!!]");
    } catch (Exception e) {
        e.printStackTrace();
        log.error(sql);
    } finally {
        iniOutNoneRecords.clear();
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
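
The example above assembles each INSERT by string concatenation, which is open to SQL injection and awkward to escape. A hedged alternative sketch, reusing the TAB_ALARM_NOMARK table and the AlarmNoMark accessors from the example (the time values are bound with setObject because their exact types are not shown), could parameterize the same batch:

// Sketch only: assumes the conn, iniOutNoneRecords, and AlarmNoMark type from the example above.
String insertSql = "INSERT INTO TAB_ALARM_NOMARK (LICENCE, BEGIN_TIME, END_TIME, ROAD) VALUES (?, ?, ?, ?)";
try (PreparedStatement ps = conn.prepareStatement(insertSql)) {
    for (AlarmNoMark pos : iniOutNoneRecords.values()) {
        ps.setString(1, pos.getLicense());
        ps.setObject(2, pos.getBeginTime()); // exact temporal type not shown in the example
        ps.setObject(3, pos.getEndTime());
        ps.setString(4, pos.getRoad());
        ps.addBatch();
    }
    ps.executeBatch();
    conn.commit();   // commit the whole batch in one transaction
} catch (SQLException e) {
    conn.rollback(); // drop the partial batch on failure
    throw e;
}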

From source file:com.adaptris.jdbc.connection.FailoverDatasourceTest.java

@Test
public void testCommitRollback() throws Exception {
    Connection conn = new MyProxy();

    try {
        try {
            conn.setAutoCommit(conn.getAutoCommit());
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.commit();
        } catch (SQLException e) {

        }

        try {
            conn.setAutoCommit(false);
            conn.rollback();
        } catch (SQLException e) {

        }

        try {
            conn.setAutoCommit(false);
            conn.rollback(conn.setSavepoint());
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.rollback(conn.setSavepoint("test"));
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.releaseSavepoint(conn.setSavepoint("test2"));
        } catch (SQLException e) {

        }
    } finally {
        JdbcUtil.closeQuietly(conn);

    }
}
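
The test above only verifies that the failover proxy tolerates commit, rollback, and savepoint calls. As a reminder of what the savepoint API does inside a real transaction, here is a minimal hedged sketch (the audit_log table and its rows are placeholders):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.sql.Statement;

// Illustrative only: audit_log is a hypothetical table.
void savepointDemo(Connection conn) throws SQLException {
    conn.setAutoCommit(false);
    try (Statement st = conn.createStatement()) {
        st.executeUpdate("INSERT INTO audit_log (msg) VALUES ('step 1')");
        Savepoint sp = conn.setSavepoint("afterStep1");
        try {
            st.executeUpdate("INSERT INTO audit_log (msg) VALUES ('step 2')");
        } catch (SQLException e) {
            conn.rollback(sp); // undo step 2 only; step 1 is still pending
        }
        conn.commit(); // make whatever remained permanent and release locks
    }
}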

From source file:de.walware.statet.r.internal.core.pkgmanager.DB.java

private void checkDB() throws SQLException {
    final Connection connection = getConnection();

    final ResultSet schemas = connection.getMetaData().getSchemas(null, REnv.NAME);
    while (schemas.next()) {
        if (REnv.NAME.equals(schemas.getString(1))) {
            return;
        }
    }

    try (final Statement statement = connection.createStatement()) {
        statement.execute(REnv.LibPaths.DEFINE_1);
        statement.execute(REnv.Pkgs.DEFINE_1);

        connection.commit();
    } catch (final SQLException e) {
        closeOnError();
        throw e;
    }
}

From source file:dk.netarkivet.archive.arcrepositoryadmin.ReplicaCacheHelpers.java

/**
 * Method for updating the filelist of a replicafileinfo instance.
 * Updates the following fields for the entry in the replicafileinfo:
 * <br/> filelist_status = OK.
 * <br/> filelist_checkdatetime = current time.
 *
 * @param replicafileinfoId The id of the replicafileinfo.
 * @param con An open connection to the archive database
 */
protected static void updateReplicaFileInfoFilelist(long replicafileinfoId, Connection con) {
    PreparedStatement statement = null;
    try {
        // The SQL statement
        final String sql = "UPDATE replicafileinfo SET filelist_status = ?, " + "filelist_checkdatetime = ? "
                + "WHERE replicafileinfo_guid = ?";

        Date now = new Date(Calendar.getInstance().getTimeInMillis());

        // complete the SQL statement.
        statement = DBUtils.prepareStatement(con, sql, FileListStatus.OK.ordinal(), now, replicafileinfoId);

        // execute the SQL statement
        statement.executeUpdate();
        con.commit();
    } catch (Exception e) {
        String msg = "Problems updating the replicafileinfo.";
        log.warn(msg);
        throw new IOFailure(msg, e);
    } finally {
        DBUtils.closeStatementIfOpen(statement);
    }
}

From source file:gridool.db.catalog.DistributionCatalog.java

@Nonnull
public int[] bindTableId(@Nonnull final String[] tableNames, @Nonnull final String templateTableNamePrefix)
        throws GridException {
    final int numTableNames = tableNames.length;
    if (numTableNames == 0) {
        return new int[0];
    }

    final int[] tableIds = new int[numTableNames];
    Arrays.fill(tableIds, -1);
    final String insertQuery = "INSERT INTO \"" + partitionkeyTableName
            + "\"(tablename, tplprefix) VALUES(?, ?)";
    final String selectQuery = "SELECT tablename, id FROM \"" + partitionkeyTableName + '"';
    final ResultSetHandler rsh = new ResultSetHandler() {
        public Object handle(final ResultSet rs) throws SQLException {
            for (int i = 0; rs.next(); i++) {
                String tblname = rs.getString(1);
                int pos = ArrayUtils.indexOf(tableNames, tblname);
                if (pos != -1) {
                    int key = rs.getInt(2);
                    tableIds[pos] = key;
                }
            }
            return null;
        }
    };
    final Object[][] params = new Object[numTableNames][];
    for (int i = 0; i < numTableNames; i++) {
        params[i] = new Object[] { tableNames[i], templateTableNamePrefix };
    }
    synchronized (tableIdMap) {
        final Connection conn = GridDbUtils.getPrimaryDbConnection(dbAccessor, false);
        try {
            JDBCUtils.batch(conn, insertQuery, params);
            JDBCUtils.query(conn, selectQuery, rsh);
            conn.commit();
        } catch (SQLException e) {
            SQLException nexterr = e.getNextException();
            if (nexterr == null) {
                LOG.error(e);
            } else {
                LOG.error(PrintUtils.prettyPrintStackTrace(nexterr), e);
            }
            try {
                conn.rollback();
            } catch (SQLException rbe) {
                LOG.warn("Rollback failed", rbe);
            }
            throw new GridException(e);
        } finally {
            JDBCUtils.closeQuietly(conn);
        }
        for (int i = 0; i < numTableNames; i++) {
            String tblname = tableNames[i];
            int tid = tableIds[i];
            if (tid == -1) {
                throw new IllegalStateException("Table ID is not registered for table: " + tblname);
            }
            tableIdMap.put(tblname, tid);
            String templateTableName = templateTableNamePrefix + tblname;
            tableIdMap.put(templateTableName, tid);
        }
    }
    return tableIds;
}

From source file:gridool.db.partitioning.phihash.monetdb.MonetDBCsvLoadOperation.java

private static long invokeCopyInto(final Connection conn, final String copyIntoQuery, final String tableName,
        final String fileName, final long numRecords) throws SQLException {
    final File loadFile = prepareLoadFile(fileName);
    final String queryTpl = complementCopyIntoQuery(copyIntoQuery, loadFile);
    long rtotal = 0;
    try {
        if (ENV_COPYINTO_PIECES > 0) {
            for (long offset = 0; offset < numRecords; offset += ENV_COPYINTO_PIECES) {
                final long rest = numRecords - offset;
                if (rest > 0) {
                    final String query;
                    if (rest > ENV_COPYINTO_PIECES) {
                        query = getCopyIntoQuery(queryTpl, ENV_COPYINTO_PIECES, offset);
                    } else {
                        query = getCopyIntoQuery(queryTpl, rest, offset);
                    }
                    final int ret = JDBCUtils.update(conn, query);
                    if (ret > 0) {
                        rtotal += ret;
                    } else {
                        LOG.warn("Unexpected result '" + ret + "' for query: " + query);
                    }
                } else {
                    break;
                }
            }
        } else {
            String query = getCopyIntoQuery(queryTpl, numRecords);
            rtotal = JDBCUtils.update(conn, query);
        }
        conn.commit();
    } catch (SQLException e) {
        LOG.error("rollback a transaction: " + queryTpl, e);
        conn.rollback();
        throw e;
    } finally {
        new FileDeletionThread(loadFile, LOG).start();
    }
    return rtotal;
}

From source file:dqyt.cy6.yutao.common.port.adapter.persistence.eventsourcing.mysql.MySQLJDBCEventStore.java

public EventStream fullEventStreamFor(EventStreamId anIdentity) {

    Connection connection = this.connection();

    ResultSet result = null;

    try {
        PreparedStatement statement = connection
                .prepareStatement("SELECT stream_version, event_type, event_body FROM tbl_es_event_store "
                        + "WHERE stream_name = ? " + "ORDER BY stream_version");

        statement.setString(1, anIdentity.streamName());

        result = statement.executeQuery();

        connection.commit();

        return this.buildEventStream(result);

    } catch (Throwable t) {
        throw new EventStoreException("Cannot query full event stream for: " + anIdentity.streamName()
                + " because: " + t.getMessage(), t);
    } finally {
        if (result != null) {
            try {
                result.close();
            } catch (SQLException e) {
                // ignore
            }
        }
        try {
            connection.close();
        } catch (SQLException e) {
            // ignore
        }
    }
}