Example usage for java.sql Connection commit

List of usage examples for java.sql Connection commit

Introduction

On this page you can find example usage for java.sql Connection commit.

Prototype

void commit() throws SQLException;

Document

Makes all changes made since the previous commit/rollback permanent and releases any database locks currently held by this Connection object.
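
In practice, commit() is paired with Connection.setAutoCommit(false) and with rollback() in the failure path, so that a group of statements either all take effect or none do, as most of the examples below illustrate. The sketch that follows shows that basic pattern in isolation; the DataSource field, the demo_table name, and its columns are hypothetical placeholders, not taken from any of the projects listed under Usage.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

public class CommitExample {

    private final DataSource dataSource; // assumed to be configured elsewhere

    public CommitExample(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    public void insertTwoRows() throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false); // group both inserts into a single transaction
            try (PreparedStatement ps = conn
                    .prepareStatement("INSERT INTO demo_table (id, name) VALUES (?, ?)")) {
                ps.setInt(1, 1);
                ps.setString(2, "first");
                ps.executeUpdate();

                ps.setInt(1, 2);
                ps.setString(2, "second");
                ps.executeUpdate();

                conn.commit(); // make both inserts permanent and release locks
            } catch (SQLException e) {
                conn.rollback(); // undo any partial work before rethrowing
                throw e;
            }
        }
    }
}

Note that auto-commit is enabled by default on a new JDBC connection; in that mode each statement is committed as soon as it completes and an explicit commit() call is unnecessary.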

Usage

From source file:dbcount.DbCountInitializeJob.java

private void createTables(final Connection conn, final boolean useView) throws SQLException {
    final String createAccess = "CREATE TABLE " + "Access(url VARCHAR(100) NOT NULL,"
            + " referrer VARCHAR(100)," + " time BIGINT NOT NULL," + " PRIMARY KEY (url, time))";
    final String createPageview = "CREATE TABLE " + "Pageview(url VARCHAR(100) NOT NULL,"
            + " pageview BIGINT NOT NULL," + " PRIMARY KEY (url))";

    Statement st = conn.createStatement();
    try {
        st.executeUpdate(createAccess);
        if (!useView) {
            st.executeUpdate(createPageview);
        }
        conn.commit();
    } finally {
        st.close();
    }
}

From source file:com.china317.gmmp.gmmp_report_analysis.App.java

private static void PtmOverSpeedRecordsStoreIntoDB(Map<String, PtmOverSpeed> overSpeedRecords,
        ApplicationContext context) {
    // INSERT INTO
    // TAB_ALARM_OVERSPEED(LICENCE,BEGIN_TIME,END_TIME,SPEED,IS_END,AREA)
    // SELECT LICENSE,BEGINTIME,ENDTIME,AVGSPEED,'1',FLAG FROM
    // ALARMOVERSPEED_REA WHERE BUSINESSTYPE = '2'
    String sql = "";
    Connection conn = null;
    try {
        SqlMapClient sc = (SqlMapClient) context.getBean("sqlMapClientPtm");
        conn = sc.getDataSource().getConnection();
        conn.setAutoCommit(false);
        Statement st = conn.createStatement();
        Iterator<String> it = overSpeedRecords.keySet().iterator();
        while (it.hasNext()) {
            String key = it.next();
            PtmOverSpeed pos = overSpeedRecords.get(key);
            sql = "insert into TAB_ALARM_OVERSPEED " + " (LICENCE,BEGIN_TIME,END_TIME,SPEED,IS_END,AREA) "
                    + " values ('" + pos.getLicense() + "','" + pos.getBeginTime() + "','" + pos.getEndTIme()
                    + "'," + pos.getAvgSpeed() + "," + "1" + "," + pos.getFlag() + ")";
            log.info(sql);
            st.addBatch(sql);
        }
        st.executeBatch();
        conn.commit();
        log.info("[insertIntoDB OverSpeed success!!!]");
    } catch (Exception e) {
        e.printStackTrace();
        log.error(sql);
    } finally {
        overSpeedRecords.clear();
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

}

From source file:org.opencron.server.dao.HibernateDao.java

@Transactional(readOnly = false)
public void executeBatch(final String[] sqlList) {
    getSession().doWork(new Work() {

        public void execute(Connection connection) throws SQLException {
            connection.setAutoCommit(false);
            Statement stmt = connection.createStatement();
            for (String sql : sqlList) {
                stmt.addBatch(sql);
            }
            stmt.executeBatch();
            connection.commit();
        }
    });
}

From source file:edu.umd.cs.submitServer.servlets.UploadSubmission.java

public static Submission uploadSubmission(Project project, StudentRegistration studentRegistration,
        byte[] zipOutput, HttpServletRequest request, Timestamp submissionTimestamp, String clientTool,
        String clientVersion, String cvsTimestamp, SubmitServerDatabaseProperties db, Logger log)
        throws ServletException, IOException {

    Connection conn;
    try {
        conn = db.getConnection();
    } catch (SQLException e) {
        throw new ServletException(e);
    }
    Submission submission = null;
    boolean transactionSuccess = false;

    try {
        Integer baselinePK = project.getArchivePK();
        int testSetupPK = project.getTestSetupPK();
        byte baseLineSubmission[] = null;
        if (baselinePK != null && baselinePK.intValue() != 0) {
            baseLineSubmission = project.getBaselineZip(conn);
        } else if (testSetupPK != 0) {
            baseLineSubmission = Submission.lookupCanonicalSubmissionArchive(project.getProjectPK(), conn);
        }
        zipOutput = FixZip.adjustZipNames(baseLineSubmission, zipOutput);

        int archivePK = Submission.uploadSubmissionArchive(zipOutput, conn);

        synchronized (UPLOAD_LOCK) {
            final int NUMBER_OF_ATTEMPTS = 2;
            int attempt = 1;
            while (true) {
                try {
                    conn.setAutoCommit(false);
                    conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);

                    submission = Submission.submit(archivePK, studentRegistration, project, cvsTimestamp,
                            clientTool, clientVersion, submissionTimestamp, conn);

                    conn.commit();
                    transactionSuccess = true;
                    break;
                } catch (SQLException e) {
                    conn.rollback();

                    if (attempt++ >= NUMBER_OF_ATTEMPTS) {
                        Submission.deleteAbortedSubmissionArchive(archivePK, conn);
                        throw e;
                    }

                }
            }
        }

    } catch (SQLException e) {
        throw new ServletException(e);
    } finally {
        rollbackIfUnsuccessfulAndAlwaysReleaseConnection(transactionSuccess, request, conn, db, log);
    }
    logSubmission(studentRegistration, zipOutput, submission);

    if (submission.getBuildStatus() == Submission.BuildStatus.NEW)
        WaitingBuildServer.offerSubmission(project, submission);
    return submission;
}

From source file:azkaban.db.DatabaseSetup.java

private void runTableScripts(final Connection conn, final String table) throws IOException, SQLException {
    logger.info("Creating new table " + table);

    final String dbSpecificScript = "create." + table + ".sql";
    final File script = new File(this.scriptPath, dbSpecificScript);
    BufferedInputStream buff = null;
    try {
        buff = new BufferedInputStream(new FileInputStream(script));
        final String queryStr = IOUtils.toString(buff);
        final String[] splitQuery = queryStr.split(";\\s*\n");
        final QueryRunner runner = new QueryRunner();
        for (final String query : splitQuery) {
            runner.update(conn, query);
        }
        conn.commit();
    } finally {
        IOUtils.closeQuietly(buff);
    }
}

From source file:com.saasovation.common.port.adapter.persistence.eventsourcing.mysql.MySQLJDBCEventStore.java

@Override
public EventStream fullEventStreamFor(EventStreamId anIdentity) {

    Connection connection = this.connection();

    ResultSet result = null;

    try {
        PreparedStatement statement = connection
                .prepareStatement("SELECT stream_version, event_type, event_body FROM tbl_es_event_store "
                        + "WHERE stream_name = ? " + "ORDER BY stream_version");

        statement.setString(1, anIdentity.streamName());

        result = statement.executeQuery();

        connection.commit();

        return this.buildEventStream(result);

    } catch (Throwable t) {
        throw new EventStoreException("Cannot query full event stream for: " + anIdentity.streamName()
                + " because: " + t.getMessage(), t);
    } finally {
        if (result != null) {
            try {
                result.close();
            } catch (SQLException e) {
                // ignore
            }
        }
        try {
            connection.close();
        } catch (SQLException e) {
            // ignore
        }
    }
}

From source file:com.tera.common.database.query.CQueryService.java

@Override
public <T> boolean batchUpdate(String batchUpdate, BatchUpdateQuery<T> query, String errorMessage,
        boolean autoCommit) {
    Connection connection = null;
    PreparedStatement statement = null;

    try {
        connection = databaseFactory.getConnection();
        statement = connection.prepareStatement(batchUpdate);
        connection.setAutoCommit(autoCommit);

        Collection<T> items = query.getItems();
        for (T item : items) {
            query.handleBatch(statement, item);
            statement.addBatch();
        }
        statement.executeBatch();

        if (!autoCommit) {
            connection.commit();
        }
    } catch (Exception e) {
        if (errorMessage == null)
            log.error("Failed to execute BatchUpdate query {}", e, e);
        else
            log.error(errorMessage + " " + e, e);
        return false;
    } finally {

        close(null, statement, connection);
    }
    return true;
}

From source file:com.claim.controller.FileTransferController.java

public int createLogFileTransfer(ObjFileTransfer ftpObj) {
    Connection connection = null;
    int exec = 0;
    try {
        connection = new DBManage().open();
        FileTransferDao ftpDao = new FileTransferDao();
        ftpDao.setConnection(connection);

        exec = ftpDao.createLog(ftpObj);

    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (connection != null) {
            try {
                connection.commit();
                connection.close();
            } catch (SQLException ex) {
                Logger.getLogger(InitProgramController.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }
    return exec;
}

From source file:iudex.da.ContentUpdater.java

/**
 * Update first any content REFERENCES and then the content itself.
 */
public void update(UniMap content) throws SQLException {
    Connection conn = dataSource().getConnection();
    try {
        conn.setAutoCommit(false);
        conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        //FIXME: Correct isolation?

        List<UniMap> refs = content.get(ContentKeys.REFERENCES);
        if (refs != null) {
            update(refs, conn);
        }

        UniMap referer = content.get(ContentKeys.REFERER);
        if (referer != null) {
            //FIXME: Really sufficient as same path as content?
            update(referer, conn);
        }

        update(content, conn);

        conn.commit();
    } finally {
        if (conn != null)
            conn.close();
    }
}

From source file:dk.netarkivet.archive.arcrepositoryadmin.ReplicaCacheHelpers.java

/**
 * Method for updating the checksum status of a replicafileinfo instance.
 * Updates the following fields for the entry in the replicafileinfo:
 * <br/> checksum_status = UNKNOWN.
 * <br/> checksum_checkdatetime = current time.
 *
 * The replicafileinfo is in the filelist.
 *
 * @param replicafileinfoId The id of the replicafileinfo.
 * @param con An open connection to the archive database
 */
protected static void updateReplicaFileInfoChecksumUnknown(long replicafileinfoId, Connection con) {
    PreparedStatement statement = null;
    try {
        // The SQL statement
        final String sql = "UPDATE replicafileinfo SET checksum_status = ?, " + "checksum_checkdatetime = ? "
                + "WHERE replicafileinfo_guid = ?";

        Date now = new Date(Calendar.getInstance().getTimeInMillis());

        // complete the SQL statement.
        statement = DBUtils.prepareStatement(con, sql, ChecksumStatus.UNKNOWN.ordinal(), now,
                replicafileinfoId);

        // execute the SQL statement
        statement.executeUpdate();
        con.commit();
    } catch (Exception e) {
        String msg = "Problems updating the replicafileinfo.";
        log.warn(msg);
        throw new IOFailure(msg, e);
    } finally {
        DBUtils.closeStatementIfOpen(statement);
    }
}