Example usage for java.sql Connection commit

A list of usage examples for java.sql.Connection.commit()

Introduction

On this page you can find usage examples for java.sql.Connection.commit().

Prototype

void commit() throws SQLException;

Document

Makes all changes made since the previous commit/rollback permanent and releases any database locks currently held by this Connection object.
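
Before the examples from real projects, here is a minimal sketch of the canonical pattern: turn off auto-commit, execute the statements, call commit() on success, and roll back on failure. The DataSource, table name, and column names below are hypothetical placeholders, not taken from any of the examples.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

public class CommitExample {

    private final DataSource dataSource; // hypothetical: any configured DataSource

    public CommitExample(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    public void transfer(long fromId, long toId, long amount) throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false); // group both updates into a single transaction
            try (PreparedStatement ps = conn
                    .prepareStatement("UPDATE accounts SET balance = balance + ? WHERE id = ?")) {
                ps.setLong(1, -amount);
                ps.setLong(2, fromId);
                ps.executeUpdate();
                ps.setLong(1, amount);
                ps.setLong(2, toId);
                ps.executeUpdate();
                conn.commit(); // make both updates permanent and release the locks
            } catch (SQLException e) {
                conn.rollback(); // undo the partial transaction before rethrowing
                throw e;
            }
        }
    }
}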

Usage

From source file:com.healthcit.cacure.utils.DBSchemaUpdater.java

@Override
public void afterPropertiesSet() throws Exception {
    Connection connection = DataSourceUtils.getConnection(dataSource);
    connection.setAutoCommit(false);
    try {
        Statement statement = connection.createStatement();
        try {
            long version = 0;
            try {
                ResultSet rs = statement.executeQuery("select schema_version from sys_variables limit 1;");
                try {
                    if (!rs.next()) {
                        throw new RuntimeException("It seems there is no row in the sys_variables table.");
                    }
                    version = rs.getLong(1);
                } finally {
                    rs.close();
                }
            } catch (PSQLException e) {
                // roll back so that the subsequent statements can execute successfully
                connection.rollback();
                log.info("Can't find sys_variables table. Applying initial script.");
                String initialScriptStatements = getStatementsFor(0);
                if (initialScriptStatements == null) {
                    throw new RuntimeException("Can't find initial script.");
                }
                statement.executeUpdate(initialScriptStatements);
                //there is already schema_version at 0
                connection.commit();
                log.info("Initial script successfully executed.");
            }
            for (long v = version + 1;; v++) {
                String statements = getStatementsFor(v);
                if (statements == null) {
                    break;
                }
                log.info("Updating schema to " + v + " version...");
                statement.execute(statements);
                statement.executeUpdate("update sys_variables set schema_version = " + v + ";");
                connection.commit();
                log.info("OK");
            }
        } catch (BatchUpdateException e) {
            if (e.getNextException() != null) {
                e.getNextException().printStackTrace();
            }
            e.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
            connection.rollback();
        } finally {
            statement.close();
        }
    } finally {
        DataSourceUtils.releaseConnection(connection, dataSource);
    }
}
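
Note the design choice above: commit() is called once per schema version inside the loop, so each migration step becomes permanent as soon as it succeeds, and a later failure leaves the database at the last good version rather than rolling back the entire upgrade.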

From source file:eionet.cr.dao.virtuoso.VirtuosoHarvestScriptDAO.java

/**
 * @see eionet.cr.dao.HarvestScriptDAO#insert(eionet.cr.dto.HarvestScriptDTO.TargetType, java.lang.String, java.lang.String,
 *      java.lang.String, boolean, boolean, Phase)
 */
@Override
public int insert(TargetType targetType, String targetUrl, String title, String script, boolean active,
        boolean runOnce, Phase phase) throws DAOException {

    String sourceUrl = targetType != null && targetType.equals(TargetType.SOURCE) ? targetUrl : null;
    String typeUrl = targetType != null && targetType.equals(TargetType.TYPE) ? targetUrl : null;

    if (phase == null) {
        phase = HarvestScriptDTO.DEFAULT_PHASE;
    }

    Connection conn = null;
    try {
        conn = getSQLConnection();
        conn.setAutoCommit(false);

        ArrayList<Object> values = new ArrayList<Object>();
        values.add(sourceUrl == null ? "" : sourceUrl);
        values.add(typeUrl == null ? "" : typeUrl);

        Object o = SQLUtil.executeSingleReturnValueQuery(GET_LAST_POSITION_SQL, values, conn);
        int position = o == null ? 1 : Integer.parseInt(o.toString()) + 1;

        values = new ArrayList<Object>();
        values.add(sourceUrl);
        values.add(typeUrl);
        values.add(title);
        values.add(script);
        values.add(Integer.valueOf(position));
        values.add(YesNoBoolean.format(active));
        values.add(YesNoBoolean.format(runOnce));
        values.add(phase.name());

        int result = SQLUtil.executeUpdateReturnAutoID(INSERT_SQL, values, conn);
        conn.commit();
        return result;
    } catch (Exception e) {
        SQLUtil.rollback(conn);
        throw new DAOException(e.getMessage(), e);
    } finally {
        SQLUtil.close(conn);
    }
}

From source file:azkaban.project.JdbcProjectLoader.java

@Override
public void uploadFlows(Project project, int version, Collection<Flow> flows) throws ProjectManagerException {
    // We upload one flow at a time instead of batching, because the batch could be large.
    logger.info("Uploading flows");
    Connection connection = getConnection();

    try {
        for (Flow flow : flows) {
            uploadFlow(connection, project, version, flow, defaultEncodingType);
        }
        connection.commit();
    } catch (IOException e) {
        throw new ProjectManagerException("Flow Upload failed.", e);
    } catch (SQLException e) {
        throw new ProjectManagerException("Flow Upload failed.", e);
    } finally {
        DbUtils.closeQuietly(connection);
    }
}

From source file:azkaban.project.JdbcProjectLoader.java

@Override
public void uploadProjectProperties(Project project, List<Props> properties) throws ProjectManagerException {
    Connection connection = getConnection();

    try {
        for (Props props : properties) {
            uploadProjectProperty(connection, project, props.getSource(), props);
        }
        connection.commit();
    } catch (SQLException e) {
        throw new ProjectManagerException("Error uploading project property files", e);
    } catch (IOException e) {
        throw new ProjectManagerException("Error uploading project property files", e);
    } finally {
        DbUtils.closeQuietly(connection);
    }
}

From source file:azkaban.executor.JdbcExecutorLoader.java

@Override
public void uploadAttachmentFile(ExecutableNode node, File file) throws ExecutorManagerException {
    Connection connection = getConnection();
    try {
        uploadAttachmentFile(connection, node, file, defaultEncodingType);
        connection.commit();
    } catch (SQLException e) {
        throw new ExecutorManagerException("Error committing attachments ", e);
    } catch (IOException e) {
        throw new ExecutorManagerException("Error uploading attachments ", e);
    } finally {
        DbUtils.closeQuietly(connection);
    }
}

From source file:com.cloudera.sqoop.manager.CubridManagerImportTest.java

public void setUpData(String tableName, boolean nullEntry) {
    SqoopOptions options = new SqoopOptions(CubridTestUtils.getConnectString(), tableName);
    options.setUsername(CubridTestUtils.getCurrentUser());
    options.setPassword(CubridTestUtils.getPassword());

    LOG.debug("Setting up another CubridImport test: " + CubridTestUtils.getConnectString());

    manager = new CubridManager(options);

    Connection connection = null;
    Statement st = null;

    try {
        connection = manager.getConnection();
        connection.setAutoCommit(false);
        st = connection.createStatement();

        // create the database table and populate it with data.
        st.executeUpdate("DROP TABLE IF EXISTS " + tableName);
        st.executeUpdate("CREATE TABLE " + tableName + " (" + manager.escapeColName("id")
                + " INT NOT NULL PRIMARY KEY, " + manager.escapeColName("name") + " VARCHAR(24) NOT NULL, "
                + manager.escapeColName("start_date") + " DATE, " + manager.escapeColName("Salary") + " FLOAT, "
                + manager.escapeColName("dept") + " VARCHAR(32));");

        st.executeUpdate(
                "INSERT INTO " + tableName + " VALUES(1,'Aaron','2009-05-14'," + "1000000.00,'engineering');");
        st.executeUpdate("INSERT INTO " + tableName + " VALUES(2,'Bob','2009-04-20',400.00,'sales');");
        st.executeUpdate("INSERT INTO " + tableName + " VALUES(3,'Fred','2009-01-23'," + "15.00,'marketing');");
        if (nullEntry) {
            st.executeUpdate("INSERT INTO " + tableName + " VALUES(4,'Mike',NULL,NULL,NULL);");
        }

        connection.commit();
    } catch (SQLException sqlE) {
        LOG.error("Encountered SQL Exception: " + sqlE);
        sqlE.printStackTrace();
        fail("SQLException when running test setUp(): " + sqlE);
    } finally {
        try {
            if (null != st) {
                st.close();
            }

            if (null != connection) {
                connection.close();
            }
        } catch (SQLException sqlE) {
            LOG.warn("Got SQLException when closing connection: " + sqlE);
        }
    }
}

From source file:azkaban.executor.JdbcExecutorLoader.java

@Override
public void uploadLogFile(int execId, String name, int attempt, File... files) throws ExecutorManagerException {
    Connection connection = getConnection();
    try {
        uploadLogFile(connection, execId, name, attempt, files, defaultEncodingType);
        connection.commit();
    } catch (SQLException e) {
        throw new ExecutorManagerException("Error committing log", e);
    } catch (IOException e) {
        throw new ExecutorManagerException("Error committing log", e);
    } finally {
        DbUtils.closeQuietly(connection);
    }
}

From source file:com.uber.stream.kafka.chaperone.collector.reporter.DbAuditReporter.java

@Override
public void report(String sourceTopic, int recordPartition, long recordOffset, JSONObject record)
        throws InterruptedException {
    if (!aggregator.addRecord(sourceTopic, recordPartition, recordOffset, record)) {
        return;
    }

    // aggregation inside the aggregator is the fast path; only the database report latency is timed here
    final Timer.Context timerCtx = DB_REPORT_LATENCY_TIMER.time();
    try {
        Map<Long, Map<String, TimeBucket>> buffer = aggregator.getAndResetBuffer();
        Map<String, Map<Integer, Long>> topicOffsetsMap = aggregator.getOffsets();
        logger.debug("Reporting the buffered auditMsgs={} and offsets={}", buffer, topicOffsetsMap);

        int retryTimes = 1;
        // retry until successful; backpressure is imposed on the Kafka consumers via the Disruptor.
        while (true) {
            Connection conn = null;
            PreparedStatement insertMetricsStmt = null;
            PreparedStatement selectMetricsStmt = null;
            PreparedStatement updateMetricsStmt = null;
            PreparedStatement offsetInsertStmt = null;

            try {
                conn = getConnection();
                conn.setAutoCommit(false);

                insertMetricsStmt = conn.prepareStatement(String.format(INSERT_METRICS_SQL, dataTableName));
                selectMetricsStmt = conn.prepareStatement(String.format(SELECT_METRICS_SQL, dataTableName));
                updateMetricsStmt = conn.prepareStatement(String.format(UPDATE_METRICS_SQL, dataTableName));

                offsetInsertStmt = conn.prepareStatement(String.format(INSERT_OFFSET_SQL, offsetTableName));

                addOrUpdateOffsets(offsetInsertStmt, topicOffsetsMap);

                addOrUpdateRecord(selectMetricsStmt, updateMetricsStmt, insertMetricsStmt, buffer);

                conn.commit();
                return;
            } catch (Exception e) {
                int sleepInMs = Math.max(500, Math.min(60000, retryTimes * 500));
                logger.warn(String.format("Got exception to insert buckets=%d, retryTimes=%d, sleepInMs=%d",
                        buffer.size(), retryTimes++, sleepInMs), e);
                int count = 0;
                for (Map<String, TimeBucket> buckets : buffer.values()) {
                    count += buckets.size();
                }
                FAILED_TO_REPORT_COUNTER.mark(count);
                rollback(conn);
                Thread.sleep(sleepInMs);
            } finally {
                closeStatement(offsetInsertStmt);
                closeStatement(insertMetricsStmt);
                closeStatement(updateMetricsStmt);
                closeStatement(selectMetricsStmt);
                closeConnection(conn);
            }
        }
    } finally {
        timerCtx.stop();
    }
}

From source file:ca.sqlpower.matchmaker.address.AddressPool.java

public void clear() throws SQLException {
    SQLTable resultTable = project.getResultTable();
    Connection con = null;
    Statement stmt = null;

    try {
        con = project.createResultTableConnection();
        stmt = con.createStatement();

        con.setAutoCommit(false);
        String sql = "DELETE FROM " + DDLUtils.toQualifiedName(resultTable) + " WHERE 1=1";
        stmt.execute(sql);
        con.commit();
    } catch (Exception ex) {
        if (con != null) {
            con.rollback();
        }
        if (ex instanceof SQLException) {
            throw (SQLException) ex;
        } else {
            throw new RuntimeException("An unexpected error occurred while clearing the Address Pool", ex);
        }
    } finally {
        if (stmt != null)
            stmt.close();
        if (con != null)
            con.close();
    }

    addresses.clear();
}