Example usage for java.sql Connection commit

List of usage examples for java.sql Connection commit

Introduction

On this page you can find example usage of java.sql.Connection.commit().

Prototype

void commit() throws SQLException;

Document

Makes all changes made since the previous commit/rollback permanent and releases any database locks currently held by this Connection object.
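
A typical pattern is to turn off auto-commit, execute one or more statements, call commit() once they all succeed, and roll back if any of them fails. The following is a minimal, self-contained sketch of that pattern; the JDBC URL, credentials, and the accounts table with its id and balance columns are hypothetical placeholders rather than part of any example below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class CommitExample {

    // Moves an amount between two rows of a hypothetical "accounts" table,
    // committing only if both updates succeed.
    public static void transfer(String url, String user, String password,
                                int fromId, int toId, double amount) throws SQLException {
        try (Connection conn = DriverManager.getConnection(url, user, password)) {
            conn.setAutoCommit(false); // group both updates into one transaction
            try (PreparedStatement debit = conn.prepareStatement(
                         "UPDATE accounts SET balance = balance - ? WHERE id = ?");
                 PreparedStatement credit = conn.prepareStatement(
                         "UPDATE accounts SET balance = balance + ? WHERE id = ?")) {
                debit.setDouble(1, amount);
                debit.setInt(2, fromId);
                debit.executeUpdate();

                credit.setDouble(1, amount);
                credit.setInt(2, toId);
                credit.executeUpdate();

                conn.commit(); // make both updates permanent and release the locks
            } catch (SQLException e) {
                conn.rollback(); // undo any partial work before rethrowing
                throw e;
            }
        }
    }
}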

Usage

From source file:fitmon.WorkoutData.java

public void addData(String workout, String intensity, int minutes, double calories, String date, int userId)
        throws IOException, NoSuchAlgorithmException, InvalidKeyException, JSONException, SQLException,
        ClassNotFoundException {

    //ArrayList arr = new ArrayList(al);
    PreparedStatement st = null;
    Connection conn = null;

    try {
        Class.forName("com.mysql.jdbc.Driver");
        conn = (Connection) DriverManager.getConnection("jdbc:mysql://localhost:3306/fitmon", "root",
                "april-23");
        String query = "INSERT into workout (type,calories,date,intensity,duration,userId) values (?,?,?,?,?,?);";
        st = conn.prepareStatement(query);
        conn.setAutoCommit(false);

        //st.setInt(1,7);
        st.setString(1, workout);
        st.setDouble(2, calories);
        st.setString(3, date);
        st.setString(4, intensity);
        st.setInt(5, minutes);
        st.setInt(6, userId);
        st.addBatch();
        st.executeBatch();

        conn.commit();
        System.out.println("Record is inserted into workout table!");

        st.close();
        conn.close();

    } catch (SQLException e) {

        System.out.println(e.getMessage());
        // guard against a failed connection attempt before rolling back
        if (conn != null) {
            conn.rollback();
        }
    } finally {

        if (st != null) {
            st.close();
        }

        if (conn != null) {
            conn.close();
        }

    }

}

From source file:com.netflix.metacat.usermetadata.mysql.MySqlLookupService.java

/**
 * Saves the lookup value.
 * @param name lookup name
 * @param values multiple values
 * @return returns the lookup with the given name.
 */
@Override
public Lookup addValues(final String name, final Set<String> values) {
    Lookup lookup = null;
    try {
        final Connection conn = getDataSource().getConnection();
        try {
            lookup = findOrCreateLookupByName(name, conn);
            Set<String> inserts = Sets.newHashSet();
            final Set<String> lookupValues = lookup.getValues();
            if (lookupValues == null || lookupValues.isEmpty()) {
                inserts = values;
                lookup.setValues(values);
            } else {
                inserts = Sets.difference(values, lookupValues);
            }
            if (!inserts.isEmpty()) {
                insertLookupValues(lookup.getId(), inserts, conn);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        final String message = String.format("Failed to set the lookup values for name %s", name);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
    return lookup;
}

From source file:com.china317.gmmp.gmmp_report_analysis.App.java

private static void FatigueRecordsStoreIntoDB(Map<String, FatigueAlarmEntity> map, ApplicationContext context) {
    /**
     * INSERT INTO TAB_GPSEVENT_FATIGUE SELECT
     * LICENCE,'',ALARMSTARTTIME,ALARMENDTIME,'' FROM ALARMFATIGUE_REA
     */
    Connection conn = null;
    String sql = "";
    try {
        log.info("--------store Fatigue");
        SqlMapClient sc = (SqlMapClient) context.getBean("sqlMapClientDgm");
        conn = sc.getDataSource().getConnection();
        conn.setAutoCommit(false);
        Statement st = conn.createStatement();
        Iterator<String> it = map.keySet().iterator();
        while (it.hasNext()) {
            String key = it.next();
            FatigueAlarmEntity pos = map.get(key);
            sql = "insert into TAB_GPSEVENT_FATIGUE "
                    + " (license,license_color,start_time,end_time,pointcount) " + " values (" + "'"
                    + pos.getLicence() + "'," + "'" + pos.getLicenceColor() + "'," + "'"
                    + pos.getAlarmStartTime() + "'," + "'" + pos.getAlarmEndTime() + "'," + "'"
                    + pos.getPointCount() + "')";
            log.info(sql);
            st.addBatch(sql);
        }
        st.executeBatch();
        conn.commit();
        log.info("[insertIntoDB FatigueAlarmEntity success!!!]");
    } catch (Exception e) {
        log.info(e);
        log.error(sql);
    } finally {
        DgmAnalysisImp.getInstance().getFatigueMap().clear();
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.octo.captcha.engine.bufferedengine.buffer.DatabaseCaptchaBuffer.java

/**
 * Get all the locales used.
 */
public Collection getLocales() {
    Connection con = null;
    PreparedStatement ps = null;
    ResultSet rs = null;
    Set set = new HashSet();

    try {
        con = datasource.getConnection();
        ps = con.prepareStatement("select distinct " + localeColumn + " from " + table);
        rs = ps.executeQuery();
        while (rs.next()) {
            set.add(rs.getString(1));
        }
        rs.close();
        con.commit();
    } catch (SQLException e) {
        log.error(DB_ERROR, e);
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException ex) {
            }
        }
    } finally {
        if (ps != null) {
            try {
                ps.close();
            } catch (SQLException e) {
            }
        }
        if (con != null) {
            try {
                con.close();
            } catch (SQLException e) {
            }
        }
    }

    return set;
}

From source file:org.ulyssis.ipp.processor.Processor.java

private void registerInitialTags() {
    Snapshot oldSnapshot = this.snapshot;
    Connection connection = null;
    try {
        connection = Database.createConnection(EnumSet.of(READ_WRITE));
        for (Team team : Config.getCurrentConfig().getTeams()) {
            for (TagId tag : team.getTags()) {
                AddTagEvent e = new AddTagEvent(Instant.EPOCH, tag, team.getTeamNb());
                e.save(connection);
                this.snapshot = e.apply(this.snapshot);
                this.snapshot.save(connection);
            }
        }
        connection.commit();
    } catch (SQLException e) {
        LOG.error("An error occurred when registering initial tags!", e);
        this.snapshot = oldSnapshot;
        try {
            if (connection != null) {
                connection.rollback();
            }
        } catch (SQLException e2) {
            LOG.error("Error in rollback after previous error", e2);
        }
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException e) {
                LOG.error("Error while closing connection", e);
            }
        }
    }
}

From source file:com.cloudera.sqoop.manager.MySQLAuthTest.java

@Before
public void setUp() {
    super.setUp();
    SqoopOptions options = new SqoopOptions(AUTH_CONNECT_STRING, AUTH_TABLE_NAME);
    options.setUsername(AUTH_TEST_USER);
    options.setPassword(AUTH_TEST_PASS);

    LOG.debug("Setting up another MySQLAuthTest: " + AUTH_CONNECT_STRING);

    manager = new DirectMySQLManager(options);

    Connection connection = null;
    Statement st = null;

    try {
        connection = manager.getConnection();
        connection.setAutoCommit(false);
        st = connection.createStatement();

        // create the database table and populate it with data.
        st.executeUpdate("DROP TABLE IF EXISTS " + AUTH_TABLE_NAME);
        st.executeUpdate("CREATE TABLE " + AUTH_TABLE_NAME + " ("
                + "id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, " + "name VARCHAR(24) NOT NULL)");

        st.executeUpdate("INSERT INTO " + AUTH_TABLE_NAME + " VALUES(" + "NULL,'Aaron')");
        connection.commit();
    } catch (SQLException sqlE) {
        LOG.error("Encountered SQL Exception: " + sqlE);
        sqlE.printStackTrace();
        fail("SQLException when running test setUp(): " + sqlE);
    } finally {
        try {
            if (null != st) {
                st.close();
            }

            if (null != connection) {
                connection.close();
            }
        } catch (SQLException sqlE) {
            LOG.warn("Got SQLException when closing connection: " + sqlE);
        }
    }
}

From source file:com.cloudera.sqoop.TestIncrementalImport.java

/**
 * Create a table with an 'id' column full of integers.
 */
private void createIdTable(String tableName, int insertRows) throws SQLException {
    SqoopOptions options = new SqoopOptions();
    options.setConnectString(SOURCE_DB_URL);
    HsqldbManager manager = new HsqldbManager(options);
    Connection c = manager.getConnection();
    PreparedStatement s = null;
    try {
        s = c.prepareStatement("CREATE TABLE " + tableName + "(id INT NOT NULL)");
        s.executeUpdate();
        c.commit();
        insertIdRows(tableName, 0, insertRows);
    } finally {
        if (s != null) {
            s.close();
        }
    }
}

From source file:com.streamsets.pipeline.lib.jdbc.JdbcMultiRowRecordWriter.java

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public List<OnRecordErrorException> writeBatch(Collection<Record> batch) throws StageException {
    List<OnRecordErrorException> errorRecords = new LinkedList<>();
    Connection connection = null;
    try {
        connection = getDataSource().getConnection();

        // Since we are doing multi-row inserts we have to partition the batch into groups of the same
        // set of fields. We'll also sort each partition for optimal inserts into column stores.
        Multimap<Long, Record> partitions = partitionBatch(batch);

        for (Long partitionKey : partitions.keySet()) {
            processPartition(connection, partitions, partitionKey, errorRecords);
        }
    } catch (SQLException e) {
        handleSqlException(e);
    } finally {
        if (connection != null) {
            try {
                connection.commit();
                connection.close();
            } catch (SQLException e) {
                handleSqlException(e);
            }
        }
    }
    return errorRecords;
}

From source file:com.cloudera.sqoop.manager.MySQLAuthTest.java

public void doZeroTimestampTest(int testNum, boolean expectSuccess, String connectString)
        throws IOException, SQLException {

    LOG.info("Beginning zero-timestamp test #" + testNum);

    try {
        final String TABLE_NAME = "mysqlTimestampTable" + Integer.toString(testNum);

        // Create a table containing a full-zeros timestamp.
        SqoopOptions options = new SqoopOptions(connectString, TABLE_NAME);
        options.setUsername(AUTH_TEST_USER);
        options.setPassword(AUTH_TEST_PASS);

        manager = new DirectMySQLManager(options);

        Connection connection = null;
        Statement st = null;

        connection = manager.getConnection();
        connection.setAutoCommit(false);
        st = connection.createStatement();

        // create the database table and populate it with data.
        st.executeUpdate("DROP TABLE IF EXISTS " + TABLE_NAME);
        st.executeUpdate("CREATE TABLE " + TABLE_NAME + " (" + "id INT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
                + "ts TIMESTAMP NOT NULL)");

        st.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES(" + "NULL,'0000-00-00 00:00:00.0')");
        connection.commit();
        st.close();
        connection.close();

        // Run the import.
        String[] argv = getArgv(true, false, connectString, TABLE_NAME);
        try {
            runImport(argv);
        } catch (Exception e) {
            if (expectSuccess) {
                // This is unexpected. rethrow.
                throw new RuntimeException(e);
            } else {
                // We expected an error.
                LOG.info("Got exception running import (expected). msg: " + e);
            }
        }

        // Make sure the result file is there.
        Path warehousePath = new Path(this.getWarehouseDir());
        Path tablePath = new Path(warehousePath, TABLE_NAME);
        Path filePath = new Path(tablePath, "part-m-00000");

        File f = new File(filePath.toString());
        if (expectSuccess) {
            assertTrue("Could not find imported data file", f.exists());
            BufferedReader r = new BufferedReader(new InputStreamReader(new FileInputStream(f)));
            assertEquals("1,null", r.readLine());
            IOUtils.closeStream(r);
        } else {
            assertFalse("Imported data when expected failure", f.exists());
        }
    } finally {
        LOG.info("Finished zero timestamp test #" + testNum);
    }
}

From source file:org.opencron.server.dao.HibernateDao.java

@Transactional(readOnly = false)
public void executeBatch(final String sql, final Object[]... parameters) {
    getSession().doWork(new Work() {

        public void execute(Connection connection) throws SQLException {
            connection.setAutoCommit(false);
            PreparedStatement stmt = connection.prepareStatement(sql);
            for (Object[] arr : parameters) {
                int i = 1;
                for (Object p : arr) {
                    stmt.setObject(i++, p);
                }
                stmt.addBatch();
            }
            stmt.executeBatch();
            connection.commit();
        }
    });
}