Usage examples for java.sql.Connection#commit, whose signature is:
    void commit() throws SQLException;
Each section below shows how a real project calls commit() on a Connection object.
From source file: com.cloudera.sqoop.testutil.HsqldbTestServer.java
/**
 * Creates the demo EMPLOYEE table and seeds it with four rows.
 * Any existing copy of the table is dropped first (HSQLDB {@code DROP TABLE ... IF EXISTS}).
 *
 * @throws SQLException if any DDL/DML statement fails.
 * @throws ClassNotFoundException if the JDBC driver class cannot be loaded.
 */
public void createEmployeeDemo() throws SQLException, ClassNotFoundException {
    Class.forName(DRIVER_CLASS);
    Connection connection = null;
    Statement st = null;
    try {
        connection = getConnection();
        st = connection.createStatement();
        st.executeUpdate("DROP TABLE " + EMPLOYEE_TABLE_NAME + " IF EXISTS");
        st.executeUpdate("CREATE TABLE " + EMPLOYEE_TABLE_NAME
                + "(emp_id INT NOT NULL PRIMARY KEY, name VARCHAR(64))");
        st.executeUpdate("INSERT INTO " + EMPLOYEE_TABLE_NAME + " VALUES(1, 'Aaron')");
        st.executeUpdate("INSERT INTO " + EMPLOYEE_TABLE_NAME + " VALUES(2, 'Joe')");
        st.executeUpdate("INSERT INTO " + EMPLOYEE_TABLE_NAME + " VALUES(3, 'Jim')");
        st.executeUpdate("INSERT INTO " + EMPLOYEE_TABLE_NAME + " VALUES(4, 'Lisa')");
        connection.commit();
    } finally {
        // Nested try/finally so that a SQLException from st.close() can no
        // longer leak the Connection (the original closed them sequentially,
        // and a failing Statement.close() skipped Connection.close()).
        try {
            if (null != st) {
                st.close();
            }
        } finally {
            if (null != connection) {
                connection.close();
            }
        }
    }
}
From source file:dk.netarkivet.archive.arcrepositoryadmin.ReplicaCacheHelpers.java
/** * Method to create a new entry in the file table in the database. * The file_id is automatically created by the database, and the argument * is used for the filename for this new entry to the table. * This will also create a replicafileinfo entry for each replica. * * @param filename The filename for the new entry in the file table. * @param connection An open connection to the archive database * @throws IllegalState If the file cannot be inserted into the database. * @return created file_id for the new entry. *///ww w.j a va 2 s . co m protected static long insertFileIntoDB(String filename, Connection connection) throws IllegalState { log.debug("Insert file '" + filename + "' into database"); PreparedStatement statement = null; try { // Make the SQL statement for putting the replica into the database // and insert the variables for the entry to the replica table. statement = connection.prepareStatement("INSERT INTO file (filename) " + "VALUES ( ? )", Statement.RETURN_GENERATED_KEYS); statement.setString(1, filename); // execute the SQL statement statement.executeUpdate(); // Retrieve the fileId for the just inserted file. ResultSet resultset = statement.getGeneratedKeys(); resultset.next(); long fileId = resultset.getLong(1); connection.commit(); // Create replicafileinfo for each replica. createReplicaFileInfoEntriesInDB(fileId, connection); log.debug("Insert file '" + filename + "' into database completed. Assigned fileID=" + fileId); return fileId; } catch (SQLException e) { throw new IllegalState("Cannot add file '" + filename + "' to the database.", e); } finally { DBUtils.closeStatementIfOpen(statement); } }
From source file:edu.clemson.cs.nestbed.server.adaptation.sql.ProgramSymbolSqlAdapter.java
/**
 * Deletes the program symbol with the given id and returns the deleted row
 * as a {@code ProgramSymbol}.
 *
 * @param id the primary key of the ProgramSymbols row to delete.
 * @return the symbol that was deleted.
 * @throws AdaptationException if the row does not exist or a SQL error occurs.
 */
public ProgramSymbol deleteProgramSymbol(int id) throws AdaptationException {
    ProgramSymbol programSymbol = null;
    Connection connection = null;
    PreparedStatement statement = null;
    ResultSet resultSet = null;
    try {
        connection = DriverManager.getConnection(CONN_STR);
        // DriverManager connections default to auto-commit; the original
        // relied on commit()/rollback(), which the JDBC spec says throw
        // SQLException in auto-commit mode. Take explicit control here.
        connection.setAutoCommit(false);
        // Parameterized query instead of string concatenation.
        statement = connection.prepareStatement("SELECT * FROM ProgramSymbols WHERE id = ?");
        statement.setInt(1, id);
        resultSet = statement.executeQuery();
        if (!resultSet.next()) {
            connection.rollback();
            String msg = "Attempt to delete program symbol failed.";
            log.error(msg);
            throw new AdaptationException(msg);
        }
        programSymbol = getProgramSymbol(resultSet);
        resultSet.close();
        resultSet = null;
        statement.close();
        statement = connection.prepareStatement("DELETE FROM ProgramSymbols WHERE id = ?");
        statement.setInt(1, id);
        statement.executeUpdate();
        connection.commit();
    } catch (SQLException ex) {
        // Guard against NPE: getConnection() may have been what failed.
        if (connection != null) {
            try {
                connection.rollback();
            } catch (Exception e) {
                // best-effort rollback; original error is rethrown below
            }
        }
        String msg = "SQLException in deleteProgramSymbol";
        log.error(msg, ex);
        throw new AdaptationException(msg, ex);
    } finally {
        try { if (resultSet != null) { resultSet.close(); } } catch (Exception ex) { }
        try { if (statement != null) { statement.close(); } } catch (Exception ex) { }
        try { if (connection != null) { connection.close(); } } catch (Exception ex) { }
    }
    return programSymbol;
}
From source file:com.agiletec.plugins.jpcrowdsourcing.aps.system.services.idea.IdeaDAO.java
protected void updateTags(IIdea idea, Connection conn) { PreparedStatement stat = null; try {//from w w w . j a v a 2s.c om this.removeTags(idea.getId(), conn); stat = conn.prepareStatement(INSERT_IDEA_TAGS); this.addTagsRelationsRecord(idea, stat); stat.executeBatch(); conn.commit(); } catch (Throwable t) { _logger.error("Error updating Idea tags for {}", idea.getId(), t); throw new RuntimeException("Error updating Idea tags", t); } finally { closeDaoResources(null, stat, null); } }
From source file:edu.clemson.cs.nestbed.server.adaptation.sql.ProgramSqlAdapter.java
/**
 * Deletes the program with the given id and returns the deleted row as a
 * {@code Program}.
 *
 * @param id the primary key of the Programs row to delete.
 * @return the program that was deleted.
 * @throws AdaptationException if the row does not exist or a SQL error occurs.
 */
public Program deleteProgram(int id) throws AdaptationException {
    Program program = null;
    Connection connection = null;
    PreparedStatement statement = null;
    ResultSet resultSet = null;
    try {
        connection = DriverManager.getConnection(CONN_STR);
        // DriverManager connections default to auto-commit; the original
        // relied on commit()/rollback(), which the JDBC spec says throw
        // SQLException in auto-commit mode. Take explicit control here.
        connection.setAutoCommit(false);
        // Parameterized query instead of string concatenation.
        statement = connection.prepareStatement("SELECT * FROM Programs WHERE id = ?");
        statement.setInt(1, id);
        resultSet = statement.executeQuery();
        if (!resultSet.next()) {
            connection.rollback();
            String msg = "Attempt to delete program failed.";
            log.error(msg);
            throw new AdaptationException(msg);
        }
        program = getProgram(resultSet);
        resultSet.close();
        resultSet = null;
        statement.close();
        statement = connection.prepareStatement("DELETE FROM Programs WHERE id = ?");
        statement.setInt(1, id);
        statement.executeUpdate();
        connection.commit();
    } catch (SQLException ex) {
        // Guard against NPE: getConnection() may have been what failed.
        if (connection != null) {
            try {
                connection.rollback();
            } catch (Exception e) {
                // best-effort rollback; original error is rethrown below
            }
        }
        String msg = "SQLException in deleteProgram";
        log.error(msg, ex);
        throw new AdaptationException(msg, ex);
    } finally {
        try { if (resultSet != null) { resultSet.close(); } } catch (Exception ex) { }
        try { if (statement != null) { statement.close(); } } catch (Exception ex) { }
        try { if (connection != null) { connection.close(); } } catch (Exception ex) { }
    }
    return program;
}
From source file:com.concursive.connect.web.modules.profile.dao.ProjectCategoryLogoFile.java
public boolean insert(Connection db) throws SQLException { boolean result = false; // The required linkModuleId linkModuleId = Constants.PROJECT_CATEGORY_FILES; // Determine if the database is in auto-commit mode boolean doCommit = false; try {/*from ww w .ja v a 2 s.c om*/ if (doCommit = db.getAutoCommit()) { db.setAutoCommit(false); } // Insert the record result = super.insert(db); // Update the referenced pointer if (result) { int i = 0; PreparedStatement pst = db.prepareStatement( "UPDATE lookup_project_category " + "SET logo_id = ? " + "WHERE code = ? "); pst.setInt(++i, id); pst.setInt(++i, linkItemId); int count = pst.executeUpdate(); result = (count == 1); } if (doCommit) { db.commit(); } } catch (Exception e) { if (doCommit) { db.rollback(); } throw new SQLException(e.getMessage()); } finally { if (doCommit) { db.setAutoCommit(true); } } return result; }
From source file:com.che.software.testato.domain.dao.jdbc.impl.ScriptDAO.java
/**
 * Creates the scripts for a given hierarchy.
 *
 * @author Clement HELIOU (clement.heliou@che-software.com).
 * @param hierarchyId the hierarchy id.
 * @param scripts the scripts to create.
 * @since July, 2011.
 * @throws ScriptCreationDAOException if an error occurs during the
 *         creation.
 */
@Override
public void createScriptsFromHierarchy(int hierarchyId, List<ScriptCreation> scripts)
        throws ScriptCreationDAOException {
    LOGGER.debug("createScriptsFromHierarchy(" + hierarchyId + "," + scripts.size() + " scripts).");
    Connection connection = null;
    try {
        connection = getDataSource().getConnection();
        connection.setAutoCommit(false);
        for (ScriptCreation script : scripts) {
            getQueryRunner().update(connection,
                    "INSERT INTO script(script_id, hierarchy_id, label, depth) VALUES(nextval('script_seq'),?,'', ?) ",
                    new Object[] { hierarchyId, 1 });
            // NOTE(review): MAX(script_id) assumes no concurrent insert between
            // the INSERT above and this SELECT; currval('script_seq') on the
            // same session would be race-free — confirm before changing.
            Integer createdScript = (Integer) getQueryRunner().query(connection,
                    "SELECT MAX(script_id)::int AS scriptId FROM script ", new ScalarHandler("scriptId"));
            createItems(connection, script.getScriptArrows(), createdScript, 1, hierarchyId);
        }
        connection.commit();
    } catch (SQLException e) {
        // Guard against NPE: if getDataSource().getConnection() itself threw,
        // connection is still null here (the original dereferenced it anyway).
        if (null != connection) {
            try {
                connection.rollback();
            } catch (SQLException e1) {
                throw new ScriptCreationDAOException(e1);
            }
        }
        throw new ScriptCreationDAOException(e);
    } finally {
        if (null != connection) {
            DbUtils.closeQuietly(connection);
        }
    }
}
From source file:com.noelios.restlet.ext.jdbc.JdbcClientHelper.java
/** * Helper/*from www .ja v a 2s. c o m*/ * * @param connection * @param returnGeneratedKeys * @param sqlRequests * @return the result of the last executed SQL request */ private JdbcResult handleSqlRequests(Connection connection, boolean returnGeneratedKeys, List<String> sqlRequests) { JdbcResult result = null; try { connection.setAutoCommit(true); final Statement statement = connection.createStatement(); for (final String sqlRequest : sqlRequests) { statement.execute(sqlRequest, returnGeneratedKeys ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS); result = new JdbcResult(statement); } // Commit any changes to the database if (!connection.getAutoCommit()) { connection.commit(); } } catch (SQLException se) { getLogger().log(Level.WARNING, "Error while processing the SQL requests", se); try { if (!connection.getAutoCommit()) { connection.rollback(); } } catch (SQLException se2) { getLogger().log(Level.WARNING, "Error while rollbacking the transaction", se); } } return result; }
From source file:dk.netarkivet.archive.arcrepositoryadmin.ReplicaCacheHelpers.java
/** * This is used for updating a replicafileinfo instance based on the * results of a checksumjob./*from w w w .j ava 2s . c o m*/ * Updates the following fields for the entry in the replicafileinfo: * <br/>- checksum = checksum argument. * <br/>- upload_status = completed. * <br/>- filelist_status = ok. * <br/>- checksum_status = UNKNOWN. * <br/>- checksum_checkdatetime = now. * <br/>- filelist_checkdatetime = now. * * @param replicafileinfoId The unique id for the replicafileinfo. * @param checksum The new checksum for the entry. * @param con An open connection to the archive database */ protected static void updateReplicaFileInfoChecksum(long replicafileinfoId, String checksum, Connection con) { PreparedStatement statement = null; try { // The SQL statement final String sql = "UPDATE replicafileinfo SET checksum = ?, " + "upload_status = ?, filelist_status = ?, checksum_status " + "= ?, checksum_checkdatetime = ?, filelist_checkdatetime = ?" + " WHERE replicafileinfo_guid = ?"; Date now = new Date(Calendar.getInstance().getTimeInMillis()); // complete the SQL statement. statement = DBUtils.prepareStatement(con, sql, checksum, ReplicaStoreState.UPLOAD_COMPLETED.ordinal(), FileListStatus.OK.ordinal(), ChecksumStatus.UNKNOWN.ordinal(), now, now, replicafileinfoId); // execute the SQL statement statement.executeUpdate(); con.commit(); } catch (Exception e) { String msg = "Problems updating the replicafileinfo."; log.warn(msg); throw new IOFailure(msg, e); } finally { DBUtils.closeStatementIfOpen(statement); } }
From source file:gridool.db.sql.ParallelSQLMapTask.java
/**
 * Runs this map task's SQL on the local database and, when rows were
 * produced, ships the result file to the reduce destination.
 *
 * Flow: spool the SELECT result either into a new table (CREATE TABLE AS,
 * when this node is also the sender) or into a temp file (COPY INTO),
 * under an exclusive lock on the system-table symbol; then transfer the
 * temp file to dstAddr:dstPort. Timing for query execution and for the
 * result transfer is recorded in the returned result object.
 *
 * @return task result holding the sent file name (null if no rows or local
 *         task), row count, and timing figures.
 * @throws GridException on temp-file creation failure, SQL failure, or
 *         file-transfer failure.
 */
@Override
protected ParallelSQLMapTaskResult execute() throws GridException {
    assert (registry != null);
    // Temp file that will receive the COPY INTO output; name embeds the task
    // number and local host address to keep it unique across nodes.
    final File tmpFile;
    try {
        tmpFile = File.createTempFile("PSQLMap" + taskNumber + '_', '_' + NetUtils.getLocalHostAddress());
    } catch (IOException e) {
        throw new GridException(e);
    }
    // Split the incoming query; a multi-statement query carries DDL around
    // the actual SELECT (e.g., create view / drop view).
    final QueryString[] queries = SQLTranslator.divideQuery(query, true);
    final boolean singleStatement = (queries.length == 1);
    final String selectQuery = singleStatement ? query : SQLTranslator.selectFirstSelectQuery(queries);
    GridNode localNode = config.getLocalNode();
    GridNode senderNode = getSenderNode();
    assert (senderNode != null);
    // When the sender is this very node, materialize directly via
    // CREATE TABLE AS instead of spooling to a file and sending it.
    final boolean useCreateTableAS = senderNode.equals(localNode);
    final int fetchedRows;
    final LockManager lockMgr = registry.getLockManager();
    final ReadWriteLock rwlock = lockMgr.obtainLock(DBAccessor.SYS_TABLE_SYMBOL);
    final Lock lock = rwlock.writeLock(); // REVIEWME tips: exclusive lock for system table in MonetDB
    long startQueryTime = System.currentTimeMillis();
    final Connection dbConn = getDbConnection(taskMasterNode, registry);
    try {
        lock.lock();
        if (singleStatement) {
            // #1 invoke COPY INTO file
            if (useCreateTableAS) {
                dbConn.setAutoCommit(true);
                fetchedRows = executeCreateTableAs(dbConn, selectQuery, taskTableName);
            } else {
                dbConn.setAutoCommit(false);
                fetchedRows = executeCopyIntoFile(dbConn, selectQuery, taskTableName, tmpFile);
                dbConn.commit();
            }
        } else {
            dbConn.setAutoCommit(false);
            // #1-1 DDL before map SELECT queries (e.g., create view)
            issueDDLBeforeSelect(dbConn, queries);
            // #1-2 invoke COPY INTO file
            if (useCreateTableAS) {
                fetchedRows = executeCreateTableAs(dbConn, selectQuery, taskTableName);
            } else {
                fetchedRows = executeCopyIntoFile(dbConn, selectQuery, taskTableName, tmpFile);
            }
            // #1-3 DDL after map SELECT queries (e.g., drop view)
            issueDDLAfterSelect(dbConn, queries);
            dbConn.commit();
        }
        assert (fetchedRows != -1);
    } catch (SQLException sqle) {
        String errmsg = "Failed to execute a query: \n" + query;
        LOG.error(errmsg, sqle);
        // NOTE(review): rollback is attempted only in the singleStatement
        // branch, although the multi-statement branch also disables
        // auto-commit — presumably intentional, but worth confirming.
        if (singleStatement) {
            try {
                dbConn.rollback();
            } catch (SQLException rbe) {
                LOG.warn("Rollback failed", rbe);
            }
        }
        // Remove the (possibly partial) spool file asynchronously.
        new FileDeletionThread(tmpFile, LOG).start();
        throw new GridException(errmsg, sqle);
    } catch (Throwable e) {
        String errmsg = "Failed to execute a query: \n" + query;
        LOG.fatal(errmsg, e);
        if (singleStatement) {
            try {
                dbConn.rollback();
            } catch (SQLException rbe) {
                LOG.warn("Rollback failed", rbe);
            }
        }
        new FileDeletionThread(tmpFile, LOG).start();
        throw new GridException(errmsg, e);
    } finally {
        lock.unlock();
        JDBCUtils.closeQuietly(dbConn);
    }
    long queryExecTime = System.currentTimeMillis() - startQueryTime;
    String sentFileName = null;
    long sendResultTime = -1L; // would be null for a local task
    if (fetchedRows > 0) {
        // #2 send file to the destination node, deleting the temp file
        // afterwards regardless of transfer success.
        long startResultTime = System.currentTimeMillis();
        try {
            TransferUtils.sendfile(tmpFile, dstAddr, dstPort, false, true);
            sendResultTime = System.currentTimeMillis() - startResultTime;
            sentFileName = tmpFile.getName();
        } catch (IOException e) {
            throw new GridException("failed to sending a file", e);
        } finally {
            new FileDeletionThread(tmpFile, LOG).start();
        }
    }
    return new ParallelSQLMapTaskResult(taskMasterNode, sentFileName, taskNumber, fetchedRows,
            queryExecTime, sendResultTime);
}