A list of usage examples for the java.sql Connection.rollback() method.
void rollback() throws SQLException;
The method is invoked on a java.sql.Connection object: it undoes all changes made in the current transaction and releases any database locks the connection holds. It should only be used when auto-commit mode has been disabled.
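Before the real-world examples, here is a minimal sketch of the pattern they all share: disable auto-commit, do the work, commit on success, roll back on failure, restore auto-commit. Everything here (TransferDao, the account table, the ds field) is hypothetical, invented purely for illustration.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;

public class TransferDao {

    private final DataSource ds; // hypothetical: any pooled DataSource

    public TransferDao(DataSource ds) {
        this.ds = ds;
    }

    public void transfer(long fromId, long toId, long cents) throws SQLException {
        try (Connection conn = ds.getConnection()) {
            conn.setAutoCommit(false); // start a unit of work
            try (PreparedStatement debit = conn.prepareStatement(
                        "UPDATE account SET balance = balance - ? WHERE id = ?");
                 PreparedStatement credit = conn.prepareStatement(
                        "UPDATE account SET balance = balance + ? WHERE id = ?")) {
                debit.setLong(1, cents);
                debit.setLong(2, fromId);
                debit.executeUpdate();
                credit.setLong(1, cents);
                credit.setLong(2, toId);
                credit.executeUpdate();
                conn.commit(); // both updates take effect together
            } catch (SQLException e) {
                conn.rollback(); // or neither takes effect
                throw e;
            } finally {
                conn.setAutoCommit(true); // restore the pool-friendly default
            }
        }
    }
}

Restoring auto-commit in the finally block matters with pooled connections: the next borrower usually assumes the JDBC default.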
From source file: dao.ColTopicDaoDb.java
/**
 * add a new topic in collabrum or network blog
 * @param collabrumId
 * @param topic
 * @param message
 * @param userId
 * @param userLogin
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
 */
public void addColTopic(String collabrumId, String topic, String message, String userId, String userLogin,
        String title, String fontSize, String fontFace, String fontColor, String moodId, String bgColor)
        throws BaseDaoException {

    // if (RegexStrUtil.isNull(collabrumId) || RegexStrUtil.isNull(message) ||
    //         RegexStrUtil.isNull(topic) || RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(userLogin)) {
    if (RegexStrUtil.isNull(collabrumId) || RegexStrUtil.isNull(topic) || RegexStrUtil.isNull(userId)
            || RegexStrUtil.isNull(userLogin)) {
        throw new BaseDaoException("params are null");
    }

    /**
     * check if this user is the diaryadmin or moderator for this collabrum topic - if yes, delete
     * otherwise barf!
     */
    /*
    if (!isOrganizer(collabrumId, userLogin, userId)) {
        throw new BaseDaoException("Cannot add collabrum topic, neither DiaryAdmin nor Moderator"
                + userLogin + collabrumId);
    }
    */

    /**
     * Get scalability datasource for colltopic partitioned on collabrumid
     */
    String sourceName = scalabilityManager.getWriteScalability(collabrumId);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, addColTopic() " + sourceName + " userId = " + userId);
    }

    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        addQuery.run(conn, collabrumId, message, topic, userId, title);
        addAttributeQuery.run(conn, "LAST_INSERT_ID()", fontSize, fontFace, fontColor, moodId, bgColor);
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("connection close exception for adding coltopic & attr", e2);
            }
            throw new BaseDaoException("error occured while rollingback entries from adding coltopic", e1);
        }
        throw new BaseDaoException("AutoCommit(false) and add exception while adding coltopic", e);
    }

    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException("commit exception in adding coltopic", e3);
    }

    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException("connection close exception in adding coltopic", e4);
    }

    Fqn fqn = cacheUtil.fqn(DbConstants.COLTOPICS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.COLLABRUM);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
}
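One caveat in the example above: if ds.getConnection() itself throws, conn is still null when the catch block calls conn.rollback(), so the caller sees a NullPointerException instead of the intended BaseDaoException. A null-safe sketch of the same transaction, reusing the example's BaseDaoException, ds, and addQuery under the assumption that they behave as shown above:

// Sketch: the same transactional insert with a null-safe rollback path.
public void addColTopicSafely(String collabrumId, String message, String topic,
        String userId, String title) throws BaseDaoException {
    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        addQuery.run(conn, collabrumId, message, topic, userId, title);
        conn.commit();
    } catch (Exception e) {
        if (conn != null) { // guard: getConnection() may have thrown before conn was assigned
            try {
                conn.rollback();
            } catch (Exception rollbackEx) {
                e.addSuppressed(rollbackEx); // keep both failures visible
            }
        }
        throw new BaseDaoException("addColTopic failed and was rolled back", e);
    } finally {
        if (conn != null) {
            try {
                conn.setAutoCommit(true);
                conn.close();
            } catch (Exception closeEx) {
                // close failures are deliberately swallowed in this sketch
            }
        }
    }
}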
From source file: com.cloudera.sqoop.manager.OracleManager.java
@Override
public String[] listTables() {
    Connection conn = null;
    Statement stmt = null;
    ResultSet rset = null;
    List<String> tables = new ArrayList<String>();

    try {
        conn = getConnection();
        stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        rset = stmt.executeQuery(QUERY_LIST_TABLES);
        while (rset.next()) {
            tables.add(rset.getString(1));
        }
        conn.commit();
    } catch (SQLException e) {
        try {
            conn.rollback();
        } catch (Exception ex) {
            LOG.error("Failed to rollback transaction", ex);
        }
        LOG.error("Failed to list tables", e);
    } finally {
        if (rset != null) {
            try {
                rset.close();
            } catch (SQLException ex) {
                LOG.error("Failed to close resultset", ex);
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ex) {
                LOG.error("Failed to close statement", ex);
            }
        }
        try {
            close();
        } catch (SQLException ex) {
            LOG.error("Unable to discard connection", ex);
        }
    }

    return tables.toArray(new String[tables.size()]);
}
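The query above is read-only, yet the code still ends the transaction explicitly, with commit() on success and rollback() on failure; leaving a transaction open would keep its server-side resources pinned until the connection is recycled. A condensed sketch of that read-then-end idiom, with the method name and query invented for illustration:

static List<String> readFirstColumn(Connection conn, String query) throws SQLException {
    List<String> names = new ArrayList<>();
    try (Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
         ResultSet rs = stmt.executeQuery(query)) {
        while (rs.next()) {
            names.add(rs.getString(1));
        }
        conn.commit(); // end even a read-only transaction explicitly
    } catch (SQLException e) {
        try {
            conn.rollback(); // release whatever the failed transaction still holds
        } catch (SQLException rollbackEx) {
            e.addSuppressed(rollbackEx); // don't let rollback failure mask the real error
        }
        throw e;
    }
    return names;
}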
From source file: com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java
private void rollback(Connection conn) {
    try {
        conn.rollback();
    } catch (Exception ex) {
        // do nothing
    }
}
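A swallow-everything helper like this is typically paired with a rethrow of the original failure, so a secondary rollback problem never masks the root cause. A sketch of that pairing, with the dataSource field, table, and method names invented for illustration:

private void quietRollback(Connection conn) {
    if (conn == null) {
        return;
    }
    try {
        conn.rollback();
    } catch (Exception ex) {
        // deliberately ignored: the original exception is the one worth reporting
    }
}

public void deleteJob(long jobId) throws SQLException {
    Connection conn = null;
    try {
        conn = dataSource.getConnection(); // hypothetical DataSource field
        conn.setAutoCommit(false);
        try (PreparedStatement ps = conn.prepareStatement("DELETE FROM managed_job WHERE id = ?")) {
            ps.setLong(1, jobId);
            ps.executeUpdate();
        }
        conn.commit();
    } catch (SQLException e) {
        quietRollback(conn); // best effort; the SQLException below is what callers see
        throw e;
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
}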
From source file: net.ageto.gyrex.persistence.carbonado.storage.internal.SchemaSupportJob.java
private IStatus processSchemas(final Collection<RepositoryContentType> contentTypes,
        final JDBCConnectionCapability jdbcConnectionCapability, final IProgressMonitor monitor)
        throws Exception {
    // spin the migration loop
    Connection connection = null;
    boolean wasAutoCommit = true; // default to auto-commit
    try {
        // get connection
        connection = jdbcConnectionCapability.getConnection();

        // remember auto-commit state
        wasAutoCommit = connection.getAutoCommit();

        // collect result
        final MultiStatus result = new MultiStatus(CarbonadoActivator.SYMBOLIC_NAME, 0,
                String.format("Database schema verification result for database %s.",
                        repository.getDescription()),
                null);

        // verify schemas
        final SubMonitor subMonitor = SubMonitor.convert(monitor, contentTypes.size());
        for (final RepositoryContentType contentType : contentTypes) {
            result.add(processSchema(contentType, connection, subMonitor.newChild(1)));
        }

        // commit any pending changes if migration was allowed
        if (commitWhenDone) {
            connection.commit();
        } else {
            connection.rollback();
        }

        return result;
    } finally {
        if (null != connection) {
            try {
                // verify that auto-commit state was not modified
                if (wasAutoCommit != connection.getAutoCommit()) {
                    // Carbonado uses auto-commit to detect if a transaction was in progress
                    // when the connection was acquired previously; in this case it does not
                    // close the connection, which is fine. However, if any schema-support
                    // implementation removed the auto-commit flag, Carbonado will no longer
                    // close the connection because it thinks a transaction is in progress;
                    // thus we need to reset the auto-commit flag in this case!
                    LOG.debug("Resetting auto-commit flag on connection {} due to modifications during schema migration",
                            connection);
                    connection.setAutoCommit(wasAutoCommit);
                }
                jdbcConnectionCapability.yieldConnection(connection);
            } catch (final Exception e) {
                throw new IllegalStateException(
                        "Unable to properly return a database connection to the pool. This will lead to resource leaks! "
                                + e.getMessage(), e);
            }
        }
    }
}
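The commitWhenDone flag effectively turns the whole run into a dry run when false: every statement executes, then rollback() discards the effects. A compact sketch of that dry-run idiom, with everything except the JDBC calls invented for illustration:

// Sketch: run statements against a connection, keeping the changes only when apply == true.
static void runMigration(Connection conn, List<String> statements, boolean apply) throws SQLException {
    boolean wasAutoCommit = conn.getAutoCommit();
    conn.setAutoCommit(false);
    try (Statement stmt = conn.createStatement()) {
        for (String sql : statements) {
            stmt.execute(sql);
        }
        if (apply) {
            conn.commit();   // keep the changes
        } else {
            conn.rollback(); // dry run: discard everything that just executed
        }
    } catch (SQLException e) {
        conn.rollback();
        throw e;
    } finally {
        conn.setAutoCommit(wasAutoCommit); // leave the pooled connection as we found it
    }
}

Note that on databases where DDL statements commit implicitly, rollback() cannot undo them, so the dry-run guarantee only holds for plain DML.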
From source file: dao.CollModeratorDaoDb.java
/**
 * Unblock a member from collabrums.
 * @param colIdList the list of the collabrumIds
 * @param member member who is to be unblocked
 * @param userId admin of collabrum
 * @param userLogin login of the admin of collabrum
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
 */
public void unBlockMemberFromCollabrums(ArrayList colIdList, String member, String userId, String userLogin)
        throws BaseDaoException {

    if ((colIdList == null) || RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(member)
            || RegexStrUtil.isNull(userLogin)) {
        throw new BaseDaoException("params are null");
    }

    Hdlogin hdlogin = getLoginid(member);
    String memberId = hdlogin.getValue(DbConstants.LOGIN_ID);
    if (RegexStrUtil.isNull(memberId)) {
        throw new BaseDaoException("memberId is null");
    }

    /**
     * check if this user has the permission to add the moderator - isDiaryAdmin or Organizer
     */
    for (int i = 0; i < colIdList.size(); i++) {
        if (!isOrganizer((String) colIdList.get(i), userLogin, userId)) {
            throw new BaseDaoException("User does not have permission to add moderators, collabrumId ="
                    + colIdList.get(i) + " userId = " + userId);
        }
    }

    /**
     * Get scalability datasource for collabrum, colladmin (not partitioned)
     */
    String sourceName = scalabilityManager.getWriteZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, unBlockMember() " + sourceName + " userId = " + userId);
    }

    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        for (int i = 0; i < colIdList.size(); i++) {
            if (!isMember((String) colIdList.get(i), memberId)) {
                deleteBlockedMember.run(conn, (String) colIdList.get(i), memberId);
            }
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("conn.close() error, unBlockMember, memberid = " + memberId, e2);
            }
            throw new BaseDaoException("error occured while addModertor, memberId = " + memberId, e1);
        }
    }

    // connection commit
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException(" commit() exception, for unBlockMember() " + "memberId = " + memberId, e3);
    }

    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException(" conn.close() exception, for commit(), unBlockMember() " + "memberId = "
                + memberId + " userId = " + userId, e4);
    }

    /**
     * JBoss methods.
     * fqn - fully qualified name.
     * Check if the userpage is already set in the cache;
     * if it exists, remove it from the cache.
     */
    Fqn fqn = cacheUtil.fqn(DbConstants.MEM_AS_MODERATOR_LIST);
    if (treeCache.exists(fqn, member)) {
        treeCache.remove(fqn, member);
    }
    fqn = cacheUtil.fqn(DbConstants.MEM_AS_ORGANIZER_LIST);
    if (treeCache.exists(fqn, member)) {
        treeCache.remove(fqn, member);
    }
    fqn = cacheUtil.fqn(DbConstants.USER_PAGE);
    if (treeCache.exists(fqn, member)) {
        treeCache.remove(fqn, member);
    }

    /**
     * remove this from the userlogin also
     */
    fqn = cacheUtil.fqn(DbConstants.MEM_AS_MODERATOR_LIST);
    if (treeCache.exists(fqn, userLogin)) {
        treeCache.remove(fqn, userLogin);
    }

    StringBuffer sb = new StringBuffer();
    for (int i = 0; i < colIdList.size(); i++) {
        sb.append(colIdList.get(i));
        sb.append("-");
        sb.append(memberId);
        fqn = cacheUtil.fqn(DbConstants.ORGANIZER);
        if (treeCache.exists(fqn, sb.toString())) {
            treeCache.remove(fqn, sb.toString());
        }
        sb.delete(0, sb.length());
        fqn = cacheUtil.fqn(DbConstants.COLLABRUM);
        if (treeCache.exists(fqn, colIdList.get(i))) {
            treeCache.remove(fqn, colIdList.get(i));
        }
    }

    fqn = cacheUtil.fqn(DbConstants.BLOCKED_COLLABRUM_LIST);
    Object obj = treeCache.get(fqn, memberId);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }
}
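The loop above runs all deletes in a single transaction, so one failure discards every iteration. When per-item recovery is preferable, JDBC savepoints let the code roll back only the failed iteration while keeping the earlier ones. A sketch against an invented blocked_member table:

static int unblockMembers(Connection conn, List<Long> collabrumIds, long memberId) throws SQLException {
    conn.setAutoCommit(false);
    int unblocked = 0;
    try (PreparedStatement ps = conn.prepareStatement(
            "DELETE FROM blocked_member WHERE collabrum_id = ? AND member_id = ?")) {
        for (long collabrumId : collabrumIds) {
            Savepoint sp = conn.setSavepoint();
            try {
                ps.setLong(1, collabrumId);
                ps.setLong(2, memberId);
                ps.executeUpdate();
                unblocked++;
            } catch (SQLException e) {
                conn.rollback(sp); // undo only this iteration; earlier deletes survive
            }
        }
        conn.commit();
    } catch (SQLException e) {
        conn.rollback(); // failure outside the per-row handling: discard everything
        throw e;
    } finally {
        conn.setAutoCommit(true);
    }
    return unblocked;
}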
From source file: com.cloudera.sqoop.manager.OracleManager.java
/**
 * The concept of database in Oracle is mapped to schemas. Each schema
 * is identified by the corresponding username.
 */
@Override
public String[] listDatabases() {
    Connection conn = null;
    Statement stmt = null;
    ResultSet rset = null;
    List<String> databases = new ArrayList<String>();

    try {
        conn = getConnection();
        stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        rset = stmt.executeQuery(QUERY_LIST_DATABASES);
        while (rset.next()) {
            databases.add(rset.getString(1));
        }
        conn.commit();
    } catch (SQLException e) {
        try {
            conn.rollback();
        } catch (Exception ex) {
            LOG.error("Failed to rollback transaction", ex);
        }
        if (e.getErrorCode() == ERROR_TABLE_OR_VIEW_DOES_NOT_EXIST) {
            LOG.error("The catalog view DBA_USERS was not found. "
                    + "This may happen if the user does not have DBA privileges. "
                    + "Please check privileges and try again.");
            LOG.debug("Full trace for ORA-00942 exception", e);
        } else {
            LOG.error("Failed to list databases", e);
        }
    } finally {
        if (rset != null) {
            try {
                rset.close();
            } catch (SQLException ex) {
                LOG.error("Failed to close resultset", ex);
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ex) {
                LOG.error("Failed to close statement", ex);
            }
        }
        try {
            close();
        } catch (SQLException ex) {
            LOG.error("Unable to discard connection", ex);
        }
    }

    return databases.toArray(new String[databases.size()]);
}
From source file: com.noelios.restlet.ext.jdbc.JdbcClientHelper.java
/**
 * Helper method.
 *
 * @param connection
 * @param returnGeneratedKeys
 * @param sqlRequests
 * @return the result of the last executed SQL request
 */
private JdbcResult handleSqlRequests(Connection connection, boolean returnGeneratedKeys,
        List<String> sqlRequests) {
    JdbcResult result = null;
    try {
        connection.setAutoCommit(true);
        final Statement statement = connection.createStatement();
        for (final String sqlRequest : sqlRequests) {
            statement.execute(sqlRequest,
                    returnGeneratedKeys ? Statement.RETURN_GENERATED_KEYS : Statement.NO_GENERATED_KEYS);
            result = new JdbcResult(statement);
        }

        // Commit any changes to the database
        if (!connection.getAutoCommit()) {
            connection.commit();
        }
    } catch (SQLException se) {
        getLogger().log(Level.WARNING, "Error while processing the SQL requests", se);
        try {
            if (!connection.getAutoCommit()) {
                connection.rollback();
            }
        } catch (SQLException se2) {
            getLogger().log(Level.WARNING, "Error while rolling back the transaction", se2);
        }
    }
    return result;
}
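The getAutoCommit() guard around rollback() is not just tidiness: the JDBC contract has Connection.rollback() throw SQLException when the connection is in auto-commit mode. A small guard helper capturing that rule:

static void rollbackIfTransactional(Connection conn) throws SQLException {
    if (conn != null && !conn.getAutoCommit()) {
        conn.rollback(); // only meaningful (and legal) outside auto-commit mode
    }
}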
From source file: com.netflix.metacat.usermetadata.mysql.MySqlLookupService.java
/**
 * Saves the lookup value.
 * @param name lookup name
 * @param values multiple values
 * @return returns the lookup with the given name.
 */
@Override
public Lookup setValues(final String name, final Set<String> values) {
    Lookup lookup = null;
    try {
        final Connection conn = getDataSource().getConnection();
        try {
            lookup = findOrCreateLookupByName(name, conn);
            Set<String> inserts = Sets.newHashSet();
            Set<String> deletes = Sets.newHashSet();
            final Set<String> lookupValues = lookup.getValues();
            if (lookupValues == null || lookupValues.isEmpty()) {
                inserts = values;
            } else {
                inserts = Sets.difference(values, lookupValues).immutableCopy();
                deletes = Sets.difference(lookupValues, values).immutableCopy();
            }
            lookup.setValues(values);
            if (!inserts.isEmpty()) {
                insertLookupValues(lookup.getId(), inserts, conn);
            }
            if (!deletes.isEmpty()) {
                deleteLookupValues(lookup.getId(), deletes, conn);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        final String message = String.format("Failed to set the lookup values for name %s", name);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
    return lookup;
}
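The nested try/catch/finally here maps cleanly onto try-with-resources, which guarantees the close() the inner finally performs. A sketch of the same shape, assuming getDataSource(), findOrCreateLookupByName(), Lookup, and UserMetadataServiceException behave as in the original:

@Override
public Lookup setValues(final String name, final Set<String> values) {
    try (Connection conn = getDataSource().getConnection()) {
        try {
            final Lookup lookup = findOrCreateLookupByName(name, conn);
            // ... compute the insert/delete sets and apply them, as in the original ...
            conn.commit();
            return lookup;
        } catch (SQLException e) {
            conn.rollback(); // undo the partial insert/delete mix
            throw e;
        }
    } catch (SQLException e) {
        throw new UserMetadataServiceException(
                String.format("Failed to set the lookup values for name %s", name), e);
    }
}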
From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java
/**
 * This will grab the next compaction request off of the queue, and assign
 * it to the worker.
 * @param workerId id of the worker calling this, will be recorded in the db
 * @return an info element for this compaction request, or null if there is no work to do now.
 */
public CompactionInfo findNextToCompact(String workerId) throws MetaException {
    try {
        Connection dbConn = null;
        CompactionInfo info = new CompactionInfo();
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "select cq_id, cq_database, cq_table, cq_partition, "
                    + "cq_type from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
            LOG.debug("Going to execute query <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            if (!rs.next()) {
                LOG.debug("No compactions found ready to compact");
                dbConn.rollback();
                return null;
            }
            info.id = rs.getLong(1);
            info.dbname = rs.getString(2);
            info.tableName = rs.getString(3);
            info.partName = rs.getString(4);
            switch (rs.getString(5).charAt(0)) {
            case MAJOR_TYPE:
                info.type = CompactionType.MAJOR;
                break;
            case MINOR_TYPE:
                info.type = CompactionType.MINOR;
                break;
            default:
                throw new MetaException("Unexpected compaction type " + rs.getString(5));
            }

            // Now, update this record as being worked on by this worker.
            long now = getDbTime(dbConn);
            s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " + "cq_start = " + now
                    + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to update compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }
            LOG.debug("Going to commit");
            dbConn.commit();
            return info;
        } catch (SQLException e) {
            LOG.error("Unable to select next element for compaction, " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "findNextToCompact(workerId:" + workerId + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        return findNextToCompact(workerId);
    }
}
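One detail here generalizes well: even the no-work path calls rollback() before returning, so the SERIALIZABLE transaction never stays open while the worker idles. A compressed sketch of the same select-then-claim idiom against an invented task_queue table (the string-built SQL mirrors the original; a PreparedStatement would be safer in production):

static Long claimNextTask(Connection conn, String workerId) throws SQLException {
    conn.setAutoCommit(false);
    conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    try (Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "SELECT id FROM task_queue WHERE state = 'ready' LIMIT 1")) { // dialect permitting
        if (!rs.next()) {
            conn.rollback(); // nothing to claim: still end the serializable transaction
            return null;
        }
        long id = rs.getLong(1);
        int updated = stmt.executeUpdate(
            "UPDATE task_queue SET state = 'working', worker = '" + workerId + "' WHERE id = " + id);
        if (updated != 1) {
            conn.rollback(); // lost the race to another worker
            return null;
        }
        conn.commit();
        return id;
    } catch (SQLException e) {
        try {
            conn.rollback();
        } catch (SQLException rollbackEx) {
            e.addSuppressed(rollbackEx);
        }
        throw e;
    }
}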
From source file: com.iucosoft.eavertizare.dao.impl.ClientsDaoImpl.java
@Override
public void saveLocal(Firma firma, List<Client> clientsList) {
    String query = "INSERT INTO " + firma.getTabelaClientiLocal() + " VALUES(?, ?, ?, ?, ?, ?, ?, ?)";
    Connection con = null;
    PreparedStatement ps = null;
    try {
        con = dataSource.getConnection();
        ps = con.prepareStatement(query);
        // Set auto-commit to false
        con.setAutoCommit(false);
        for (Client client : clientsList) {
            ps.setInt(1, client.getId());
            ps.setString(2, client.getNume());
            ps.setString(3, client.getPrenume());
            ps.setInt(4, client.getNrTelefon());
            ps.setString(5, client.getEmail());
            ps.setTimestamp(6, (new java.sql.Timestamp(client.getDateExpirare().getTime())));
            ps.setInt(7, firma.getId());
            ps.setInt(8, 0);
            ps.addBatch();
        }
        // Create an int[] to hold returned values
        int[] count = ps.executeBatch();
        // Explicitly commit statements to apply changes
        con.commit();
    } catch (SQLException e) {
        e.printStackTrace();
        try {
            con.rollback();
        } catch (SQLException ex) {
            Logger.getLogger(ClientsDaoImpl.class.getName()).log(Level.SEVERE, null, ex);
        }
    } finally {
        try {
            ps.close();
            con.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}
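As in the first DAO example, this catch block can throw NullPointerException: if dataSource.getConnection() fails, con is still null when con.rollback() runs, and the finally block has the same issue with ps. A try-with-resources variant avoids both and propagates failures instead of swallowing them, keeping the example's batch semantics (Firma and Client are assumed to behave as above):

public void saveLocal(Firma firma, List<Client> clientsList) throws SQLException {
    String query = "INSERT INTO " + firma.getTabelaClientiLocal() + " VALUES(?, ?, ?, ?, ?, ?, ?, ?)";
    try (Connection con = dataSource.getConnection();
         PreparedStatement ps = con.prepareStatement(query)) {
        con.setAutoCommit(false);
        try {
            for (Client client : clientsList) {
                ps.setInt(1, client.getId());
                ps.setString(2, client.getNume());
                ps.setString(3, client.getPrenume());
                ps.setInt(4, client.getNrTelefon());
                ps.setString(5, client.getEmail());
                ps.setTimestamp(6, new java.sql.Timestamp(client.getDateExpirare().getTime()));
                ps.setInt(7, firma.getId());
                ps.setInt(8, 0);
                ps.addBatch();
            }
            ps.executeBatch(); // all rows in one round trip
            con.commit();      // the whole batch lands or none of it does
        } catch (SQLException e) {
            con.rollback();    // con is guaranteed non-null here
            throw e;
        }
    }
}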