Usage examples for java.sql.Connection.commit()
void commit() throws SQLException;
Makes all changes made since the previous commit or rollback permanent and releases any database locks currently held by this Connection object. This method should be used only when auto-commit mode has been disabled.
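Every example on this page follows the same basic shape: turn off auto-commit with setAutoCommit(false), execute one or more statements, then call commit() once so all of the work becomes permanent together, and roll back if anything fails. A minimal sketch of that pattern; the JDBC URL, the ACCOUNTS table, and the transfer method are placeholders for illustration, not taken from any of the projects below:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class CommitExample {
    // Moves 'amount' between two rows as a single transaction.
    public static void transfer(String url, long fromId, long toId, int amount) throws SQLException {
        try (Connection conn = DriverManager.getConnection(url)) {
            conn.setAutoCommit(false); // take control of transaction boundaries
            try (PreparedStatement debit = conn.prepareStatement(
                         "UPDATE ACCOUNTS SET BALANCE = BALANCE - ? WHERE ID = ?");
                 PreparedStatement credit = conn.prepareStatement(
                         "UPDATE ACCOUNTS SET BALANCE = BALANCE + ? WHERE ID = ?")) {
                debit.setInt(1, amount);
                debit.setLong(2, fromId);
                debit.executeUpdate();
                credit.setInt(1, amount);
                credit.setLong(2, toId);
                credit.executeUpdate();
                conn.commit();   // both updates become permanent together
            } catch (SQLException e) {
                conn.rollback(); // undo any partial work before rethrowing
                throw e;
            }
        }
    }
}

The key point, visible in every example that follows, is that commit() is issued exactly once, after every statement in the unit of work has succeeded.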
From source file: com.glaf.core.db.TableDataManager.java
public Collection<TableModel> insertAll(String systemName, TableDefinition tableDefinition, String seqNo,
        Collection<TableModel> rows) {
    com.glaf.core.config.Environment.setCurrentSystemName(systemName);
    logger.debug("tableDefinition=" + tableDefinition);
    logger.debug("idColumn=" + tableDefinition.getIdColumn().toString());
    if (tableDefinition.getTableName() != null) {
        tableDefinition.setTableName(tableDefinition.getTableName().toUpperCase());
    }
    Map<String, Object> colMap = new java.util.HashMap<String, Object>();
    Map<String, String> exprMap = new java.util.HashMap<String, String>();
    List<ColumnDefinition> exprColumns = new java.util.ArrayList<ColumnDefinition>();
    ColumnModel idColumn = new ColumnModel();
    ColumnDefinition idCol = tableDefinition.getIdColumn();
    if (idCol != null && idCol.getColumnName() != null) {
        idColumn.setColumnName(idCol.getColumnName());
        idColumn.setJavaType(idCol.getJavaType());
        idColumn.setValueExpression(idCol.getValueExpression());
        exprColumns.add(idCol);
        exprMap.put(idCol.getColumnName().toLowerCase(), idCol.getValueExpression());
    }
    Iterator<ColumnDefinition> iter = tableDefinition.getColumns().iterator();
    while (iter.hasNext()) {
        ColumnDefinition cell = iter.next();
        if (StringUtils.isNotEmpty(cell.getValueExpression())) {
            exprMap.put(cell.getColumnName().toLowerCase(), cell.getValueExpression());
            exprColumns.add(cell);
        }
    }
    logger.debug("expr map:" + exprMap);
    List<TableModel> inertRows = new java.util.ArrayList<TableModel>();
    logger.debug(" rows size = " + rows.size());
    // logger.debug(" key map: " + keyMap);
    Iterator<TableModel> iterator = rows.iterator();
    while (iterator.hasNext()) {
        TableModel tableData = iterator.next();
        ColumnModel myPK = tableData.getIdColumn();
        ColumnModel pkColumn = new ColumnModel();
        pkColumn.setColumnName(idColumn.getColumnName());
        pkColumn.setJavaType(idColumn.getJavaType());
        for (ColumnModel column : tableData.getColumns()) {
            colMap.put(column.getColumnName(), column.getValue());
        }
        for (ColumnDefinition c : exprColumns) {
            ColumnModel x = new ColumnModel();
            x.setColumnName(c.getColumnName());
            x.setJavaType(c.getJavaType());
            x.setValueExpression(c.getValueExpression());
            tableData.addColumn(x);
        }
        for (ColumnModel cell : tableData.getColumns()) {
            String expr = exprMap.get(cell.getColumnName().toLowerCase());
            if (StringUtils.isNotEmpty(expr)) {
                if (ExpressionConstants.NOW_EXPRESSION.equals(expr)
                        || ExpressionConstants.CURRENT_YYYYMMDD_EXPRESSION.equals(expr)) {
                    if (cell.getDateValue() == null) {
                        cell.setDateValue(new Date());
                        cell.setValue(cell.getDateValue());
                    }
                }
                if (ExpressionConstants.ID_EXPRESSION.equals(expr)) {
                    if (cell.getValue() == null) {
                        if (StringUtils.equals(cell.getJavaType(), "Integer")) {
                            cell.setValue(getEntityService().nextId().intValue());
                        } else if (StringUtils.equals(cell.getJavaType(), "Long")) {
                            cell.setValue(getEntityService().nextId());
                        } else {
                            cell.setValue(getEntityService().getNextId());
                        }
                    }
                }
                if (ExpressionConstants.SEQNO_EXPRESSION.equals(expr)) {
                    cell.setValue(seqNo);
                }
                if (ExpressionConstants.UUID_EXPRESSION.equals(expr)) {
                    cell.setValue(UUID32.getUUID());
                }
            }
        }
        if (myPK != null && myPK.getValue() != null) {
            pkColumn.setValue(myPK.getValue());
        } else {
            if (StringUtils.equals(pkColumn.getJavaType(), "Integer")) {
                pkColumn.setValue(getEntityService().nextId().intValue());
                logger.debug("------------int--------------");
            } else if (StringUtils.equals(pkColumn.getJavaType(), "Long")) {
                pkColumn.setValue(getEntityService().nextId());
            } else {
                pkColumn.setValue(getEntityService().getNextId());
            }
        }
        tableData.removeColumn(pkColumn);
        tableData.addColumn(pkColumn);
        tableData.setIdColumn(pkColumn);
        inertRows.add(tableData);
    }
    if (!inertRows.isEmpty()) {
        logger.debug("inert rows size:" + inertRows.size());
        for (TableModel tableData : inertRows) {
            tableData.setTableName(tableDefinition.getTableName());
            logger.debug(tableData.toString());
            SqlSession sqlSession = null;
            Connection conn = null;
            try {
                conn = DBConnectionFactory.getConnection(systemName);
                conn.setAutoCommit(false);
                sqlSession = getSqlSessionFactory().openSession(ExecutorType.BATCH, conn);
                sqlSession.insert("insertTableData", tableData);
                sqlSession.commit();
                conn.commit();
            } catch (Exception ex) {
                JdbcUtils.rollback(conn);
                logger.error(ex);
                ex.printStackTrace();
                throw new RuntimeException(ex);
            } finally {
                JdbcUtils.close(sqlSession);
                JdbcUtils.close(conn);
            }
        }
    }
    return inertRows;
}
From source file:com.cloudera.sqoop.manager.DB2ManagerImportManualTest.java
@Before
public void setUp() {
    super.setUp();
    SqoopOptions options = new SqoopOptions(CONNECT_STRING, getTableName());
    options.setUsername(DATABASE_USER);
    options.setPassword(DATABASE_PASSWORD);
    manager = new Db2Manager(options);
    // Drop the existing table, if there is one.
    Connection conn = null;
    Statement stmt = null;
    try {
        conn = manager.getConnection();
        stmt = conn.createStatement();
        stmt.execute("DROP TABLE " + getTableName());
    } catch (SQLException sqlE) {
        LOG.info("Table was not dropped: " + sqlE.getMessage());
    } finally {
        try {
            if (null != stmt) {
                stmt.close();
            }
        } catch (Exception ex) {
            LOG.warn("Exception while closing stmt", ex);
        }
    }
    // Create and populate table
    try {
        conn = manager.getConnection();
        conn.setAutoCommit(false);
        stmt = conn.createStatement();
        // create the database table and populate it with data.
        stmt.executeUpdate("CREATE TABLE " + getTableName() + " (" + "id INT NOT NULL, "
                + "name VARCHAR(24) NOT NULL, " + "salary FLOAT, " + "dept VARCHAR(32), "
                + "PRIMARY KEY (id))");
        stmt.executeUpdate("INSERT INTO " + getTableName() + " VALUES(" + "1,'Aaron', "
                + "1000000.00,'engineering')");
        stmt.executeUpdate("INSERT INTO " + getTableName() + " VALUES(" + "2,'Bob', " + "400.00,'sales')");
        stmt.executeUpdate("INSERT INTO " + getTableName() + " VALUES(" + "3,'Fred', 15.00," + "'marketing')");
        conn.commit();
    } catch (SQLException sqlE) {
        LOG.error("Encountered SQL Exception: ", sqlE);
        sqlE.printStackTrace();
        fail("SQLException when running test setUp(): " + sqlE);
    } finally {
        try {
            if (null != stmt) {
                stmt.close();
            }
        } catch (Exception ex) {
            LOG.warn("Exception while closing connection/stmt", ex);
        }
    }
}
From source file:com.globalsight.everest.permission.Permission.java
/**
 * Update table permissiongroup. If a permission id is 300 or greater, the
 * id is incremented by 1; permission_set is then rewritten with the new string.
 */
private static void updateUnbalancedPermissionGroupSet() {
    Connection c = null;
    PreparedStatement stmt = null;
    PreparedStatement stmt1 = null;
    ResultSet rs = null;
    try {
        c = ConnectionPool.getConnection();
        c.setAutoCommit(false);
        stmt = c.prepareStatement(SQL_SELECT_PERMISSION_SET_FROM_PERMISSION_GROUP);
        stmt1 = c.prepareStatement(SQL_UPDATE_PERMISSION_SET);
        rs = stmt.executeQuery();
        while (rs.next()) {
            long id = rs.getLong(1);
            String permissionSet = rs.getString(2);
            String[] permissionIdArray = permissionSet.split("\\|");
            StringBuffer newPermissionSet = new StringBuffer();
            for (String permissionId : permissionIdArray) {
                if (StringUtils.isNotEmpty(permissionId)) {
                    long lId = Long.parseLong(permissionId);
                    if (lId >= 300) {
                        lId += 1;
                    }
                    newPermissionSet.append("|").append(lId);
                }
            }
            newPermissionSet.append("|");
            stmt1.setString(1, newPermissionSet.toString());
            stmt1.setLong(2, id);
            stmt1.addBatch();
        }
        stmt1.executeBatch();
        c.commit();
    } catch (Exception e) {
        logger.error("Failed to update permission_group from database.", e);
    } finally {
        ConnectionPool.silentClose(rs);
        ConnectionPool.silentClose(stmt);
        ConnectionPool.silentClose(stmt1);
        ConnectionPool.silentReturnConnection(c);
    }
}
From source file:com.glaf.core.db.TableDataManager.java
public void insertAllTableData(String systemName, List<TableModel> rows) {
    SqlSession sqlSession = null;
    Connection conn = null;
    try {
        conn = DBConnectionFactory.getConnection(systemName);
        conn.setAutoCommit(false);
        sqlSession = getSqlSessionFactory().openSession(ExecutorType.BATCH, conn);
        for (TableModel model : rows) {
            if (model.getTableName() != null) {
                model.setTableName(model.getTableName().toUpperCase());
            }
            sqlSession.insert("insertTableData", model);
        }
        sqlSession.commit();
        conn.commit();
    } catch (Exception ex) {
        JdbcUtils.rollback(conn);
        logger.error(ex);
        ex.printStackTrace();
        throw new RuntimeException(ex);
    } finally {
        JdbcUtils.close(sqlSession);
        JdbcUtils.close(conn);
    }
}
From source file:com.mmnaseri.dragonfly.data.impl.DefaultDataAccess.java
private synchronized List<Integer> endBatch() {
    if (!isInBatchMode()) {
        throw new NoBatchOperationError();
    }
    localCounts.get().clear();
    final List<BatchOperationDescriptor> descriptors = batchOperation.get();
    batchOperation.remove();
    batch.set(false);
    final ArrayList<Integer> result = new ArrayList<Integer>();
    if (descriptors == null) {
        return result;
    }
    log.info("There are " + descriptors.size() + " operation stack(s) to perform");
    while (!descriptors.isEmpty()) {
        final BatchOperationDescriptor descriptor = descriptors.get(0);
        descriptors.remove(0);
        final int[] batchResult;
        log.info("Executing batch operation for statement: " + descriptor.getSql());
        final PreparedStatement preparedStatement = descriptor.getPreparedStatement();
        final Connection connection;
        try {
            connection = preparedStatement.getConnection();
            long time = System.nanoTime();
            batchResult = preparedStatement.executeBatch();
            connection.commit();
            log.info(batchResult.length + " operation(s) completed successfully in "
                    + (System.nanoTime() - time) + "ns");
        } catch (SQLException e) {
            throw new BatchOperationExecutionError("Failed to execute operation batch", e);
        }
        if (StatementType.getStatementType(descriptor.getSql()).equals(StatementType.INSERT)) {
            try {
                final List<Object> deferredEntities = deferredKeys.get();
                final ResultSet generatedKeys = preparedStatement.getGeneratedKeys();
                while (generatedKeys.next()) {
                    final Object entity = deferredEntities.get(0);
                    deferredEntities.remove(0);
                    final EntityHandler<Object> entityHandler = entityHandlerContext.getHandler(entity);
                    entityHandler.setKey(entity, session.getDatabaseDialect().retrieveKey(generatedKeys));
                }
            } catch (SQLException e) {
                throw new BatchOperationExecutionError("Failed to retrieve generated keys", e);
            }
        }
        for (int i : batchResult) {
            result.add(i);
        }
        cleanUpStatement(preparedStatement);
    }
    return result;
}
From source file:com.glaf.core.db.TableDataManager.java
/**
 * Save table data from a JSON array.
 *
 * @param tableName
 * @param rows
 */
public void saveTableData(String systemName, String tableName, JSONArray rows) {
    if (rows == null || rows.isEmpty()) {
        return;
    }
    com.glaf.core.config.Environment.setCurrentSystemName(systemName);
    TableDefinition tableDefinition = getTableDefinitionService().getTableDefinition(tableName);
    if (tableDefinition != null && tableDefinition.getIdColumn() != null) {
        if (tableDefinition.getTableName() != null) {
            tableDefinition.setTableName(tableDefinition.getTableName().toUpperCase());
        }
        SqlSession sqlSession = null;
        Connection conn = null;
        try {
            conn = DBConnectionFactory.getConnection(systemName);
            conn.setAutoCommit(false);
            sqlSession = getSqlSessionFactory().openSession(ExecutorType.BATCH, conn);
            for (int i = 0, len = rows.size(); i < len; i++) {
                JSONObject jsonObject = rows.getJSONObject(i);
                this.saveTableData(sqlSession, systemName, tableName, jsonObject);
            }
            sqlSession.commit();
            conn.commit();
        } catch (Exception ex) {
            JdbcUtils.rollback(conn);
            logger.error(ex);
            ex.printStackTrace();
            throw new RuntimeException(ex);
        } finally {
            JdbcUtils.close(sqlSession);
            JdbcUtils.close(conn);
        }
    }
}
From source file:dao.ColMessageDaoDb.java
public void addColMessage(String tid, String mid, String message, String topic, String userId, String userLogin,
        String collabrumId, boolean personalBlog, String fontSize, String fontFace, String fontColor,
        String moodId, String bgColor) throws BaseDaoException {
    if (RegexStrUtil.isNull(tid) || RegexStrUtil.isNull(mid) || RegexStrUtil.isNull(userId)) {
        throw new BaseDaoException("params are null");
    }
    if (RegexStrUtil.isNull(message) && RegexStrUtil.isNull(topic)) {
        throw new BaseDaoException("message & topic are null");
    }
    /**
     * check if this is personal blog. if not, check the permission - diaryAdmin or Organizer
     */
    if (!personalBlog) {
        if (!isOrganizer(collabrumId, userLogin, userId) && !isColMember(collabrumId, userId)) {
            throw new BaseDaoException("permission denied as this user is not a member of collabrum " + userId);
        }
    }
    /**
     * Get scalability datasource for collmessages partitioned on tid
     */
    String sourceName = scalabilityManager.getWriteScalability(tid);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, addColMessage() " + sourceName + " userId = " + userId);
    }
    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        addQuery.run(conn, tid, mid, message, topic, userId);
        addAttrQuery.run(conn, tid, "LAST_INSERT_ID()", fontSize, fontFace, fontColor, moodId, bgColor);
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("connection close exception", e2);
            }
            throw new BaseDaoException("error occured while rollingback entries from ColmessageDaoDb", e1);
        }
    }
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException("commit exception", e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException("connection close exception", e4);
    }
    /**
     * delete collabrum messages
     */
    StringBuffer sb = new StringBuffer(collabrumId);
    sb.append("-");
    sb.append(tid);
    String key = sb.toString();
    Fqn fqn = cacheUtil.fqn(DbConstants.COLMSGS);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }
    fqn = cacheUtil.fqn(DbConstants.COLTOPIC);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }
    fqn = cacheUtil.fqn(DbConstants.COLTOPICS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.COLLABRUM);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
}
From source file:eagle.storage.jdbc.entity.impl.JdbcEntityWriterImpl.java
@Override
public List<String> write(List<E> entities) throws Exception {
    List<String> keys = new ArrayList<String>();
    if (LOG.isDebugEnabled())
        LOG.debug("Writing " + entities.size() + " entities");
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    Connection connection = ConnectionManagerFactory.getInstance().getConnection();
    // set auto commit false and commit by hands for 3x~5x better performance
    connection.setAutoCommit(false);
    try {
        TorqueStatementPeerImpl<E> peer = connectionManager
                .getStatementExecutor(this.jdbcEntityDefinition.getJdbcTableName());
        for (E entity : entities) {
            entity.setEncodedRowkey(peer.getPrimaryKeyBuilder().build(entity));
            ColumnValues columnValues = JdbcEntitySerDeserHelper.buildColumnValues(entity,
                    this.jdbcEntityDefinition);
            // TODO: implement batch insert for better performance
            ObjectKey key = peer.delegate().doInsert(columnValues, connection);
            try {
                if (key != null) {
                    keys.add((String) key.getValue());
                } else {
                    keys.add(entity.getEncodedRowkey());
                }
            } catch (ClassCastException ex) {
                throw new RuntimeException(
                        "Key is not in type of String (VARCHAR) , but JdbcType (java.sql.Types): "
                                + key.getJdbcType() + ", value: " + key.getValue(), ex);
            }
        }
        // Why not commit in finally: give up all if any single entity throws exception to make sure consistency guarantee
        if (LOG.isDebugEnabled()) {
            LOG.debug("Committing writing");
        }
        connection.commit();
    } catch (Exception ex) {
        LOG.error("Failed to write records, rolling back", ex);
        connection.rollback();
        throw ex;
    } finally {
        stopWatch.stop();
        if (LOG.isDebugEnabled())
            LOG.debug("Closing connection");
        connection.close();
    }
    LOG.info(String.format("Wrote %s records in %s ms (table: %s)", keys.size(), stopWatch.getTime(),
            this.jdbcEntityDefinition.getJdbcTableName()));
    return keys;
}
From source file:com.cloudera.sqoop.manager.PostgresqlTest.java
@Before
public void setUp() {
    super.setUp();
    LOG.debug("Setting up another postgresql test: " + CONNECT_STRING);
    SqoopOptions options = new SqoopOptions(CONNECT_STRING, TABLE_NAME);
    options.setUsername(DATABASE_USER);
    ConnManager manager = null;
    Connection connection = null;
    Statement st = null;
    try {
        manager = new PostgresqlManager(options);
        connection = manager.getConnection();
        connection.setAutoCommit(false);
        st = connection.createStatement();
        // create the database table and populate it with data.
        try {
            // Try to remove the table first. DROP TABLE IF EXISTS didn't
            // get added until pg 8.3, so we just use "DROP TABLE" and ignore
            // any exception here if one occurs.
            st.executeUpdate("DROP TABLE " + TABLE_NAME);
        } catch (SQLException e) {
            LOG.info("Couldn't drop table " + TABLE_NAME + " (ok)");
            LOG.info(e.toString());
            // Now we need to reset the transaction.
            connection.rollback();
        }
        st.executeUpdate("CREATE TABLE " + TABLE_NAME + " (" + "id INT NOT NULL PRIMARY KEY, "
                + "name VARCHAR(24) NOT NULL, " + "start_date DATE, " + "salary FLOAT, " + "dept VARCHAR(32))");
        st.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES("
                + "1,'Aaron','2009-05-14',1000000.00,'engineering')");
        st.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES(" + "2,'Bob','2009-04-20',400.00,'sales')");
        st.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES(" + "3,'Fred','2009-01-23',15.00,'marketing')");
        connection.commit();
    } catch (SQLException sqlE) {
        LOG.error("Encountered SQL Exception: " + sqlE);
        sqlE.printStackTrace();
        fail("SQLException when running test setUp(): " + sqlE);
    } finally {
        try {
            if (null != st) {
                st.close();
            }
            if (null != manager) {
                manager.close();
            }
        } catch (SQLException sqlE) {
            LOG.warn("Got SQLException when closing connection: " + sqlE);
        }
    }
    LOG.debug("setUp complete.");
}