List of usage examples for java.sql PreparedStatement clearWarnings
void clearWarnings() throws SQLException;

Clears all the warnings reported on this Statement object. After a call to this method, getWarnings returns null until a new warning is reported for this statement.
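Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern (not drawn from the examples that follow; the class name StatementWarnings, the method queryAll, and the assumption that the SQL takes a single integer parameter are illustrative): each execution's SQLWarning chain is read via getWarnings() and drained, then clearParameters() and clearWarnings() reset the statement before it is reused.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;

public final class StatementWarnings {

    /**
     * Executes the given query once per id, logging and then clearing any
     * driver warnings between executions so the reused statement starts clean.
     */
    public static void queryAll(Connection conn, String sql, int[] ids) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (int id : ids) {
                ps.setInt(1, id);
                try (ResultSet rs = ps.executeQuery()) {
                    while (rs.next()) {
                        // process row ...
                    }
                }
                // Drain the warning chain reported for this execution, if any.
                for (SQLWarning w = ps.getWarnings(); w != null; w = w.getNextWarning()) {
                    System.err.println("SQLWarning " + w.getSQLState() + ": " + w.getMessage());
                }
                // Reset parameters and warnings before the statement is reused.
                ps.clearParameters();
                ps.clearWarnings();
            }
        }
    }
}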
From source file:org.horizontaldb.integration.InterceptorMockEnvironmentTest.java
@Test
public void shouldValidateMultiLevelShardedCalls() throws SQLException {
    ConversationRegistry mockRegistry = EasyMock.createMock(ConversationRegistry.class);
    TenantContext mockTenantContext = EasyMock.createMock(TenantContext.class);
    org.apache.tomcat.jdbc.pool.DataSource mockDataSource = EasyMock
            .createMock(org.apache.tomcat.jdbc.pool.DataSource.class);
    DataSourceResource mockDataSourceResource = new DataSourceResource(mockDataSource);
    ShardBeanResolver mockShardBeanResolver = EasyMock.createMock(ShardBeanResolver.class);
    ShardBeanEnricher mockShardBeanEnricher = EasyMock.createMock(ShardBeanEnricher.class);
    Connection mockConnection = EasyMock.createMock(Connection.class);
    PreparedStatement mockStatement = EasyMock.createMock(PreparedStatement.class);
    ResultSet mockResultset = EasyMock.createMock(ResultSet.class);

    conversationRegistryMockProxy.setMockRegistry(mockRegistry);
    tenantContextMockProxy.setMockTenantContext(mockTenantContext);
    dataSourceFactoryMockProxy.setMockDataSourceResource(mockDataSourceResource);
    shardBeanResolverMockProxy.setMockResolver(mockShardBeanResolver);
    shardBeanEnricherMockProxy.setMockEnricher(mockShardBeanEnricher);

    // This is the protocol that the interceptors should follow during a sharded call
    mockRegistry.startConversation(testUserHelper.getJoeToken());
    expect(mockRegistry.hasConversation(TestUser.JOE.name())).andReturn(true);
    expect(mockTenantContext.resolveCurrentTenantIdentifier()).andReturn(TestUser.JOE.name());
    mockRegistry.addResource(TestUser.JOE.name(), mockDataSourceResource);

    // resolve Dao for TestServiceTwo
    expect(mockShardBeanResolver.getBean(same(PersonDao.class), anyObject(ShardContext.class))).andReturn(null);
    mockRegistry.addResource(same(TestUser.JOE.name()), anyObject(PersonDao.class));
    mockShardBeanEnricher.setup(anyObject(PersonDao.class), anyObject(ShardContext.class));
    mockShardBeanEnricher.tearDown(anyObject(PersonDao.class), anyObject(ShardContext.class));

    // Hibernate transaction flow
    expect(mockDataSource.getConnection()).andReturn(mockConnection);
    mockConnection.setReadOnly(true);
    expect(mockConnection.getAutoCommit()).andReturn(false);
    expect(mockConnection.prepareStatement(anyObject(String.class))).andReturn(mockStatement);
    expect(mockStatement.executeQuery()).andReturn(mockResultset);
    expect(mockStatement.getWarnings()).andReturn(null);
    mockStatement.clearWarnings();
    expect(mockStatement.getMaxRows()).andReturn(0);
    expect(mockStatement.getQueryTimeout()).andReturn(0);
    expect(mockResultset.next()).andReturn(true);
    expect(mockResultset.next()).andReturn(false);
    expect(mockResultset.getLong(anyObject(String.class))).andReturn(0l);
    expect(mockResultset.wasNull()).andReturn(false);
    expect(mockResultset.getLong(anyObject(String.class))).andReturn(0l);
    expect(mockResultset.wasNull()).andReturn(true);
    expect(mockResultset.getString(anyObject(String.class))).andReturn("mockPerson");
    expect(mockResultset.wasNull()).andReturn(false);
    mockResultset.close();
    mockStatement.close();
    mockConnection.commit();
    // end Hibernate transaction

    // resolve Dao for TestServiceThree
    expect(mockRegistry.hasConversation(TestUser.JOE.name())).andReturn(true);
    expect(mockShardBeanResolver.getBean(same(DepartmentDao.class), anyObject(ShardContext.class)))
            .andReturn(null);
    mockRegistry.addResource(same(TestUser.JOE.name()), anyObject(DepartmentDaoImpl.class));
    mockShardBeanEnricher.setup(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));
    mockShardBeanEnricher.tearDown(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));

    // Hibernate transaction flow
    expect(mockConnection.prepareStatement(anyObject(String.class))).andReturn(mockStatement);
    expect(mockStatement.executeQuery()).andReturn(mockResultset);
    expect(mockStatement.getWarnings()).andReturn(null);
    mockStatement.clearWarnings();
    expect(mockStatement.getMaxRows()).andReturn(0);
    expect(mockStatement.getQueryTimeout()).andReturn(0);
    expect(mockResultset.next()).andReturn(true);
    expect(mockResultset.next()).andReturn(false);
    expect(mockResultset.getLong(anyObject(String.class))).andReturn(0l);
    expect(mockResultset.wasNull()).andReturn(false);
    expect(mockResultset.getString(anyObject(String.class))).andReturn("mockDepartment");
    expect(mockResultset.wasNull()).andReturn(false);
    mockResultset.close();
    mockStatement.close();
    // end Hibernate transaction

    // cleanup after service calls
    mockDataSource.close(true);
    mockRegistry.teardownConversation(testUserHelper.getJoeToken());

    replay(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);
    try {
        testService.authenticate(testUserHelper.getJoeToken());
        testService.callNestedServiceChain(TestUser.JOE.name());
    } finally {
        testService.logoff(testUserHelper.getJoeToken());
    }
    verify(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);
}
From source file:org.horizontaldb.integration.InterceptorMockEnvironmentTest.java
@Test
public void shouldValidateOneLevelShardedCall() throws SQLException {
    ConversationRegistry mockRegistry = EasyMock.createMock(ConversationRegistry.class);
    TenantContext mockTenantContext = EasyMock.createMock(TenantContext.class);
    org.apache.tomcat.jdbc.pool.DataSource mockDataSource = EasyMock
            .createMock(org.apache.tomcat.jdbc.pool.DataSource.class);
    DataSourceResource mockDataSourceResource = new DataSourceResource(mockDataSource);
    ShardBeanResolver mockShardBeanResolver = EasyMock.createMock(ShardBeanResolver.class);
    ShardBeanEnricher mockShardBeanEnricher = EasyMock.createMock(ShardBeanEnricher.class);
    Connection mockConnection = EasyMock.createMock(Connection.class);
    PreparedStatement mockStatement = EasyMock.createMock(PreparedStatement.class);
    ResultSet mockResultset = EasyMock.createMock(ResultSet.class);

    conversationRegistryMockProxy.setMockRegistry(mockRegistry);
    tenantContextMockProxy.setMockTenantContext(mockTenantContext);
    dataSourceFactoryMockProxy.setMockDataSourceResource(mockDataSourceResource);
    shardBeanResolverMockProxy.setMockResolver(mockShardBeanResolver);
    shardBeanEnricherMockProxy.setMockEnricher(mockShardBeanEnricher);

    // This is the protocol that the interceptors should follow during a sharded call
    mockRegistry.startConversation(testUserHelper.getJoeToken());
    expect(mockRegistry.hasConversation(TestUser.JOE.name())).andReturn(true);
    expect(mockTenantContext.resolveCurrentTenantIdentifier()).andReturn(TestUser.JOE.name());
    mockRegistry.addResource(TestUser.JOE.name(), mockDataSourceResource);
    mockRegistry.addResource(same(TestUser.JOE.name()), anyObject(DepartmentDaoImpl.class));
    expect(mockShardBeanResolver.getBean(same(DepartmentDao.class), anyObject(ShardContext.class)))
            .andReturn(null);
    mockShardBeanEnricher.setup(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));
    mockShardBeanEnricher.tearDown(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));
    mockDataSource.close(true);
    mockRegistry.teardownConversation(testUserHelper.getJoeToken());
    // end protocol

    // This is the flow of a Hibernate transaction which is irrelevant, but had to be defined because of the
    // mocked dataSource.
    expect(mockDataSource.getConnection()).andReturn(mockConnection);
    mockConnection.setReadOnly(true);
    expect(mockConnection.getAutoCommit()).andReturn(false);
    expect(mockConnection.prepareStatement(anyObject(String.class))).andReturn(mockStatement);
    expect(mockStatement.executeQuery()).andReturn(mockResultset);
    expect(mockStatement.getWarnings()).andReturn(null);
    mockStatement.clearWarnings();
    expect(mockStatement.getMaxRows()).andReturn(0);
    expect(mockStatement.getQueryTimeout()).andReturn(0);
    expect(mockResultset.next()).andReturn(true);
    expect(mockResultset.next()).andReturn(false);
    expect(mockResultset.getLong(anyObject(String.class))).andReturn(0l);
    expect(mockResultset.wasNull()).andReturn(false);
    mockResultset.close();
    mockStatement.close();
    mockConnection.commit();
    // end Hibernate transaction

    replay(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);
    try {
        ShardContext context = new ShardContext(TestUser.JOE.name());
        testService.authenticate(testUserHelper.getJoeToken());
        Long actualCount = testService.getCountOfDepartments(context);
        assertEquals(0, actualCount.longValue());
    } finally {
        testService.logoff(testUserHelper.getJoeToken());
    }
    verify(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);
}
From source file:org.apache.jackrabbit.core.fs.db.DatabaseFileSystem.java
/**
 * Resets the given <code>PreparedStatement</code> by clearing the parameters
 * and warnings contained.
 * <p/>
 * NOTE: This method MUST be called in a synchronized context as neither
 * this method nor the <code>PreparedStatement</code> instance on which it
 * operates are thread safe.
 *
 * @param stmt The <code>PreparedStatement</code> to reset. If
 *             <code>null</code> this method does nothing.
 */
protected void resetStatement(PreparedStatement stmt) {
    if (stmt != null) {
        try {
            stmt.clearParameters();
            stmt.clearWarnings();
        } catch (SQLException se) {
            log.error("failed resetting PreparedStatement", se);
        }
    }
}
From source file:org.apache.jackrabbit.core.persistence.bundle.BundleDbPersistenceManager.java
/**
 * Resets the given <code>PreparedStatement</code> by clearing the
 * parameters and warnings contained.
 *
 * @param stmt The <code>PreparedStatement</code> to reset. If
 *             <code>null</code> this method does nothing.
 */
protected synchronized void resetStatement(PreparedStatement stmt) {
    if (stmt != null) {
        try {
            stmt.clearParameters();
            stmt.clearWarnings();
        } catch (SQLException se) {
            logException("Failed resetting PreparedStatement", se);
        }
    }
}
From source file:org.apache.jackrabbit.core.persistence.db.DatabasePersistenceManager.java
/**
 * Resets the given <code>PreparedStatement</code> by clearing the parameters
 * and warnings contained.
 * <p/>
 * NOTE: This method MUST be called in a synchronized context as neither
 * this method nor the <code>PreparedStatement</code> instance on which it
 * operates are thread safe.
 *
 * @param stmt The <code>PreparedStatement</code> to reset. If
 *             <code>null</code> this method does nothing.
 */
protected void resetStatement(PreparedStatement stmt) {
    if (stmt != null) {
        try {
            stmt.clearParameters();
            stmt.clearWarnings();
        } catch (SQLException se) {
            logException("failed resetting PreparedStatement", se);
        }
    }
}
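The three Jackrabbit resetStatement variants above support the same pattern: prepared statements are cached and reused, so callers reset parameters and warnings instead of closing the statement. A hypothetical caller is sketched below; CachedStatementDao, selectById, and exists are illustrative names, not Jackrabbit API.

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Illustrative only: shows how a cached statement is used and then reset.
public abstract class CachedStatementDao {

    /** A statement prepared once and reused across calls. */
    protected PreparedStatement selectById;

    /** Clears parameters and warnings, as in the examples above. */
    protected abstract void resetStatement(PreparedStatement stmt);

    public synchronized boolean exists(long id) throws SQLException {
        try {
            selectById.setLong(1, id);
            try (ResultSet rs = selectById.executeQuery()) {
                return rs.next();
            }
        } finally {
            // Leave the shared statement clean for the next caller.
            resetStatement(selectById);
        }
    }
}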
From source file:org.LexGrid.util.sql.lgTables.SQLTableUtilities.java
/**
 * Remove the root ('@') or tail ('@@') relationship node for the given
 * coding scheme.
 *
 * @param codingScheme
 *            The coding scheme to remove the root node from.
 * @param relationName
 *            The relation container for the root node. If null, the native
 *            relation for the coding scheme is used.
 * @param root
 *            - true for root ('@'), false for tail ('@@').
 * @throws SQLException
 */
public void removeRootRelationNode(String codingScheme, String relationName, boolean root) throws SQLException {
    if (!doTablesExist())
        return;

    int count = 0;
    Connection conn = getConnection();
    try {
        // Define the SQL statements to locate and delete affected entries
        // ...
        StringBuffer sb = new StringBuffer("SELECT * FROM ")
                .append(stc_.getTableName(SQLTableConstants.ENTITY_ASSOCIATION_TO_ENTITY))
                .append(" WHERE " + stc_.codingSchemeNameOrId + " = ? AND " + stc_.containerNameOrContainerDC
                        + " = ? AND ")
                .append(root ? (stc_.sourceEntityCodeOrId + " = '@'") : (stc_.targetEntityCodeOrId + " = '@@'"));
        PreparedStatement getRoots = conn.prepareStatement(gsm_.modifySQL(sb.toString()));

        sb = new StringBuffer("DELETE FROM ")
                .append(stc_.getTableName(SQLTableConstants.ENTITY_ASSOCIATION_TO_ENTITY))
                .append(" WHERE " + stc_.codingSchemeNameOrId + " = ? AND " + stc_.containerNameOrContainerDC
                        + " = ? AND " + stc_.entityCodeOrAssociationId + " = ?")
                .append(" AND " + stc_.sourceCSIdOrEntityCodeNS + " = ? AND " + stc_.sourceEntityCodeOrId + " = ?")
                .append(" AND " + stc_.targetCSIdOrEntityCodeNS + " = ? AND " + stc_.targetEntityCodeOrId + " = ?");
        PreparedStatement deleteAssoc = conn.prepareStatement(gsm_.modifySQL(sb.toString()));

        sb = new StringBuffer("DELETE FROM ")
                .append(stc_.getTableName(SQLTableConstants.ENTITY_ASSOCIATION_TO_E_QUALS))
                .append(" WHERE " + stc_.codingSchemeNameOrId + " = ? AND "
                        + SQLTableConstants.TBLCOL_MULTIATTRIBUTESKEY + " = ?");
        PreparedStatement deleteCQual = conn.prepareStatement(gsm_.modifySQL(sb.toString()));

        // Locate matching entries and clear, along with associated
        // qualifiers ...
        try {
            getRoots.setString(1, codingScheme);
            getRoots.setString(2, relationName != null ? relationName : getNativeRelation(codingScheme));
            ResultSet rs = getRoots.executeQuery();
            while (rs.next()) {
                // Remove matching qualifiers ...
                String multiKey = rs.getString(SQLTableConstants.TBLCOL_MULTIATTRIBUTESKEY);
                if (multiKey != null && multiKey.length() > 0) {
                    deleteCQual.clearParameters();
                    deleteCQual.clearWarnings();
                    deleteCQual.setString(1, codingScheme);
                    deleteCQual.setString(2, multiKey);
                    deleteCQual.execute();
                }
                // Remove the association/source/target ...
                deleteAssoc.clearParameters();
                deleteAssoc.clearWarnings();
                deleteAssoc.setString(1, codingScheme);
                deleteAssoc.setString(2, relationName);
                deleteAssoc.setString(3, rs.getString(stc_.entityCodeOrAssociationId));
                deleteAssoc.setString(4, rs.getString(stc_.sourceCSIdOrEntityCodeNS));
                deleteAssoc.setString(5, rs.getString(stc_.sourceEntityCodeOrId));
                deleteAssoc.setString(6, rs.getString(stc_.targetCSIdOrEntityCodeNS));
                deleteAssoc.setString(7, rs.getString(stc_.targetEntityCodeOrId));
                if (!deleteAssoc.execute() && deleteAssoc.getUpdateCount() > 0)
                    count += deleteAssoc.getUpdateCount();
            }
            rs.close();
        } finally {
            getRoots.close();
            deleteAssoc.close();
            deleteCQual.close();
        }
    } finally {
        returnConnection(conn);
        log.info("Removed " + count + " root associations.");
    }
}
From source file:org.sakaiproject.nakamura.lite.storage.jdbc.JDBCStorageClient.java
private Map<String, Object> internalGet(String keySpace, String columnFamily, String rid)
        throws StorageClientException {
    ResultSet body = null;
    Map<String, Object> result = Maps.newHashMap();
    PreparedStatement selectStringRow = null;
    try {
        selectStringRow = getStatement(keySpace, columnFamily, SQL_BLOCK_SELECT_ROW, rid, null);
        inc("A");
        selectStringRow.clearWarnings();
        selectStringRow.clearParameters();
        selectStringRow.setString(1, rid);
        body = selectStringRow.executeQuery();
        inc("B");
        if (body.next()) {
            Types.loadFromStream(rid, result, body.getBinaryStream(1), columnFamily);
        }
    } catch (SQLException e) {
        LOGGER.warn("Failed to perform get operation on " + keySpace + ":" + columnFamily + ":" + rid, e);
        if (passivate != null) {
            LOGGER.warn("Was Pasivated ", passivate);
        }
        if (closed != null) {
            LOGGER.warn("Was Closed ", closed);
        }
        throw new StorageClientException(e.getMessage(), e);
    } catch (IOException e) {
        LOGGER.warn("Failed to perform get operation on " + keySpace + ":" + columnFamily + ":" + rid, e);
        if (passivate != null) {
            LOGGER.warn("Was Pasivated ", passivate);
        }
        if (closed != null) {
            LOGGER.warn("Was Closed ", closed);
        }
        throw new StorageClientException(e.getMessage(), e);
    } finally {
        close(body, "B");
        close(selectStringRow, "A");
    }
    return result;
}
From source file:org.sakaiproject.nakamura.lite.storage.jdbc.JDBCStorageClient.java
public void insert(String keySpace, String columnFamily, String key, Map<String, Object> values,
        boolean probablyNew) throws StorageClientException {
    checkClosed();
    Map<String, PreparedStatement> statementCache = Maps.newHashMap();
    boolean autoCommit = true;
    try {
        autoCommit = startBlock();
        String rid = rowHash(keySpace, columnFamily, key);
        for (Entry<String, Object> e : values.entrySet()) {
            String k = e.getKey();
            Object o = e.getValue();
            if (o instanceof byte[]) {
                throw new RuntimeException(
                        "Invalid content in " + k + ", storing byte[] rather than streaming it");
            }
        }
        Map<String, Object> m = get(keySpace, columnFamily, key);
        for (Entry<String, Object> e : values.entrySet()) {
            String k = e.getKey();
            Object o = e.getValue();
            if (o instanceof RemoveProperty || o == null) {
                m.remove(k);
            } else {
                m.put(k, o);
            }
        }
        LOGGER.debug("Saving {} {} {} ", new Object[] { key, rid, m });
        if (probablyNew && !UPDATE_FIRST_SEQUENCE.equals(getSql(SQL_STATEMENT_SEQUENCE))) {
            PreparedStatement insertBlockRow = getStatement(keySpace, columnFamily, SQL_BLOCK_INSERT_ROW, rid,
                    statementCache);
            insertBlockRow.clearWarnings();
            insertBlockRow.clearParameters();
            insertBlockRow.setString(1, rid);
            InputStream insertStream = null;
            try {
                insertStream = Types.storeMapToStream(rid, m, columnFamily);
            } catch (UTFDataFormatException e) {
                throw new DataFormatException(INVALID_DATA_ERROR, e);
            }
            if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                insertBlockRow.setBinaryStream(2, insertStream, insertStream.available());
            } else {
                insertBlockRow.setBinaryStream(2, insertStream);
            }
            int rowsInserted = 0;
            try {
                rowsInserted = insertBlockRow.executeUpdate();
            } catch (SQLException e) {
                LOGGER.debug(e.getMessage(), e);
            }
            if (rowsInserted == 0) {
                PreparedStatement updateBlockRow = getStatement(keySpace, columnFamily, SQL_BLOCK_UPDATE_ROW,
                        rid, statementCache);
                updateBlockRow.clearWarnings();
                updateBlockRow.clearParameters();
                updateBlockRow.setString(2, rid);
                try {
                    insertStream = Types.storeMapToStream(rid, m, columnFamily);
                } catch (UTFDataFormatException e) {
                    throw new DataFormatException(INVALID_DATA_ERROR, e);
                }
                if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                    updateBlockRow.setBinaryStream(1, insertStream, insertStream.available());
                } else {
                    updateBlockRow.setBinaryStream(1, insertStream);
                }
                if (updateBlockRow.executeUpdate() == 0) {
                    throw new StorageClientException("Failed to save " + rid);
                } else {
                    LOGGER.debug("Updated {} ", rid);
                }
            } else {
                LOGGER.debug("Inserted {} ", rid);
            }
        } else {
            PreparedStatement updateBlockRow = getStatement(keySpace, columnFamily, SQL_BLOCK_UPDATE_ROW, rid,
                    statementCache);
            updateBlockRow.clearWarnings();
            updateBlockRow.clearParameters();
            updateBlockRow.setString(2, rid);
            InputStream updateStream = null;
            try {
                updateStream = Types.storeMapToStream(rid, m, columnFamily);
            } catch (UTFDataFormatException e) {
                throw new DataFormatException(INVALID_DATA_ERROR, e);
            }
            if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                updateBlockRow.setBinaryStream(1, updateStream, updateStream.available());
            } else {
                updateBlockRow.setBinaryStream(1, updateStream);
            }
            if (updateBlockRow.executeUpdate() == 0) {
                PreparedStatement insertBlockRow = getStatement(keySpace, columnFamily, SQL_BLOCK_INSERT_ROW,
                        rid, statementCache);
                insertBlockRow.clearWarnings();
                insertBlockRow.clearParameters();
                insertBlockRow.setString(1, rid);
                try {
                    updateStream = Types.storeMapToStream(rid, m, columnFamily);
                } catch (UTFDataFormatException e) {
                    throw new DataFormatException(INVALID_DATA_ERROR, e);
                }
                if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                    insertBlockRow.setBinaryStream(2, updateStream, updateStream.available());
                } else {
                    insertBlockRow.setBinaryStream(2, updateStream);
                }
                if (insertBlockRow.executeUpdate() == 0) {
                    throw new StorageClientException("Failed to save " + rid);
                } else {
                    LOGGER.debug("Inserted {} ", rid);
                }
            } else {
                LOGGER.debug("Updated {} ", rid);
            }
        }
        if ("1".equals(getSql(USE_BATCH_INSERTS))) {
            Set<PreparedStatement> removeSet = Sets.newHashSet();
            // execute the updates and add the necessary inserts.
            Map<PreparedStatement, List<Entry<String, Object>>> insertSequence = Maps.newHashMap();
            Set<PreparedStatement> insertSet = Sets.newHashSet();
            for (Entry<String, Object> e : values.entrySet()) {
                String k = e.getKey();
                Object o = e.getValue();
                if (shouldIndex(keySpace, columnFamily, k)) {
                    if (o instanceof RemoveProperty || o == null) {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        removeStringColumn.addBatch();
                        removeSet.add(removeStringColumn);
                    } else {
                        // remove all previous values
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        removeStringColumn.addBatch();
                        removeSet.add(removeStringColumn);
                        // insert new values, as we just removed them we know we can insert, no need to attempt update
                        // the only thing that we know is the colum value changes so we have to re-index the whole
                        // property
                        Object[] valueMembers = (o instanceof Object[]) ? (Object[]) o : new Object[] { o };
                        for (Object ov : valueMembers) {
                            String valueMember = ov.toString();
                            PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                                    SQL_INSERT_STRING_COLUMN, rid, statementCache);
                            insertStringColumn.setString(1, valueMember);
                            insertStringColumn.setString(2, rid);
                            insertStringColumn.setString(3, k);
                            insertStringColumn.addBatch();
                            LOGGER.debug("Insert Index {} {}", k, valueMember);
                            insertSet.add(insertStringColumn);
                            List<Entry<String, Object>> insertSeq = insertSequence.get(insertStringColumn);
                            if (insertSeq == null) {
                                insertSeq = Lists.newArrayList();
                                insertSequence.put(insertStringColumn, insertSeq);
                            }
                            insertSeq.add(e);
                        }
                    }
                }
            }
            if (!StorageClientUtils.isRoot(key)) {
                // create a holding map containing a rowhash of the parent and then process the entry to generate a update operation.
                Map<String, Object> autoIndexMap = ImmutableMap.of(InternalContent.PARENT_HASH_FIELD,
                        (Object) rowHash(keySpace, columnFamily, StorageClientUtils.getParentObjectPath(key)));
                for (Entry<String, Object> e : autoIndexMap.entrySet()) {
                    // remove all previous values
                    PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                            SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                    removeStringColumn.setString(1, rid);
                    removeStringColumn.setString(2, e.getKey());
                    removeStringColumn.addBatch();
                    removeSet.add(removeStringColumn);
                    PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                            SQL_INSERT_STRING_COLUMN, rid, statementCache);
                    insertStringColumn.setString(1, (String) e.getValue());
                    insertStringColumn.setString(2, rid);
                    insertStringColumn.setString(3, e.getKey());
                    insertStringColumn.addBatch();
                    LOGGER.debug("Insert {} {}", e.getKey(), e.getValue());
                    insertSet.add(insertStringColumn);
                    List<Entry<String, Object>> insertSeq = insertSequence.get(insertStringColumn);
                    if (insertSeq == null) {
                        insertSeq = Lists.newArrayList();
                        insertSequence.put(insertStringColumn, insertSeq);
                    }
                    insertSeq.add(e);
                }
            }
            LOGGER.debug("Remove set {}", removeSet);
            for (PreparedStatement pst : removeSet) {
                pst.executeBatch();
            }
            LOGGER.debug("Insert set {}", insertSet);
            for (PreparedStatement pst : insertSet) {
                int[] res = pst.executeBatch();
                List<Entry<String, Object>> insertSeq = insertSequence.get(pst);
                for (int i = 0; i < res.length; i++) {
                    Entry<String, Object> e = insertSeq.get(i);
                    if (res[i] <= 0 && res[i] != -2) {
                        // Oracle drivers respond with -2 on a successful insert when the number is not known
                        // http://download.oracle.com/javase/1.3/docs/guide/jdbc/spec2/jdbc2.1.frame6.html
                        LOGGER.warn("Index failed for {} {} ", new Object[] { rid, e.getKey(), e.getValue() });
                    } else {
                        LOGGER.debug("Index inserted for {} {} ", new Object[] { rid, e.getKey(), e.getValue() });
                    }
                }
            }
        } else {
            for (Entry<String, Object> e : values.entrySet()) {
                String k = e.getKey();
                Object o = e.getValue();
                if (shouldIndex(keySpace, columnFamily, k)) {
                    if (o instanceof RemoveProperty || o == null) {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.clearWarnings();
                        removeStringColumn.clearParameters();
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        int nrows = removeStringColumn.executeUpdate();
                        if (nrows == 0) {
                            m = get(keySpace, columnFamily, key);
                            LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                        } else {
                            LOGGER.debug("Removed Index {} {} {} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                        }
                    } else {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.clearWarnings();
                        removeStringColumn.clearParameters();
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        int nrows = removeStringColumn.executeUpdate();
                        if (nrows == 0) {
                            m = get(keySpace, columnFamily, key);
                            LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                        } else {
                            LOGGER.debug("Removed Index {} {} {} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                        }
                        Object[] os = (o instanceof Object[]) ? (Object[]) o : new Object[] { o };
                        for (Object ov : os) {
                            String v = ov.toString();
                            PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                                    SQL_INSERT_STRING_COLUMN, rid, statementCache);
                            insertStringColumn.clearWarnings();
                            insertStringColumn.clearParameters();
                            insertStringColumn.setString(1, v);
                            insertStringColumn.setString(2, rid);
                            insertStringColumn.setString(3, k);
                            LOGGER.debug("Non Batch Insert Index {} {}", k, v);
                            if (insertStringColumn.executeUpdate() == 0) {
                                throw new StorageClientException("Failed to save "
                                        + getRowId(keySpace, columnFamily, key) + " column:[" + k + "] ");
                            } else {
                                LOGGER.debug("Inserted Index {} {} [{}]",
                                        new Object[] { getRowId(keySpace, columnFamily, key), k, v });
                            }
                        }
                    }
                }
            }
            if (!StorageClientUtils.isRoot(key)) {
                String parent = StorageClientUtils.getParentObjectPath(key);
                String hash = rowHash(keySpace, columnFamily, parent);
                LOGGER.debug("Hash of {}:{}:{} is {} ", new Object[] { keySpace, columnFamily, parent, hash });
                Map<String, Object> autoIndexMap = ImmutableMap.of(InternalContent.PARENT_HASH_FIELD,
                        (Object) hash);
                for (Entry<String, Object> e : autoIndexMap.entrySet()) {
                    String k = e.getKey();
                    Object v = e.getValue();
                    PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                            SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                    removeStringColumn.clearWarnings();
                    removeStringColumn.clearParameters();
                    removeStringColumn.setString(1, rid);
                    removeStringColumn.setString(2, k);
                    int nrows = removeStringColumn.executeUpdate();
                    if (nrows == 0) {
                        m = get(keySpace, columnFamily, key);
                        LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                    } else {
                        LOGGER.debug("Removed Index {} {} {} ",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                    }
                    PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                            SQL_INSERT_STRING_COLUMN, rid, statementCache);
                    insertStringColumn.clearWarnings();
                    insertStringColumn.clearParameters();
                    insertStringColumn.setString(1, v.toString());
                    insertStringColumn.setString(2, rid);
                    insertStringColumn.setString(3, k);
                    LOGGER.debug("Non Batch Insert Index {} {}", k, v);
                    if (insertStringColumn.executeUpdate() == 0) {
                        throw new StorageClientException("Failed to save "
                                + getRowId(keySpace, columnFamily, key) + " column:[" + k + "] ");
                    } else {
                        LOGGER.debug("Inserted Index {} {} [{}]",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, v });
                    }
                }
            }
        }
        endBlock(autoCommit);
    } catch (SQLException e) {
        abandonBlock(autoCommit);
        LOGGER.warn("Failed to perform insert/update operation on {}:{}:{} ",
                new Object[] { keySpace, columnFamily, key }, e);
        throw new StorageClientException(e.getMessage(), e);
    } catch (IOException e) {
        abandonBlock(autoCommit);
        LOGGER.warn("Failed to perform insert/update operation on {}:{}:{} ",
                new Object[] { keySpace, columnFamily, key }, e);
        throw new StorageClientException(e.getMessage(), e);
    } finally {
        close(statementCache);
    }
}
From source file:org.sakaiproject.nakamura.lite.storage.jdbc.JDBCStorageClient.java
public void remove(String keySpace, String columnFamily, String key) throws StorageClientException {
    checkClosed();
    PreparedStatement deleteStringRow = null;
    PreparedStatement deleteBlockRow = null;
    String rid = rowHash(keySpace, columnFamily, key);
    boolean autoCommit = false;
    try {
        autoCommit = startBlock();
        deleteStringRow = getStatement(keySpace, columnFamily, SQL_DELETE_STRING_ROW, rid, null);
        inc("deleteStringRow");
        deleteStringRow.clearWarnings();
        deleteStringRow.clearParameters();
        deleteStringRow.setString(1, rid);
        deleteStringRow.executeUpdate();

        deleteBlockRow = getStatement(keySpace, columnFamily, SQL_BLOCK_DELETE_ROW, rid, null);
        inc("deleteBlockRow");
        deleteBlockRow.clearWarnings();
        deleteBlockRow.clearParameters();
        deleteBlockRow.setString(1, rid);
        deleteBlockRow.executeUpdate();

        endBlock(autoCommit);
    } catch (SQLException e) {
        abandonBlock(autoCommit);
        LOGGER.warn("Failed to perform delete operation on {}:{}:{} ",
                new Object[] { keySpace, columnFamily, key }, e);
        throw new StorageClientException(e.getMessage(), e);
    } finally {
        close(deleteStringRow, "deleteStringRow");
        close(deleteBlockRow, "deleteBlockRow");
    }
}
From source file:org.sakaiproject.nakamura.lite.storage.jdbc.WideColumnIndexer.java
public void index(Map<String, PreparedStatement> statementCache, String keySpace, String columnFamily,
        String key, String rid, Map<String, Object> values) throws StorageClientException, SQLException {
    ResultSet rs = null;
    try {
        Set<String> removeArrayColumns = Sets.newHashSet();
        Set<String> removeColumns = Sets.newHashSet();
        Map<String, Object[]> updateArrayColumns = Maps.newHashMap();
        Map<String, Object> updateColumns = Maps.newHashMap();
        for (Entry<String, Object> e : values.entrySet()) {
            String k = e.getKey();
            Object o = e.getValue();
            Object[] valueMembers = (o instanceof Object[]) ? (Object[]) o : new Object[] { o };
            if (shouldIndex(keySpace, columnFamily, k)) {
                if (isColumnArray(keySpace, columnFamily, k)) {
                    if (o instanceof RemoveProperty || o == null || valueMembers.length == 0) {
                        removeArrayColumns.add(k);
                    } else {
                        removeArrayColumns.add(k);
                        updateArrayColumns.put(k, valueMembers);
                    }
                } else {
                    if (o instanceof RemoveProperty || o == null || valueMembers.length == 0) {
                        removeColumns.add(k);
                    } else {
                        updateColumns.put(k, valueMembers[0]);
                    }
                }
            }
        }
        if (!StorageClientUtils.isRoot(key)
                && getColumnName(keySpace, columnFamily, InternalContent.PARENT_HASH_FIELD) != null) {
            String parent = StorageClientUtils.getParentObjectPath(key);
            String hash = client.rowHash(keySpace, columnFamily, parent);
            LOGGER.debug("Hash of {}:{}:{} is {} ", new Object[] { keySpace, columnFamily, parent, hash });
            updateColumns.put(InternalContent.PARENT_HASH_FIELD, hash);
        }
        LOGGER.debug("Removing Array {} ", removeArrayColumns);
        LOGGER.debug("Updating Array {} ", updateArrayColumns);
        LOGGER.debug("Removing {} ", removeColumns);
        LOGGER.debug("Updating {} ", updateColumns);

        // arrays are stored in css, so we can re-use css sql.
        PreparedStatement removeStringColumn = client.getStatement(keySpace, columnFamily,
                JDBCStorageClient.SQL_REMOVE_STRING_COLUMN, rid, statementCache);
        int nbatch = 0;
        for (String column : removeArrayColumns) {
            removeStringColumn.clearWarnings();
            removeStringColumn.clearParameters();
            removeStringColumn.setString(1, rid);
            removeStringColumn.setString(2, column);
            removeStringColumn.addBatch();
            LOGGER.debug("Removing {} {} ", rid, column);
            nbatch++;
        }
        if (nbatch > 0) {
            long t = System.currentTimeMillis();
            removeStringColumn.executeBatch();
            checkSlow(t, client.getSql(keySpace, columnFamily, JDBCStorageClient.SQL_REMOVE_STRING_COLUMN));
            nbatch = 0;
        }

        // add the column values in
        PreparedStatement insertStringColumn = client.getStatement(keySpace, columnFamily,
                JDBCStorageClient.SQL_INSERT_STRING_COLUMN, rid, statementCache);
        for (Entry<String, Object[]> e : updateArrayColumns.entrySet()) {
            for (Object o : e.getValue()) {
                insertStringColumn.clearWarnings();
                insertStringColumn.clearParameters();
                insertStringColumn.setString(1, o.toString());
                insertStringColumn.setString(2, rid);
                insertStringColumn.setString(3, e.getKey());
                insertStringColumn.addBatch();
                LOGGER.debug("Inserting {} {} {} ", new Object[] { o.toString(), rid, e.getKey() });
                nbatch++;
            }
        }
        if (nbatch > 0) {
            long t = System.currentTimeMillis();
            insertStringColumn.executeBatch();
            checkSlow(t, client.getSql(keySpace, columnFamily, JDBCStorageClient.SQL_INSERT_STRING_COLUMN));
            nbatch = 0;
        }
        if (removeColumns.size() == 0 && updateColumns.size() == 0) {
            return; // nothing to add or remove, do nothing.
        }
        if (removeColumns.size() > 0 && updateColumns.size() == 0) {
            // exists, columns to remove, none to update, therefore
            // delete row this assumes that the starting point is a
            // complete map
            PreparedStatement deleteWideStringColumn = client.getStatement(keySpace, columnFamily,
                    SQL_DELETE_WIDESTRING_ROW, rid, statementCache);
            deleteWideStringColumn.clearParameters();
            deleteWideStringColumn.setString(1, rid);
            long t = System.currentTimeMillis();
            deleteWideStringColumn.execute();
            checkSlow(t, client.getSql(keySpace, columnFamily, SQL_DELETE_WIDESTRING_ROW));
            LOGGER.debug("Executed {} with {} ", deleteWideStringColumn, rid);
        } else if (updateColumns.size() > 0 || removeColumns.size() > 0) {
            //
            // build an update query, record does not exists, but there
            // is stuff to add
            String[] sqlParts = StringUtils
                    .split(client.getSql(keySpace, columnFamily, SQL_UPDATE_WIDESTRING_ROW), ";");
            StringBuilder setOperations = new StringBuilder();
            for (Entry<String, Object> e : updateColumns.entrySet()) {
                join(setOperations, " ,").append(
                        MessageFormat.format(sqlParts[1], getColumnName(keySpace, columnFamily, e.getKey())));
            }
            for (String toRemove : removeColumns) {
                join(setOperations, " ,").append(
                        MessageFormat.format(sqlParts[1], getColumnName(keySpace, columnFamily, toRemove)));
            }
            String finalSql = MessageFormat.format(sqlParts[0], setOperations);
            LOGGER.debug("Performing {} ", finalSql);
            PreparedStatement updateColumnPst = client.getStatement(finalSql, statementCache);
            updateColumnPst.clearWarnings();
            updateColumnPst.clearParameters();
            int i = 1;
            for (Entry<String, Object> e : updateColumns.entrySet()) {
                updateColumnPst.setString(i, e.getValue().toString());
                LOGGER.debug(" Param {} {} ", i, e.getValue().toString());
                i++;
            }
            for (String toRemove : removeColumns) {
                updateColumnPst.setNull(i, toSqlType(columnFamily, toRemove));
                LOGGER.debug(" Param {} NULL ", i);
                i++;
            }
            updateColumnPst.setString(i, rid);
            long t = System.currentTimeMillis();
            int n = updateColumnPst.executeUpdate();
            checkSlow(t, finalSql);
            if (n == 0) {
                // part 0 is the final ,part 1 is the template for column names,
                // part 2 is the template for parameters.
                // insert into x ( columnsnames ) values ()
                StringBuilder columnNames = new StringBuilder();
                StringBuilder paramHolders = new StringBuilder();
                for (Entry<String, Object> e : updateColumns.entrySet()) {
                    columnNames.append(" ,").append(getColumnName(keySpace, columnFamily, e.getKey()));
                    paramHolders.append(" ,").append("?");
                }
                finalSql = MessageFormat.format(
                        client.getSql(keySpace, columnFamily, SQL_INSERT_WIDESTRING_ROW),
                        columnNames.toString(), paramHolders.toString());
                LOGGER.debug("Insert SQL {} ", finalSql);
                PreparedStatement insertColumnPst = client.getStatement(finalSql, statementCache);
                insertColumnPst.clearWarnings();
                insertColumnPst.clearParameters();
                insertColumnPst.setString(1, rid);
                i = 2;
                for (Entry<String, Object> e : updateColumns.entrySet()) {
                    LOGGER.debug(" Param {} {} ", i, e.getValue().toString());
                    insertColumnPst.setString(i, e.getValue().toString());
                    i++;
                }
                t = System.currentTimeMillis();
                insertColumnPst.executeUpdate();
                checkSlow(t, finalSql);
            }
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
}