Usage examples for java.sql.PreparedStatement.executeBatch(), collected from open-source projects.
int[] executeBatch() throws SQLException;
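Before the project examples below, here is a minimal self-contained sketch of the addBatch()/executeBatch() idiom. The H2 in-memory URL and the USERS table are illustrative assumptions, not taken from any of the examples:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteBatchExample {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory H2 URL; any JDBC driver works the same way.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement st = conn.createStatement()) {
                st.execute("CREATE TABLE USERS(ID INT PRIMARY KEY, NAME VARCHAR(64))");
            }
            conn.setAutoCommit(false); // group the whole batch into one transaction
            try (PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO USERS(ID, NAME) VALUES(?, ?)")) {
                for (int i = 0; i < 100; i++) {
                    ps.setInt(1, i);
                    ps.setString(2, "user-" + i);
                    ps.addBatch(); // queue this parameter set
                }
                int[] counts = ps.executeBatch(); // one round trip for all queued rows
                conn.commit();
                System.out.println("Inserted " + counts.length + " rows");
            }
        }
    }
}

Each element of the returned int[] is the update count of the corresponding queued statement; a driver may instead report Statement.SUCCESS_NO_INFO (-2) when a statement succeeded but the row count is unknown.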
From source file: com.alibaba.wasp.jdbc.TestPreparedStatement.java

public void testLobTempFiles() throws SQLException {
    Statement stat = conn.createStatement();
    stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, DATA CLOB)");
    PreparedStatement prep = conn.prepareStatement("INSERT INTO TEST VALUES(?, ?)");
    for (int i = 0; i < 5; i++) {
        prep.setInt(1, i);
        if (i % 2 == 0) {
            prep.setCharacterStream(2, new StringReader(getString(i)), -1);
        }
        prep.execute();
    }
    ResultSet rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID");
    int check = 0;
    for (int i = 0; i < 5; i++) {
        assertTrue(rs.next());
        if (i % 2 == 0) {
            check = i;
        }
        assertEquals(getString(check), rs.getString(2));
    }
    assertFalse(rs.next());
    stat.execute("DELETE FROM TEST");
    for (int i = 0; i < 3; i++) {
        prep.setInt(1, i);
        prep.setCharacterStream(2, new StringReader(getString(i)), -1);
        prep.addBatch();
    }
    prep.executeBatch();
    rs = stat.executeQuery("SELECT * FROM TEST ORDER BY ID");
    for (int i = 0; i < 3; i++) {
        assertTrue(rs.next());
        assertEquals(getString(i), rs.getString(2));
    }
    assertFalse(rs.next());
    stat.execute("DROP TABLE TEST");
}
From source file: com.wso2telco.gsma.authenticators.dao.impl.AttributeConfigDaoImpl.java

@Override
public void saveUserConsentedAttributes(List<UserConsentHistory> userConsentHistory)
        throws NamingException, DBUtilException {
    Connection connection = null;
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;
    String query = "INSERT INTO " + TableName.USER_CONSENT
            + "(consent_id,msisdn,expire_time,consent_status,"
            + "client_id,operator) " + "VALUES (?,?,?,?,?,?);";
    try {
        connection = getConnectDBConnection();
        preparedStatement = connection.prepareStatement(query);
        for (UserConsentHistory userConsentHistory1 : userConsentHistory) {
            preparedStatement.setInt(1, userConsentHistory1.getConsentId());
            preparedStatement.setString(2, userConsentHistory1.getMsisdn());
            preparedStatement.setString(3, userConsentHistory1.getConsentExpireTime());
            preparedStatement.setBoolean(4, Boolean.parseBoolean(userConsentHistory1.getConsentStatus()));
            preparedStatement.setString(5, userConsentHistory1.getClientId());
            preparedStatement.setString(6, userConsentHistory1.getOperatorName());
            preparedStatement.addBatch();
        }
        if (log.isDebugEnabled()) {
            log.debug("Query in method saveUserConsentedAttributes:" + preparedStatement);
        }
        preparedStatement.executeBatch();
    } catch (SQLException e) {
        log.error("Exception occurred while inserting data to the database for history : "
                + userConsentHistory.toString() + " :" + e.getMessage());
        throw new DBUtilException(e.getMessage(), e);
    } finally {
        IdentityDatabaseUtil.closeAllConnections(connection, resultSet, preparedStatement);
    }
}
From source file: org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java

/**
 * Add metrics data to the database. Data that is passed can come from several Schedules, but needs to be of only
 * one type of MeasurementGathering. For good performance it is important that the agent sends batches as big as
 * possible (ok, perhaps not more than 100 items at a time).
 *
 * @param data the actual data points
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void addNumericData(Set<MeasurementDataNumeric> data) {
    if ((data == null) || (data.isEmpty())) {
        return;
    }
    int expectedCount = data.size();
    Connection conn = null;
    DatabaseType dbType = null;
    Map<String, PreparedStatement> statements = new HashMap<String, PreparedStatement>();
    try {
        conn = rhqDs.getConnection();
        dbType = DatabaseTypeFactory.getDatabaseType(conn);
        if (dbType instanceof Postgresql83DatabaseType) {
            Statement st = null;
            try {
                // Take advantage of async commit here
                st = conn.createStatement();
                st.execute("SET synchronous_commit = off");
            } finally {
                JDBCUtil.safeClose(st);
            }
        }
        for (MeasurementDataNumeric aData : data) {
            Double value = aData.getValue();
            if ((value == null) || Double.isNaN(value) || Double.isInfinite(value)) {
                expectedCount--;
                continue;
            }
            String table = MeasurementDataManagerUtility.getTable(aData.getTimestamp());
            PreparedStatement ps = statements.get(table);
            if (ps == null) {
                String insertSql = "INSERT /*+ APPEND */ INTO " + table
                        + "(schedule_id,time_stamp,value) VALUES(?,?,?)";
                ps = conn.prepareStatement(insertSql);
                statements.put(table, ps);
            }
            ps.setInt(1, aData.getScheduleId());
            ps.setLong(2, aData.getTimestamp());
            ps.setDouble(3, value);
            ps.addBatch();
        }
        int count = 0;
        for (PreparedStatement ps : statements.values()) {
            int[] res = ps.executeBatch();
            for (int updates : res) {
                if ((updates != 1) && (updates != -2)) { // oracle returns -2 on success
                    throw new MeasurementStorageException("Unexpected batch update size [" + updates + "]");
                }
                count++;
            }
        }
        if (count != expectedCount) {
            throw new MeasurementStorageException("Failure to store measurement data.");
        }
        notifyAlertConditionCacheManager("mergeMeasurementReport",
                data.toArray(new MeasurementData[data.size()]));
    } catch (SQLException e) {
        log.warn("Failure saving measurement numeric data:\n" + ThrowableUtil.getAllMessages(e));
    } catch (Exception e) {
        log.error("Error persisting numeric data", e);
    } finally {
        for (PreparedStatement ps : statements.values()) {
            JDBCUtil.safeClose(ps);
        }
        JDBCUtil.safeClose(conn);
    }
}
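The -2 accepted in the result loop above is the value of java.sql.Statement.SUCCESS_NO_INFO, which the JDBC spec allows any driver (Oracle's among them) to return when a batched statement succeeded but the row count is unknown. A small sketch of the same check written against the named constants; the helper class here is ours, not part of the project above:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

final class BatchResults {
    /** Returns true when every queued statement in the batch succeeded. */
    static boolean allSucceeded(PreparedStatement ps) throws SQLException {
        for (int r : ps.executeBatch()) {
            // SUCCESS_NO_INFO (-2): succeeded, row count unknown.
            // EXECUTE_FAILED (-3): reported by drivers that keep processing after an error.
            if (r == Statement.EXECUTE_FAILED) {
                return false;
            }
        }
        return true;
    }
}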
From source file: com.flexive.ejb.beans.BriefcaseEngineBean.java

private void replaceMetaData(Connection con, long id, Collection<FxReferenceMetaData<FxPK>> metadata)
        throws FxUpdateException {
    PreparedStatement stmt = null;
    boolean success = false;
    try {
        stmt = con.prepareStatement(
                "UPDATE " + TBL_BRIEFCASE_DATA + " SET metadata=? WHERE briefcase_id=? AND id=?");
        stmt.setLong(2, id);
        for (FxReferenceMetaData<FxPK> metaData : metadata) {
            final String meta = metaData.getSerializedForm();
            stmt.setString(1, meta);
            stmt.setLong(3, metaData.getReference().getId());
            stmt.addBatch();
        }
        stmt.executeBatch();
        success = true;
    } catch (SQLException e) {
        throw new FxUpdateException(LOG, e);
    } finally {
        if (!success) {
            EJBUtils.rollback(ctx);
        }
        closeObjects(BriefcaseEngineBean.class, null, stmt);
    }
}
From source file: com.globalsight.everest.permission.Permission.java

/**
 * Update table permissiongroup. If a permission id is greater than or equal
 * to 300, increment it by one, then update permission_set to the new string.
 */
private static void updateUnbalancedPermissionGroupSet() {
    Connection c = null;
    PreparedStatement stmt = null;
    PreparedStatement stmt1 = null;
    ResultSet rs = null;
    try {
        c = ConnectionPool.getConnection();
        c.setAutoCommit(false);
        stmt = c.prepareStatement(SQL_SELECT_PERMISSION_SET_FROM_PERMISSION_GROUP);
        stmt1 = c.prepareStatement(SQL_UPDATE_PERMISSION_SET);
        rs = stmt.executeQuery();
        while (rs.next()) {
            long id = rs.getLong(1);
            String permissionSet = rs.getString(2);
            String[] permissionIdArray = permissionSet.split("\\|");
            StringBuffer newPermissionSet = new StringBuffer();
            for (String permissionId : permissionIdArray) {
                if (StringUtils.isNotEmpty(permissionId)) {
                    long lId = Long.parseLong(permissionId);
                    if (lId >= 300) {
                        lId += 1;
                    }
                    newPermissionSet.append("|").append(lId);
                }
            }
            newPermissionSet.append("|");
            stmt1.setString(1, newPermissionSet.toString());
            stmt1.setLong(2, id);
            stmt1.addBatch();
        }
        stmt1.executeBatch();
        c.commit();
    } catch (Exception e) {
        logger.error("Failed to update permission_group from database.", e);
    } finally {
        ConnectionPool.silentClose(rs);
        ConnectionPool.silentClose(stmt);
        ConnectionPool.silentClose(stmt1);
        ConnectionPool.silentReturnConnection(c);
    }
}
From source file: com.flexive.ejb.beans.configuration.DivisionConfigurationEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void setResourceValue(String key, FxString value) throws FxApplicationException {
    if (StringUtils.isBlank(key))
        return;
    key = key.trim();
    if (key.length() > 250)
        throw new FxApplicationException("ex.configuration.resource.key.tooLong", key);
    if (!StringUtils.isAsciiPrintable(key))
        throw new FxApplicationException("ex.configuration.resource.key.nonAscii", key);
    Connection con = null;
    PreparedStatement ps = null;
    try {
        con = Database.getDbConnection();
        ps = con.prepareStatement("DELETE FROM " + TBL_RESOURCES + " WHERE RKEY=?");
        ps.setString(1, key);
        ps.executeUpdate();
        if (value != null && !value.isEmpty()) {
            ps.close();
            ps = con.prepareStatement("INSERT INTO " + TBL_RESOURCES + " (RKEY,LANG,RVAL)VALUES(?,?,?)");
            ps.setString(1, key);
            for (long lang : value.getTranslatedLanguages()) {
                ps.setLong(2, lang);
                ps.setString(3, value.getTranslation(lang));
                ps.addBatch();
            }
            ps.executeBatch();
        }
    } catch (SQLException e) {
        throw new FxApplicationException(e, "ex.db.sqlError", e.getMessage());
    } finally {
        Database.closeObjects(DivisionConfigurationEngine.class, con, ps);
    }
}
From source file: eu.celarcloud.celar_ms.ServerPack.Database.MySQL.DBHandlerWithConnPool.java

public void createSubscription(SubObj sub, MetricObj metric) {
    PreparedStatement stmt = null;
    Connection c = null;
    try {
        c = this.getConnection();
        stmt = c.prepareStatement(CREATE_SUBSCRIPTION);
        stmt.setString(1, sub.getSubID());
        stmt.setString(2, sub.getGroupingFunc().name());
        stmt.setString(3, sub.getOriginMetric());
        stmt.setInt(4, sub.getPeriod());
        stmt.executeUpdate();

        stmt = c.prepareStatement(CREATE_METRIC_FOR_SUB);
        stmt.setString(1, metric.getMetricID());
        stmt.setString(2, metric.getAgentID());
        stmt.setString(3, metric.getName());
        stmt.setString(4, metric.getGroup());
        stmt.setString(5, metric.getUnits());
        stmt.setString(6, metric.getType());
        stmt.setString(7, "yes");
        stmt.executeUpdate();

        stmt = c.prepareStatement(ADD_AGENT_TO_SUB);
        String subID = sub.getSubID();
        for (String agentID : sub.getAgentList()) {
            stmt.setString(1, subID);
            stmt.setString(2, agentID);
            stmt.addBatch();
        }
        stmt.executeBatch();
    } catch (SQLException e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler createSubscription>> " + e);
    } catch (Exception e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler createSubscription>> " + e);
    } finally {
        this.release(stmt, c);
    }
}
From source file: com.strider.datadefender.DatabaseAnonymizer.java

/**
 * Anonymization function for a single table.
 *
 * Sets up queries, loops over columns and anonymizes columns for the passed
 * Table.
 *
 * @param table
 */
private void anonymizeTable(final int batchSize, final IDBFactory dbFactory, final Table table)
        throws DatabaseAnonymizerException {
    log.info("Table [" + table.getName() + "]. Start ...");
    final List<Column> tableColumns = table.getColumns();
    // colNames is looked up with contains, and iterated over. Using LinkedHashSet means
    // duplicate column names won't be added to the query, so a check in the column loop
    // below was created to ensure a reasonable warning message is logged if that happens.
    final Set<String> colNames = new LinkedHashSet<>(tableColumns.size());
    // keyNames is only iterated over, so no need for a hash set
    final List<String> keyNames = new LinkedList<>();
    fillColumnNames(table, colNames);
    fillPrimaryKeyNamesList(table, keyNames);
    // required in this scope for 'catch' block
    PreparedStatement selectStmt = null;
    PreparedStatement updateStmt = null;
    ResultSet rs = null;
    final Connection updateCon = dbFactory.getUpdateConnection();
    try {
        selectStmt = getSelectQueryStatement(dbFactory, table, keyNames, colNames);
        rs = selectStmt.executeQuery();
        final List<MatchMetaData> columnMetaData = dbFactory.fetchMetaData().getMetaDataForRs(rs);
        final String updateString = getUpdateQuery(table, colNames, keyNames);
        updateStmt = updateCon.prepareStatement(updateString);
        int batchCounter = 0;
        int rowCount = 0;
        while (rs.next()) {
            anonymizeRow(updateStmt, tableColumns, keyNames, updateCon, rs, columnMetaData,
                    dbFactory.getVendorName());
            batchCounter++;
            if (batchCounter == batchSize) {
                updateStmt.executeBatch();
                updateCon.commit();
                batchCounter = 0;
            }
            rowCount++;
        }
        log.debug("Rows processed: " + rowCount);
        updateStmt.executeBatch();
        log.debug("Batch executed");
        updateCon.commit();
        log.debug("Commit");
        selectStmt.close();
        updateStmt.close();
        rs.close();
        log.debug("Closing open resources");
    } catch (SQLException | NoSuchMethodException | SecurityException | IllegalAccessException
            | IllegalArgumentException | InvocationTargetException | DatabaseDiscoveryException ex) {
        log.error(ex.toString());
        if (ex.getCause() != null) {
            log.error(ex.getCause().toString());
        }
    } finally {
        // the finally block performs the same null-checked closes the original
        // duplicated in the catch block, so one copy suffices
        try {
            if (selectStmt != null) {
                selectStmt.close();
            }
            if (updateStmt != null) {
                updateStmt.close();
            }
            if (rs != null) {
                rs.close();
            }
        } catch (SQLException sqlex) {
            log.error(sqlex.toString());
        }
    }
    log.info("Table " + table.getName() + ". End ...");
    log.info("");
}
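This example shows the common chunked-flush idiom: calling executeBatch() and commit() every batchSize rows keeps the driver from accumulating an unbounded number of queued parameter sets, and the extra executeBatch()/commit() after the loop flushes the partial final chunk.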
From source file: org.sakaiproject.nakamura.lite.storage.jdbc.JDBCStorageClient.java

public void insert(String keySpace, String columnFamily, String key, Map<String, Object> values,
        boolean probablyNew) throws StorageClientException {
    checkClosed();
    Map<String, PreparedStatement> statementCache = Maps.newHashMap();
    boolean autoCommit = true;
    try {
        autoCommit = startBlock();
        String rid = rowHash(keySpace, columnFamily, key);
        for (Entry<String, Object> e : values.entrySet()) {
            String k = e.getKey();
            Object o = e.getValue();
            if (o instanceof byte[]) {
                throw new RuntimeException(
                        "Invalid content in " + k + ", storing byte[] rather than streaming it");
            }
        }
        Map<String, Object> m = get(keySpace, columnFamily, key);
        for (Entry<String, Object> e : values.entrySet()) {
            String k = e.getKey();
            Object o = e.getValue();
            if (o instanceof RemoveProperty || o == null) {
                m.remove(k);
            } else {
                m.put(k, o);
            }
        }
        LOGGER.debug("Saving {} {} {} ", new Object[] { key, rid, m });
        if (probablyNew && !UPDATE_FIRST_SEQUENCE.equals(getSql(SQL_STATEMENT_SEQUENCE))) {
            PreparedStatement insertBlockRow = getStatement(keySpace, columnFamily,
                    SQL_BLOCK_INSERT_ROW, rid, statementCache);
            insertBlockRow.clearWarnings();
            insertBlockRow.clearParameters();
            insertBlockRow.setString(1, rid);
            InputStream insertStream = null;
            try {
                insertStream = Types.storeMapToStream(rid, m, columnFamily);
            } catch (UTFDataFormatException e) {
                throw new DataFormatException(INVALID_DATA_ERROR, e);
            }
            if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                insertBlockRow.setBinaryStream(2, insertStream, insertStream.available());
            } else {
                insertBlockRow.setBinaryStream(2, insertStream);
            }
            int rowsInserted = 0;
            try {
                rowsInserted = insertBlockRow.executeUpdate();
            } catch (SQLException e) {
                LOGGER.debug(e.getMessage(), e);
            }
            if (rowsInserted == 0) {
                PreparedStatement updateBlockRow = getStatement(keySpace, columnFamily,
                        SQL_BLOCK_UPDATE_ROW, rid, statementCache);
                updateBlockRow.clearWarnings();
                updateBlockRow.clearParameters();
                updateBlockRow.setString(2, rid);
                try {
                    insertStream = Types.storeMapToStream(rid, m, columnFamily);
                } catch (UTFDataFormatException e) {
                    throw new DataFormatException(INVALID_DATA_ERROR, e);
                }
                if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                    updateBlockRow.setBinaryStream(1, insertStream, insertStream.available());
                } else {
                    updateBlockRow.setBinaryStream(1, insertStream);
                }
                if (updateBlockRow.executeUpdate() == 0) {
                    throw new StorageClientException("Failed to save " + rid);
                } else {
                    LOGGER.debug("Updated {} ", rid);
                }
            } else {
                LOGGER.debug("Inserted {} ", rid);
            }
        } else {
            PreparedStatement updateBlockRow = getStatement(keySpace, columnFamily,
                    SQL_BLOCK_UPDATE_ROW, rid, statementCache);
            updateBlockRow.clearWarnings();
            updateBlockRow.clearParameters();
            updateBlockRow.setString(2, rid);
            InputStream updateStream = null;
            try {
                updateStream = Types.storeMapToStream(rid, m, columnFamily);
            } catch (UTFDataFormatException e) {
                throw new DataFormatException(INVALID_DATA_ERROR, e);
            }
            if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                updateBlockRow.setBinaryStream(1, updateStream, updateStream.available());
            } else {
                updateBlockRow.setBinaryStream(1, updateStream);
            }
            if (updateBlockRow.executeUpdate() == 0) {
                PreparedStatement insertBlockRow = getStatement(keySpace, columnFamily,
                        SQL_BLOCK_INSERT_ROW, rid, statementCache);
                insertBlockRow.clearWarnings();
                insertBlockRow.clearParameters();
                insertBlockRow.setString(1, rid);
                try {
                    updateStream = Types.storeMapToStream(rid, m, columnFamily);
                } catch (UTFDataFormatException e) {
                    throw new DataFormatException(INVALID_DATA_ERROR, e);
                }
                if ("1.5".equals(getSql(JDBC_SUPPORT_LEVEL))) {
                    insertBlockRow.setBinaryStream(2, updateStream, updateStream.available());
                } else {
                    insertBlockRow.setBinaryStream(2, updateStream);
                }
                if (insertBlockRow.executeUpdate() == 0) {
                    throw new StorageClientException("Failed to save " + rid);
                } else {
                    LOGGER.debug("Inserted {} ", rid);
                }
            } else {
                LOGGER.debug("Updated {} ", rid);
            }
        }
        if ("1".equals(getSql(USE_BATCH_INSERTS))) {
            Set<PreparedStatement> removeSet = Sets.newHashSet();
            // execute the updates and add the necessary inserts.
            Map<PreparedStatement, List<Entry<String, Object>>> insertSequence = Maps.newHashMap();
            Set<PreparedStatement> insertSet = Sets.newHashSet();
            for (Entry<String, Object> e : values.entrySet()) {
                String k = e.getKey();
                Object o = e.getValue();
                if (shouldIndex(keySpace, columnFamily, k)) {
                    if (o instanceof RemoveProperty || o == null) {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        removeStringColumn.addBatch();
                        removeSet.add(removeStringColumn);
                    } else {
                        // remove all previous values
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        removeStringColumn.addBatch();
                        removeSet.add(removeStringColumn);
                        // insert new values; as we just removed them we know we can insert, no need to
                        // attempt an update. The only thing we know is that the column value changes,
                        // so we have to re-index the whole property.
                        Object[] valueMembers = (o instanceof Object[]) ? (Object[]) o : new Object[] { o };
                        for (Object ov : valueMembers) {
                            String valueMember = ov.toString();
                            PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                                    SQL_INSERT_STRING_COLUMN, rid, statementCache);
                            insertStringColumn.setString(1, valueMember);
                            insertStringColumn.setString(2, rid);
                            insertStringColumn.setString(3, k);
                            insertStringColumn.addBatch();
                            LOGGER.debug("Insert Index {} {}", k, valueMember);
                            insertSet.add(insertStringColumn);
                            List<Entry<String, Object>> insertSeq = insertSequence.get(insertStringColumn);
                            if (insertSeq == null) {
                                insertSeq = Lists.newArrayList();
                                insertSequence.put(insertStringColumn, insertSeq);
                            }
                            insertSeq.add(e);
                        }
                    }
                }
            }
            if (!StorageClientUtils.isRoot(key)) {
                // create a holding map containing a rowhash of the parent, then process the
                // entry to generate an update operation.
                Map<String, Object> autoIndexMap = ImmutableMap.of(InternalContent.PARENT_HASH_FIELD,
                        (Object) rowHash(keySpace, columnFamily, StorageClientUtils.getParentObjectPath(key)));
                for (Entry<String, Object> e : autoIndexMap.entrySet()) {
                    // remove all previous values
                    PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                            SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                    removeStringColumn.setString(1, rid);
                    removeStringColumn.setString(2, e.getKey());
                    removeStringColumn.addBatch();
                    removeSet.add(removeStringColumn);
                    PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                            SQL_INSERT_STRING_COLUMN, rid, statementCache);
                    insertStringColumn.setString(1, (String) e.getValue());
                    insertStringColumn.setString(2, rid);
                    insertStringColumn.setString(3, e.getKey());
                    insertStringColumn.addBatch();
                    LOGGER.debug("Insert {} {}", e.getKey(), e.getValue());
                    insertSet.add(insertStringColumn);
                    List<Entry<String, Object>> insertSeq = insertSequence.get(insertStringColumn);
                    if (insertSeq == null) {
                        insertSeq = Lists.newArrayList();
                        insertSequence.put(insertStringColumn, insertSeq);
                    }
                    insertSeq.add(e);
                }
            }
            LOGGER.debug("Remove set {}", removeSet);
            for (PreparedStatement pst : removeSet) {
                pst.executeBatch();
            }
            LOGGER.debug("Insert set {}", insertSet);
            for (PreparedStatement pst : insertSet) {
                int[] res = pst.executeBatch();
                List<Entry<String, Object>> insertSeq = insertSequence.get(pst);
                for (int i = 0; i < res.length; i++) {
                    Entry<String, Object> e = insertSeq.get(i);
                    if (res[i] <= 0 && res[i] != -2) {
                        // Oracle drivers respond with -2 on a successful insert when the number is not known
                        // http://download.oracle.com/javase/1.3/docs/guide/jdbc/spec2/jdbc2.1.frame6.html
                        LOGGER.warn("Index failed for {} {} ",
                                new Object[] { rid, e.getKey(), e.getValue() });
                    } else {
                        LOGGER.debug("Index inserted for {} {} ",
                                new Object[] { rid, e.getKey(), e.getValue() });
                    }
                }
            }
        } else {
            for (Entry<String, Object> e : values.entrySet()) {
                String k = e.getKey();
                Object o = e.getValue();
                if (shouldIndex(keySpace, columnFamily, k)) {
                    if (o instanceof RemoveProperty || o == null) {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.clearWarnings();
                        removeStringColumn.clearParameters();
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        int nrows = removeStringColumn.executeUpdate();
                        if (nrows == 0) {
                            m = get(keySpace, columnFamily, key);
                            LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                        } else {
                            LOGGER.debug("Removed Index {} {} {} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                        }
                    } else {
                        PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                                SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                        removeStringColumn.clearWarnings();
                        removeStringColumn.clearParameters();
                        removeStringColumn.setString(1, rid);
                        removeStringColumn.setString(2, k);
                        int nrows = removeStringColumn.executeUpdate();
                        if (nrows == 0) {
                            m = get(keySpace, columnFamily, key);
                            LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                        } else {
                            LOGGER.debug("Removed Index {} {} {} ",
                                    new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                        }
                        Object[] os = (o instanceof Object[]) ? (Object[]) o : new Object[] { o };
                        for (Object ov : os) {
                            String v = ov.toString();
                            PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                                    SQL_INSERT_STRING_COLUMN, rid, statementCache);
                            insertStringColumn.clearWarnings();
                            insertStringColumn.clearParameters();
                            insertStringColumn.setString(1, v);
                            insertStringColumn.setString(2, rid);
                            insertStringColumn.setString(3, k);
                            LOGGER.debug("Non Batch Insert Index {} {}", k, v);
                            if (insertStringColumn.executeUpdate() == 0) {
                                throw new StorageClientException("Failed to save "
                                        + getRowId(keySpace, columnFamily, key) + " column:[" + k + "] ");
                            } else {
                                LOGGER.debug("Inserted Index {} {} [{}]",
                                        new Object[] { getRowId(keySpace, columnFamily, key), k, v });
                            }
                        }
                    }
                }
            }
            if (!StorageClientUtils.isRoot(key)) {
                String parent = StorageClientUtils.getParentObjectPath(key);
                String hash = rowHash(keySpace, columnFamily, parent);
                LOGGER.debug("Hash of {}:{}:{} is {} ",
                        new Object[] { keySpace, columnFamily, parent, hash });
                Map<String, Object> autoIndexMap = ImmutableMap.of(InternalContent.PARENT_HASH_FIELD,
                        (Object) hash);
                for (Entry<String, Object> e : autoIndexMap.entrySet()) {
                    String k = e.getKey();
                    Object v = e.getValue();
                    PreparedStatement removeStringColumn = getStatement(keySpace, columnFamily,
                            SQL_REMOVE_STRING_COLUMN, rid, statementCache);
                    removeStringColumn.clearWarnings();
                    removeStringColumn.clearParameters();
                    removeStringColumn.setString(1, rid);
                    removeStringColumn.setString(2, k);
                    int nrows = removeStringColumn.executeUpdate();
                    if (nrows == 0) {
                        m = get(keySpace, columnFamily, key);
                        LOGGER.debug("Column Not present did not remove {} {} Current Column:{} ",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, m });
                    } else {
                        LOGGER.debug("Removed Index {} {} {} ",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, nrows });
                    }
                    PreparedStatement insertStringColumn = getStatement(keySpace, columnFamily,
                            SQL_INSERT_STRING_COLUMN, rid, statementCache);
                    insertStringColumn.clearWarnings();
                    insertStringColumn.clearParameters();
                    insertStringColumn.setString(1, v.toString());
                    insertStringColumn.setString(2, rid);
                    insertStringColumn.setString(3, k);
                    LOGGER.debug("Non Batch Insert Index {} {}", k, v);
                    if (insertStringColumn.executeUpdate() == 0) {
                        throw new StorageClientException("Failed to save "
                                + getRowId(keySpace, columnFamily, key) + " column:[" + k + "] ");
                    } else {
                        LOGGER.debug("Inserted Index {} {} [{}]",
                                new Object[] { getRowId(keySpace, columnFamily, key), k, v });
                    }
                }
            }
        }
        endBlock(autoCommit);
    } catch (SQLException e) {
        abandonBlock(autoCommit);
        LOGGER.warn("Failed to perform insert/update operation on {}:{}:{} ",
                new Object[] { keySpace, columnFamily, key }, e);
        throw new StorageClientException(e.getMessage(), e);
    } catch (IOException e) {
        abandonBlock(autoCommit);
        LOGGER.warn("Failed to perform insert/update operation on {}:{}:{} ",
                new Object[] { keySpace, columnFamily, key }, e);
        throw new StorageClientException(e.getMessage(), e);
    } finally {
        close(statementCache);
    }
}
From source file: org.wso2.carbon.device.mgt.core.archival.dao.impl.ArchivalDAOImpl.java

@Override
public void moveOperations() throws ArchivalDAOException {
    Statement stmt = null;
    PreparedStatement stmt2 = null;
    Statement stmt3 = null;
    ResultSet rs = null;
    try {
        Connection conn = ArchivalSourceDAOFactory.getConnection();
        String sql = "SELECT * FROM DM_OPERATION WHERE ID IN (SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt = this.createMemoryEfficientStatement(conn);
        rs = stmt.executeQuery(sql);
        Connection conn2 = ArchivalDestinationDAOFactory.getConnection();
        sql = "INSERT INTO DM_OPERATION_ARCH VALUES(?, ?, ?, ?, ?, ?)";
        stmt2 = conn2.prepareStatement(sql);
        int count = 0;
        while (rs.next()) {
            stmt2.setInt(1, rs.getInt("ID"));
            stmt2.setString(2, rs.getString("TYPE"));
            stmt2.setTimestamp(3, rs.getTimestamp("CREATED_TIMESTAMP"));
            stmt2.setTimestamp(4, rs.getTimestamp("RECEIVED_TIMESTAMP"));
            stmt2.setString(5, rs.getString("OPERATION_CODE"));
            stmt2.setTimestamp(6, this.currentTimestamp);
            stmt2.addBatch();
            if (++count % batchSize == 0) {
                stmt2.executeBatch();
            }
        }
        stmt2.executeBatch();
        if (log.isDebugEnabled()) {
            log.debug(count + " [OPERATIONS] Records copied to the archival table. Starting deletion");
        }
        sql = "DELETE FROM DM_OPERATION WHERE ID IN (SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt3 = conn.createStatement();
        int affected = stmt3.executeUpdate(sql);
        if (log.isDebugEnabled()) {
            log.debug(affected + " Rows deleted");
        }
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error occurred while moving operations", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt, rs);
        ArchivalDAOUtil.cleanupResources(stmt2);
        ArchivalDAOUtil.cleanupResources(stmt3);
    }
}
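As a closing reference, the flush-every-N idiom used by the last two examples can be isolated into a small helper. This is a minimal sketch of our own, not taken from any of the projects above; the Binder interface and method names are illustrative:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Collection;

final class BatchHelper {
    /** Binds each item with the given binder, flushing the batch every batchSize rows. */
    static <T> void batchInChunks(PreparedStatement ps, Collection<T> items, int batchSize,
                                  Binder<T> binder) throws SQLException {
        int count = 0;
        for (T item : items) {
            binder.bind(ps, item); // caller sets the ? parameters for this row
            ps.addBatch();
            if (++count % batchSize == 0) {
                ps.executeBatch(); // flush a full chunk
            }
        }
        ps.executeBatch(); // flush the remainder (a no-op if the batch is empty)
    }

    interface Binder<T> {
        void bind(PreparedStatement ps, T item) throws SQLException;
    }
}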