List of usage examples for java.sql.PreparedStatement.executeBatch()
int[] executeBatch() throws SQLException;
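Before the project examples below, a minimal self-contained sketch of the basic pattern: bind parameters, queue each command with addBatch(), then send everything in one round trip with executeBatch(), which returns one update count per queued command. The in-memory H2 URL and the person table are placeholders for this sketch; any JDBC database works the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteBatchBasics {
    public static void main(String[] args) throws SQLException {
        // Assumes the H2 driver is on the classpath (placeholder database).
        try (Connection con = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement ddl = con.createStatement()) {
                ddl.execute("CREATE TABLE person (name VARCHAR(50) PRIMARY KEY, age INT)");
                ddl.execute("INSERT INTO person VALUES ('alice', 0), ('bob', 0)");
            }
            con.setAutoCommit(false); // run the whole batch in one transaction
            try (PreparedStatement ps = con.prepareStatement(
                    "UPDATE person SET age = ? WHERE name = ?")) {
                Object[][] rows = { { 42, "alice" }, { 37, "bob" } };
                for (Object[] row : rows) {
                    ps.setInt(1, (Integer) row[0]);
                    ps.setString(2, (String) row[1]);
                    ps.addBatch(); // queue the command; nothing is sent yet
                }
                int[] counts = ps.executeBatch(); // one update count per addBatch()
                con.commit();
                for (int c : counts) {
                    System.out.println("rows affected: " + c);
                }
            }
        }
    }
}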
From source file:org.cartoweb.stats.imports.Import.java
private void fillCacheHits(Connection con) throws SQLException {
    con.commit();
    con.setAutoCommit(true);
    JdbcUtilities.runDeleteQuery("vacuuming " + tableName, "VACUUM ANALYZE " + tableName, con, null);
    con.setAutoCommit(false);
    if (DB_SOLVE_HITS) {
        //takes around 55m for 4M records and is not great for incremental updates...
        JdbcUtilities.runDeleteQuery("solving cache hits",
                "UPDATE " + tableName + " f SET general_elapsed_time=s.general_elapsed_time, images_mainmap_width=s.images_mainmap_width, images_mainmap_height=s.images_mainmap_height, layers=s.layers, layers_switch_id=s.layers_switch_id, bbox_minx=s.bbox_minx, bbox_miny=s.bbox_miny, bbox_maxx=s.bbox_maxx, bbox_maxy=s.bbox_maxy, location_scale=s.location_scale, query_results_count=s.query_results_count, query_results_table_count=s.query_results_table_count FROM " + tableName + " s WHERE s.general_cache_id=f.general_cache_hit AND f.general_cache_hit IS NOT NULL AND f.general_elapsed_time IS NULL AND f.layers IS NULL",
                con, null);
    } else {
        //takes around 21m for the same 4M records and is optimal for incremental updates...
        try {
            final PreparedStatement updateStmt = con.prepareStatement(
                    "UPDATE " + tableName + " SET general_elapsed_time=?, images_mainmap_width=?, images_mainmap_height=?, layers=?, layers_switch_id=?, bbox_minx=?, bbox_miny=?, bbox_maxx=?, bbox_maxy=?, location_scale=?, query_results_count=?, query_results_table_count=? WHERE general_cache_hit=?");
            if (hits.size() == 0) {
                return;
            }
            JdbcUtilities.runSelectQuery("reading cached values",
                    "SELECT general_cache_id, general_elapsed_time, images_mainmap_width, images_mainmap_height, layers, layers_switch_id, bbox_minx, bbox_miny, bbox_maxx, bbox_maxy, location_scale, query_results_count, query_results_table_count FROM " + tableName + " WHERE general_cache_id IS NOT NULL",
                    con, new JdbcUtilities.SelectTask() {
                        private int cpt = 0;

                        public void setupStatement(PreparedStatement stmt) throws SQLException {
                        }

                        public void run(ResultSet rs) throws SQLException {
                            int count = 0;
                            final int todo = hits.size();
                            Progress progress = new Progress(10 * 1000, todo, "Cache hit record updating", LOGGER);
                            while (rs.next()) {
                                String cacheId = rs.getString(1);
                                //We can have the same general_cache_id multiple times.
                                //So we have to remove it from the set.
                                if (hits.remove(cacheId)) {
                                    StatementUtils.copyFloat(rs, 2, updateStmt, 1);
                                    StatementUtils.copyInt(rs, 3, updateStmt, 2);
                                    StatementUtils.copyInt(rs, 4, updateStmt, 3);
                                    StatementUtils.copyString(rs, 5, updateStmt, 4);
                                    StatementUtils.copyInt(rs, 6, updateStmt, 5);
                                    StatementUtils.copyFloat(rs, 7, updateStmt, 6);
                                    StatementUtils.copyFloat(rs, 8, updateStmt, 7);
                                    StatementUtils.copyFloat(rs, 9, updateStmt, 8);
                                    StatementUtils.copyFloat(rs, 10, updateStmt, 9);
                                    StatementUtils.copyFloat(rs, 11, updateStmt, 10);
                                    StatementUtils.copyInt(rs, 12, updateStmt, 11);
                                    StatementUtils.copyString(rs, 13, updateStmt, 12);
                                    updateStmt.setString(13, cacheId);
                                    updateStmt.addBatch();
                                    if (++cpt % 50 == 0) {
                                        int[] counts = updateStmt.executeBatch();
                                        for (int i = 0; i < counts.length; ++i) {
                                            count += counts[i];
                                        }
                                    }
                                    progress.update(todo - hits.size());
                                }
                            }
                            ++cpt;
                            int[] counts = updateStmt.executeBatch();
                            for (int i = 0; i < counts.length; ++i) {
                                count += counts[i];
                            }
                            LOGGER.info(count + " cache hit records updated from " + cpt + " cached values");
                        }
                    });
            updateStmt.close();
        } catch (BatchUpdateException ex) {
            LOGGER.error(ex.getNextException());
            throw ex;
        }
    }
    con.commit();
}
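Note the catch block above: when a batched command fails, JDBC throws a BatchUpdateException whose own message is often generic, and drivers such as PostgreSQL attach the real cause (for example the violated constraint) as a chained exception. A minimal sketch of walking that chain; stmt stands in for any prepared statement with queued commands:

try {
    stmt.executeBatch();
} catch (java.sql.BatchUpdateException ex) {
    // Walk the SQLException chain; the root cause is often the last link.
    for (java.sql.SQLException e = ex; e != null; e = e.getNextException()) {
        System.err.println(e.getMessage());
    }
    throw ex;
}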
From source file:fr.aliacom.obm.common.calendar.CalendarDaoJdbcImpl.java
private void updateAttendees(AccessToken token, Connection con, Event event) throws SQLException {
    String q = "update EventLink set eventlink_state=?, eventlink_required=?, eventlink_userupdate=?, eventlink_percent=?, eventlink_is_organizer=? "
            + "where eventlink_event_id = ? AND eventlink_entity_id = ?";
    PreparedStatement ps = null;
    int[] updatedAttendees;
    List<Attendee> mightInsert = new LinkedList<Attendee>();
    List<Attendee> toInsert = new LinkedList<Attendee>();
    try {
        ps = con.prepareStatement(q);
        for (Attendee at : event.getAttendees()) {
            int idx = 1;
            ps.setObject(idx++, obmHelper.getDBCP().getJdbcObject(ObmHelper.VPARTSTAT,
                    at.getParticipation().getState().toString()));
            ps.setObject(idx++, obmHelper.getDBCP().getJdbcObject(ObmHelper.VROLE,
                    at.getParticipationRole().toString()));
            ps.setInt(idx++, token.getObmId());
            ps.setInt(idx++, at.getPercent());
            ps.setBoolean(idx++, at.isOrganizer());
            ps.setInt(idx++, event.getObmId().getObmId());
            ps.setInt(idx++, at.getEntityId().getId());
            ps.addBatch();
            mightInsert.add(at);
        }
        updatedAttendees = ps.executeBatch();
    } finally {
        obmHelper.cleanup(null, ps, null);
    }
    for (int i = 0; i < updatedAttendees.length; i++) {
        if (updatedAttendees[i] == 0) {
            Attendee at = mightInsert.get(i);
            toInsert.add(at);
        }
    }
    logger.info("event modification needs to add " + toInsert.size() + " attendees.");
    insertAttendees(token, event, con, toInsert);
}
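The return array of executeBatch() does real work here: counts come back in the same order the commands were queued, so a count of 0 at position i means the i-th UPDATE matched no row and that attendee must be inserted instead. A condensed sketch of this update-then-insert pattern; ps and attendees stand in for the variables above:

// Counts are positional: counts[i] belongs to the i-th addBatch() call.
List<Attendee> toInsert = new ArrayList<Attendee>();
int[] counts = ps.executeBatch();
for (int i = 0; i < counts.length; i++) {
    if (counts[i] == 0) {          // UPDATE matched no existing row
        toInsert.add(attendees.get(i));
    }
}
// ...batch-INSERT everything in toInsert...

This only works when the driver reports real counts; a driver that returns Statement.SUCCESS_NO_INFO (-2) for batched commands cannot distinguish "updated" from "missing".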
From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java
private boolean setStateMergeDB(Map<String, String> nameStates, int epoch, RCStates state,
        Set<NodeIDType> newActives) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";
    PreparedStatement updateRC = null;
    Connection conn = null;
    boolean updatedAll = true;
    try {
        if (conn == null) {
            conn = this.getDefaultConn();
            conn.setAutoCommit(false);
            updateRC = conn.prepareStatement(updateCmd);
        }
        assert (nameStates != null && !nameStates.isEmpty());
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, 0, newActives);
            record.setState(name, 0, state/* RCStates.READY_READY */).setActivesToNewActives();
            updateRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                updateRC.setClob(2, new StringReader(record.toString()));
            else
                updateRC.setString(2, record.toString());
            updateRC.setString(3, name);
            updateRC.addBatch();
            i++;
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == nameStates.size()) {
                int[] executed = updateRC.executeBatch();
                conn.commit();
                updateRC.clearBatch();
                for (int j : executed)
                    updatedAll = updatedAll && (j > 0);
                if (updatedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + updateCmd);
        sqle.printStackTrace();
    } finally {
        cleanup(updateRC);
        cleanup(conn);
    }
    return updatedAll;
}
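The MAX_DB_BATCH_SIZE test above implements a common safeguard: very large batches are flushed and committed in fixed-size chunks so neither the driver nor the transaction log has to buffer an unbounded number of commands. A stripped-down sketch of the chunking skeleton; the names and chunk size are placeholders:

// Minimal chunked-batch skeleton; assumes conn has autocommit off.
final int BATCH_SIZE = 1000; // placeholder chunk size
int pending = 0;
for (Row row : rows) {
    ps.setString(1, row.value());
    ps.addBatch();
    if (++pending == BATCH_SIZE) {
        ps.executeBatch();   // flush this chunk
        conn.commit();       // bound the transaction size
        ps.clearBatch();
        pending = 0;
    }
}
if (pending > 0) {           // flush the remainder
    ps.executeBatch();
    conn.commit();
}

The explicit clearBatch() is defensive: the JDBC specification says the command list is already reset once executeBatch() returns.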
From source file:com.jabyftw.lobstercraft.player.PlayerHandlerService.java
/**
 * This should run on server close; we don't need to synchronize because every player join is denied beforehand.
 *
 * @param connection MySQL connection
 * @throws SQLException in case something goes wrong
 */
private void saveChangedPlayers(@NotNull Connection connection) throws SQLException {
    long start = System.nanoTime();
    int numberOfPlayersUpdated = 0;

    // Prepare statement
    PreparedStatement preparedStatement = connection.prepareStatement(
            "UPDATE `minecraft`.`user_profiles` SET `playerName` = ?, `password` = ?, `moneyAmount` = ?, `city_cityId` = ?,"
                    + " `cityOccupation` = ?, `lastTimeOnline` = ?, `timePlayed` = ?, `lastIp` = ? WHERE `playerId` = ?;");

    // Iterate through all players
    for (OfflinePlayer offlinePlayer : registeredOfflinePlayers_id.values())
        // Filter the ones needing updates => REGISTERED PLAYERS: they have money amount, passwords, last time online, last IP
        if (offlinePlayer.getDatabaseState() == DatabaseState.UPDATE_DATABASE) {
            preparedStatement.setString(1, offlinePlayer.getPlayerName());
            preparedStatement.setString(2, offlinePlayer.getEncryptedPassword());
            preparedStatement.setDouble(3, offlinePlayer.getMoneyAmount());
            preparedStatement.setObject(4, offlinePlayer.getCityId(), Types.INTEGER); // Will write null if it is null
            preparedStatement.setObject(5, offlinePlayer.getCityOccupation() != null
                    ? offlinePlayer.getCityOccupation().getOccupationId() : null, Types.TINYINT);
            preparedStatement.setLong(6, offlinePlayer.getLastTimeOnline());
            preparedStatement.setLong(7, offlinePlayer.getTimePlayed());
            preparedStatement.setString(8, offlinePlayer.getLastIp());
            preparedStatement.setLong(9, offlinePlayer.getPlayerId());

            // Add batch
            preparedStatement.addBatch();

            // Update their database state
            offlinePlayer.databaseState = DatabaseState.ON_DATABASE;
            numberOfPlayersUpdated++;
        }

    // Execute and announce
    if (numberOfPlayersUpdated > 0) {
        preparedStatement.executeBatch();
        LobsterCraft.logger.info(Util.appendStrings("Took us ",
                Util.formatDecimal((double) (System.nanoTime() - start) / (double) TimeUnit.MILLISECONDS.toNanos(1)),
                "ms to update ", numberOfPlayersUpdated, " players."));
    }

    // Close statement
    preparedStatement.close();
}
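A detail worth copying from this example: setObject(index, value, sqlType) writes SQL NULL when the value is null, which the primitive setters like setInt or setLong cannot express. A minimal sketch of binding a nullable column inside a batch; the player variable and column position are placeholders:

// city_id is a nullable INTEGER column (placeholder names).
Integer cityId = player.getCityId();              // may be null
ps.setObject(4, cityId, java.sql.Types.INTEGER);  // binds SQL NULL when cityId == null
ps.addBatch();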
From source file:org.wso2.carbon.apimgt.migration.client.MigrateFrom110to200.java
private boolean updateAMAppKeyDomainMapping(Connection connection) throws SQLException {
    log.info("Updating consumer keys in AM_APP_KEY_DOMAIN_MAPPING");
    Statement selectStatement = null;
    Statement deleteStatement = null;
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;
    boolean continueUpdatingDB = true;
    long totalRecords = 0;
    long decryptionFailedRecords = 0;
    try {
        ArrayList<KeyDomainMappingTableDTO> keyDomainMappingTableDTOs = new ArrayList<>();
        String query = "SELECT * FROM AM_APP_KEY_DOMAIN_MAPPING";
        selectStatement = connection.createStatement();
        selectStatement.setFetchSize(50);
        resultSet = selectStatement.executeQuery(query);

        while (resultSet.next()) {
            ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
            consumerKeyDTO.setEncryptedConsumerKey(resultSet.getString("CONSUMER_KEY"));
            totalRecords++;
            if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {
                KeyDomainMappingTableDTO keyDomainMappingTableDTO = new KeyDomainMappingTableDTO();
                keyDomainMappingTableDTO.setConsumerKey(consumerKeyDTO);
                keyDomainMappingTableDTO.setAuthzDomain(resultSet.getString("AUTHZ_DOMAIN"));
                keyDomainMappingTableDTOs.add(keyDomainMappingTableDTO);
            } else {
                log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in AM_APP_KEY_DOMAIN_MAPPING table");
                decryptionFailedRecords++;
                // If it is not allowed to remove decryption-failed entries from the DB, we will not continue
                // updating tables even with successfully decrypted entries, to maintain DB integrity
                if (!removeDecryptionFailedKeysFromDB) {
                    continueUpdatingDB = false;
                }
            }
        }

        if (continueUpdatingDB) { // Modify the table only if decryption was successful
            preparedStatement = connection.prepareStatement("INSERT INTO AM_APP_KEY_DOMAIN_MAPPING "
                    + "(CONSUMER_KEY, AUTHZ_DOMAIN) VALUES (?, ?)");
            for (KeyDomainMappingTableDTO keyDomainMappingTableDTO : keyDomainMappingTableDTOs) {
                preparedStatement.setString(1, keyDomainMappingTableDTO.getConsumerKey().getDecryptedConsumerKey());
                preparedStatement.setString(2, keyDomainMappingTableDTO.getAuthzDomain());
                preparedStatement.addBatch();
            }

            deleteStatement = connection.createStatement();
            deleteStatement.execute("DELETE FROM AM_APP_KEY_DOMAIN_MAPPING");

            preparedStatement.executeBatch();
            log.info("AM_APP_KEY_DOMAIN_MAPPING table updated with " + decryptionFailedRecords + "/" + totalRecords
                    + " of the CONSUMER_KEY entries deleted as they cannot be decrypted");
        } else {
            log.error("AM_APP_KEY_DOMAIN_MAPPING table not updated as " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries" + " cannot be decrypted");
        }
    } finally {
        if (selectStatement != null) selectStatement.close();
        if (deleteStatement != null) deleteStatement.close();
        if (preparedStatement != null) preparedStatement.close();
        if (resultSet != null) resultSet.close();
    }
    return continueUpdatingDB;
}
From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java
/**
 * This method is called to perform 'updates' for any inserts that failed.
 *
 * @return The data insert result containing the data points that were not updated.
 */
private List<DataPoint> updateData(Connection conn, List<DataPoint> data) {
    PreparedStatement stmt = null;
    List<DataPoint> left = new ArrayList<DataPoint>();
    Map<String, List<DataPoint>> buckets = MeasRangeObj.getInstance().bucketData(data);
    for (Entry<String, List<DataPoint>> entry : buckets.entrySet()) {
        String table = entry.getKey();
        List<DataPoint> dpts = entry.getValue();
        try {
            // TODO need to set synchronous commit to off
            stmt = conn.prepareStatement(
                    "UPDATE " + table + " SET value = ? WHERE timestamp = ? AND measurement_id = ?");
            for (DataPoint pt : dpts) {
                Integer metricId = pt.getMeasurementId();
                MetricValue val = pt.getMetricValue();
                BigDecimal bigDec = new BigDecimal(val.getValue());
                stmt.setBigDecimal(1, getDecimalInRange(bigDec, metricId));
                stmt.setLong(2, val.getTimestamp());
                stmt.setInt(3, metricId.intValue());
                stmt.addBatch();
            }
            int[] execInfo = stmt.executeBatch();
            left.addAll(getRemainingDataPoints(dpts, execInfo));
        } catch (BatchUpdateException e) {
            left.addAll(getRemainingDataPointsAfterBatchFail(dpts, e.getUpdateCounts()));
        } catch (SQLException e) {
            // If the batch update is not within a transaction, then we don't know which of
            // the updates completed successfully. Assume they all failed.
            left.addAll(dpts);
            if (log.isDebugEnabled()) {
                log.debug("A general SQLException occurred during the update. Assuming that none of the "
                        + dpts.size() + " data points were updated.", e);
            }
        } finally {
            DBUtil.closeStatement(LOG_CTX, stmt);
        }
    }
    return left;
}
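The helpers above inspect the array returned by executeBatch(), and on failure the array from BatchUpdateException.getUpdateCounts(), where some drivers stop at the first failing command and others continue. A hedged sketch of reading the counts with the standard JDBC constants; stmt stands in for any statement with queued commands:

// Classify each batched command by its update count.
int[] counts;
try {
    counts = stmt.executeBatch();
} catch (java.sql.BatchUpdateException e) {
    counts = e.getUpdateCounts(); // may be shorter than the batch if the driver stopped early
}
for (int i = 0; i < counts.length; i++) {
    if (counts[i] == java.sql.Statement.EXECUTE_FAILED) {         // -3: this command failed
        System.err.println("command " + i + " failed");
    } else if (counts[i] == java.sql.Statement.SUCCESS_NO_INFO) { // -2: succeeded, count unknown
        System.out.println("command " + i + " ok (count unknown)");
    } else {
        System.out.println("command " + i + " affected " + counts[i] + " rows");
    }
}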
From source file:dk.netarkivet.harvester.datamodel.RunningJobsInfoDBDAO.java
/**
 * Store frontier report data to the persistent storage.
 *
 * @param filterId the id of the filter that produced the report
 * @param report the report to store
 * @param jobId The ID of the job responsible for this report
 * @return the update count
 */
public int storeFrontierReport(String filterId, InMemoryFrontierReport report, Long jobId) {
    ArgumentNotValid.checkNotNull(report, "report");
    ArgumentNotValid.checkNotNull(jobId, "jobId");

    Connection c = HarvestDBConnection.get();
    PreparedStatement stm = null;
    try {
        // First drop existing rows
        try {
            c.setAutoCommit(false);
            stm = c.prepareStatement("DELETE FROM frontierReportMonitor WHERE jobId=? AND filterId=?");
            stm.setLong(1, jobId);
            stm.setString(2, filterId);
            stm.executeUpdate();
            c.commit();
        } catch (SQLException e) {
            String message = "SQL error dropping records for job ID " + jobId + " and filterId " + filterId
                    + "\n" + ExceptionUtils.getSQLExceptionCause(e);
            log.warn(message, e);
            return 0;
        } finally {
            DBUtils.closeStatementIfOpen(stm);
            DBUtils.rollbackIfNeeded(c, "storeFrontierReport delete", jobId);
        }

        // Now batch insert report lines
        try {
            c.setAutoCommit(false);
            stm = c.prepareStatement("INSERT INTO frontierReportMonitor(" + FR_COLUMN.getColumnsInOrder()
                    + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
            for (FrontierReportLine frl : report.getLines()) {
                stm.setLong(FR_COLUMN.jobId.rank(), jobId);
                stm.setString(FR_COLUMN.filterId.rank(), filterId);
                stm.setTimestamp(FR_COLUMN.tstamp.rank(), new Timestamp(report.getTimestamp()));
                stm.setString(FR_COLUMN.domainName.rank(), frl.getDomainName());
                stm.setLong(FR_COLUMN.currentSize.rank(), frl.getCurrentSize());
                stm.setLong(FR_COLUMN.totalEnqueues.rank(), frl.getTotalEnqueues());
                stm.setLong(FR_COLUMN.sessionBalance.rank(), frl.getSessionBalance());
                stm.setDouble(FR_COLUMN.lastCost.rank(), frl.getLastCost());
                stm.setDouble(FR_COLUMN.averageCost.rank(), correctNumericIfIllegalAverageCost(frl.getAverageCost()));
                stm.setString(FR_COLUMN.lastDequeueTime.rank(), frl.getLastDequeueTime());
                stm.setString(FR_COLUMN.wakeTime.rank(), frl.getWakeTime());
                stm.setLong(FR_COLUMN.totalSpend.rank(), frl.getTotalSpend());
                stm.setLong(FR_COLUMN.totalBudget.rank(), frl.getTotalBudget());
                stm.setLong(FR_COLUMN.errorCount.rank(), frl.getErrorCount());
                // URIs are to be truncated to 1000 characters (see SQL scripts)
                DBUtils.setStringMaxLength(stm, FR_COLUMN.lastPeekUri.rank(), frl.getLastPeekUri(),
                        MAX_URL_LENGTH, frl, "lastPeekUri");
                DBUtils.setStringMaxLength(stm, FR_COLUMN.lastQueuedUri.rank(), frl.getLastQueuedUri(),
                        MAX_URL_LENGTH, frl, "lastQueuedUri");
                stm.addBatch();
            }
            int[] updCounts = stm.executeBatch();
            int updCountTotal = 0;
            for (int count : updCounts) {
                updCountTotal += count;
            }
            c.commit();
            return updCountTotal;
        } catch (SQLException e) {
            String message = "SQL error writing records for job ID " + jobId + " and filterId " + filterId
                    + "\n" + ExceptionUtils.getSQLExceptionCause(e);
            log.warn(message, e);
            return 0;
        } finally {
            DBUtils.closeStatementIfOpen(stm);
            DBUtils.rollbackIfNeeded(c, "storeFrontierReport insert", jobId);
        }
    } finally {
        HarvestDBConnection.release(c);
    }
}
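The insert phase above wraps the whole batch in one explicit transaction and reduces the update counts to a single total for the caller. Stripped to its skeleton (the bind helper and INSERT_SQL constant are placeholders), the pattern is:

// One transaction around the whole batch; total count on success, rollback on failure.
con.setAutoCommit(false);
try (PreparedStatement ps = con.prepareStatement(INSERT_SQL)) {
    for (Line line : lines) {
        bind(ps, line);   // placeholder helper setting all parameters
        ps.addBatch();
    }
    int total = 0;
    for (int count : ps.executeBatch()) {
        total += count;
    }
    con.commit();
    return total;
} catch (SQLException e) {
    con.rollback();       // nothing from this report is half-written
    return 0;
}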
From source file:com.flexive.ejb.beans.structure.TypeEngineBean.java
/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void remove(long id) throws FxApplicationException {
    final UserTicket ticket = FxContext.getUserTicket();
    FxPermissionUtils.checkRole(ticket, Role.StructureManagement);
    FxType type = CacheAdmin.getEnvironment().getType(id);
    Connection con = null;
    PreparedStatement ps = null;
    StringBuilder sql = new StringBuilder(500);
    try {
        con = Database.getDbConnection();
        List<FxPropertyAssignment> allPropertyAssignments = new ArrayList<FxPropertyAssignment>(20);
        FxEnvironment env = CacheAdmin.getEnvironment();
        for (FxPropertyAssignment fxpa : env.getPropertyAssignments(true))
            if (fxpa.getAssignedType().getId() == id)
                allPropertyAssignments.add(fxpa);
        List<Long> rmStackProp = new ArrayList<Long>(allPropertyAssignments.size());
        List<FxPropertyAssignment> rmProp = new ArrayList<FxPropertyAssignment>(allPropertyAssignments.size());
        for (FxPropertyAssignment a : allPropertyAssignments)
            if (a.getBaseAssignmentId() == FxAssignment.NO_PARENT)
                rmStackProp.add(a.getId());
            else {
                //check if base is from the same type
                if (env.getAssignment(a.getBaseAssignmentId()).getAssignedType().getId() == id)
                    rmProp.add(a);
                else
                    rmStackProp.add(a.getId());
            }
        boolean found;
        while (rmProp.size() > 0) {
            found = false;
            for (FxPropertyAssignment a : rmProp)
                if (rmStackProp.contains(a.getBaseAssignmentId())) {
                    rmProp.remove(a);
                    rmStackProp.add(0, a.getId());
                    found = true;
                    break;
                }
            assert found : "Internal error: no property assignment found to be removed!";
        }
        //remove group assignments in the 'correct' order (ie not violating parentgroup references)
        ArrayList<Long> rmStack = new ArrayList<Long>(10);
        buildGroupAssignmentRemoveStack(type.getConnectedAssignments("/"), rmStack);
        rmStack.addAll(0, rmStackProp);

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(ML).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        for (Long rmid : rmStack) {
            ps.setLong(1, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        //prevent base-related constraint issues by setting the base to null prior to removal
        sql.setLength(0);
        sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS).append(" SET BASE=NULL WHERE TYPEDEF=? AND ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        for (Long rmid : rmStack) {
            ps.setLong(2, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        //remove property and group assignment option entries
        sql.setLength(0);
        for (FxPropertyAssignment pa : allPropertyAssignments) {
            //exclude the "ID" property whose Id is "0" which is "NO_PARENT"
            if (!(pa.getProperty().getId() == FxAssignment.NO_PARENT)) {
                if (sql.length() == 0)
                    sql.append(" WHERE ASSID IN(").append(pa.getId());
                else
                    sql.append(',').append(pa.getId());
            }
        }
        if (sql.length() > 0) {
            sql.append(')');
            ps = con.prepareStatement("DELETE FROM " + TBL_STRUCT_PROPERTY_OPTIONS + sql.toString());
            ps.executeUpdate();
            ps.close();
        }
        sql.setLength(0);
        for (FxGroupAssignment ga : type.getAssignedGroups()) {
            if (ga.getBaseAssignmentId() == FxAssignment.NO_PARENT) {
                if (sql.length() == 0)
                    sql.append(" WHERE ASSID IN(").append(ga.getId());
                else
                    sql.append(',').append(ga.getId());
            }
        }
        if (sql.length() > 0) {
            sql.append(')');
            ps = con.prepareStatement("DELETE FROM " + TBL_STRUCT_GROUP_OPTIONS + sql.toString());
            ps.executeUpdate();
            ps.close();
        }
        // remove all type structure options
        storeTypeOptions(con, TBL_STRUCT_TYPES_OPTIONS, "ID", id, null, true);
        //remove all flat storage assignments for this type
        FxFlatStorageManager.getInstance().removeTypeMappings(con, type.getId());

        //remove the assignments
        sql.setLength(0);
        //clear parent key refs for removal to avoid referential integrity issues within the type itself
        // sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS).append(" SET PARENTGROUP=ID WHERE TYPEDEF=?");
        // ps = con.prepareStatement(sql.toString());
        // ps.setLong(1, type.getId());
        // ps.executeUpdate();
        // ps.close();
        // sql.setLength(0);
        // ps = con.prepareStatement(sql.toString());
        sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(" WHERE TYPEDEF=? AND ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        for (Long rmid : rmStack) {
            ps.setLong(2, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPERELATIONS)
                .append(" WHERE TYPEDEF=? OR TYPESRC=? OR TYPEDST=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.setLong(2, type.getId());
        ps.setLong(3, type.getId());
        ps.executeUpdate();
        ps.close();

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPES).append(ML).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();
        ps.close();

        sql.setLength(0);
        sql.append("UPDATE ").append(TBL_STRUCT_PROPERTIES).append(" SET REFTYPE=NULL WHERE REFTYPE=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();
        ps.close();

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPES).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();

        //remove any orphaned properties and groups
        FxStructureUtils.removeOrphanedProperties(con);
        FxStructureUtils.removeOrphanedGroups(con);
        StructureLoader.reload(con);
        htracker.track(type, "history.type.remove", type.getName(), type.getId());
    } catch (SQLException e) {
        if (StorageManager.isForeignKeyViolation(e)) {
            EJBUtils.rollback(ctx);
            throw new FxRemoveException(LOG, e, "ex.structure.type.inUse", type.getName());
        }
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(LOG, e, "ex.db.sqlError", e.getMessage());
    } catch (FxCacheException e) {
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(LOG, e, "ex.cache", e.getMessage());
    } catch (FxLoadException e) {
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(e);
    } finally {
        Database.closeObjects(TypeEngineBean.class, con, ps);
    }
}
From source file:org.apache.ctakes.jdl.data.loader.CsvLoader.java
/**
 * @param jdlConnection
 *            the jdlConnection to manage
 */
@Override
public final void dataInsert(final JdlConnection jdlConnection) {
    String sql = getSqlInsert(loader);
    if (log.isInfoEnabled())
        log.info(sql);
    Number ncommit = loader.getCommit();
    int rs = (loader.getSkip() == null) ? 0 : loader.getSkip().intValue();
    PreparedStatement preparedStatement = null;
    try {
        jdlConnection.setAutoCommit(false);
        preparedStatement = jdlConnection.getOpenConnection().prepareStatement(sql);
        boolean leftoversToCommit = false;
        String[] row = null;
        int r = 0;
        do {
            row = parser.getLine();
            if (row == null)
                break;
            if (r < rs) {
                r++;
                continue;
            }
            r++;
            try {
                int cs = 0; // columns to skip
                int ce = 0; // columns from external
                int c = 0;
                for (Column column : loader.getColumn()) {
                    if (BooleanUtils.isTrue(column.isSkip())) {
                        cs++;
                    } else {
                        c++;
                        Object value = column.getConstant();
                        ce++;
                        if (value == null) {
                            if (column.getSeq() != null) {
                                value = r + column.getSeq().intValue();
                            } else {
                                value = row[c + cs - ce];
                                ce--;
                            }
                        }
                        if (value == null || (value instanceof String && ((String) value).length() == 0))
                            preparedStatement.setObject(c, null);
                        else {
                            // if there is a formatter, parse the string
                            if (this.formatMap.containsKey(column.getName())) {
                                try {
                                    preparedStatement.setObject(c,
                                            this.formatMap.get(column.getName()).parseObject((String) value));
                                } catch (Exception e) {
                                    System.err.println("Could not format '" + value + "' for column "
                                            + column.getName() + " on line " + r);
                                    e.printStackTrace(System.err);
                                    throw new RuntimeException(e);
                                }
                            } else {
                                preparedStatement.setObject(c, value);
                            }
                        }
                    }
                }
                preparedStatement.addBatch();
                leftoversToCommit = true;
                if (r % ncommit.intValue() == 0) {
                    preparedStatement.executeBatch();
                    jdlConnection.commitConnection();
                    leftoversToCommit = false;
                    log.info("inserted " + ncommit.intValue() + " rows");
                }
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        } while (row != null);
        if (leftoversToCommit) {
            preparedStatement.executeBatch();
            jdlConnection.commitConnection();
            leftoversToCommit = false;
        }
        log.info("inserted " + (r - rs) + " rows total");
    } catch (InstantiationException e) {
        log.error("", e);
    } catch (IllegalAccessException e) {
        log.error("", e);
    } catch (ClassNotFoundException e) {
        log.error("", e);
    } catch (IOException e) {
        log.error("", e);
    } catch (SQLException e) {
        throw new RuntimeException(e);
    } finally {
        if (preparedStatement != null) {
            try {
                preparedStatement.close();
            } catch (Exception e) {
            }
        }
    }
}
From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java
private boolean createReconfigurationRecordsDB(Map<String, String> nameStates, Set<NodeIDType> newActives) {
    String insertCmd = "insert into " + getRCRecordTable() + " (" + Columns.RC_GROUP_NAME.toString() + ", "
            + Columns.STRINGIFIED_RECORD.toString() + ", " + Columns.SERVICE_NAME.toString()
            + " ) values (?,?,?)";
    PreparedStatement insertRC = null;
    Connection conn = null;
    boolean insertedAll = true;
    Set<String> batch = new HashSet<String>();
    Set<String> committed = new HashSet<String>();
    try {
        if (conn == null) {
            conn = this.getDefaultConn();
            conn.setAutoCommit(false);
            insertRC = conn.prepareStatement(insertCmd);
        }
        assert (nameStates != null && !nameStates.isEmpty());
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, -1, newActives);
            /* We just directly initialize with WAIT_ACK_STOP:-1 instead of starting with READY:-1
             * and pretending to go through the whole reconfiguration protocol sequence. */
            record.setState(name, -1, RCStates.WAIT_ACK_STOP);
            insertRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                insertRC.setClob(2, new StringReader(record.toString()));
            else
                insertRC.setString(2, record.toString());
            insertRC.setString(3, name);
            insertRC.addBatch();
            batch.add(name);
            i++;
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == nameStates.size()) {
                int[] executed = insertRC.executeBatch();
                conn.commit();
                insertRC.clearBatch();
                committed.addAll(batch);
                batch.clear();
                for (int j : executed)
                    insertedAll = insertedAll && (j > 0);
                if (insertedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + insertCmd);
        sqle.printStackTrace();
    } finally {
        cleanup(insertRC);
        cleanup(conn);
    }
    // rollback
    if (!insertedAll) {
        for (String name : nameStates.keySet())
            if (committed.contains(name))
                this.deleteReconfigurationRecord(name, 0);
    }
    return insertedAll;
}
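Compared with setStateMergeDB earlier, this variant adds compensation: because each chunk is committed independently, a failure partway through leaves earlier chunks durable, so the method records which names were committed and issues compensating deletes if anything failed. The essential bookkeeping, stripped of the gigapaxos specifics (names and the delete helper are placeholders):

// Sketch of compensating rollback across independently committed chunks.
Set<String> committed = new HashSet<>();
Set<String> batch = new HashSet<>();
boolean allOk = true;
for (String name : names) {
    ps.setString(1, name);
    ps.addBatch();
    batch.add(name);
    if (batch.size() == CHUNK_SIZE) {
        for (int c : ps.executeBatch()) allOk = allOk && (c > 0);
        conn.commit();                 // this chunk is now durable
        committed.addAll(batch);
        batch.clear();
    }
}
// ...flush the remainder the same way...
if (!allOk) {
    for (String name : committed) {
        deleteRecord(name);            // placeholder compensating delete
    }
}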