List of usage examples for java.sql PreparedStatement executeBatch
int[] executeBatch() throws SQLException;
From source file:org.pentaho.di.core.database.Database.java
/**
 * Insert a row into the database using a prepared statement that has all values set.
 *
 * @param ps           the prepared statement
 * @param batch        true if you want to use batch inserts (size = commit size)
 * @param handleCommit true if you want to handle the commit here after the commit size
 *                     (false e.g. in case the step handles this, see TableOutput)
 * @return true if the rows are safe: if a batch of rows was sent to the database OR if a commit was done
 * @throws KettleDatabaseException if the insert, batch flush or commit fails
 */
public boolean insertRow(PreparedStatement ps, boolean batch, boolean handleCommit) throws KettleDatabaseException {
    // Tracks the last phase reached so the generic catch block below can report where the failure happened.
    String debug = "insertRow start";
    boolean rowsAreSafe = false;
    boolean isBatchUpdate = false;
    try {
        // Unique connections and Batch inserts don't mix when you want to roll
        // back on certain databases.
        // That's why we disable the batch insert in that case.
        //
        boolean useBatchInsert = batch && getDatabaseMetaData().supportsBatchUpdates()
                && databaseMeta.supportsBatchUpdates() && Const.isEmpty(connectionGroup);

        //
        // Add support for batch inserts...
        //
        if (!isAutoCommit()) {
            if (useBatchInsert) {
                debug = "insertRow add batch";
                ps.addBatch(); // Add the batch, but don't forget to run the batch
            } else {
                debug = "insertRow exec update";
                ps.executeUpdate();
            }
        } else {
            // Auto-commit mode: every row is executed (and committed by the driver) immediately.
            ps.executeUpdate();
        }

        written++;

        if (handleCommit) { // some steps handle the commit themselves (see e.g. TableOutput step)
            if (!isAutoCommit() && (written % commitsize) == 0) {
                if (useBatchInsert) {
                    // Flag so the SQLException handler below reports a batch-specific error.
                    isBatchUpdate = true;
                    debug = "insertRow executeBatch commit";
                    ps.executeBatch();
                    commit();
                    ps.clearBatch();
                } else {
                    debug = "insertRow normal commit";
                    commit();
                }
                written = 0;
                // This commit-size window has been flushed to the database.
                rowsAreSafe = true;
            }
        }

        return rowsAreSafe;
    } catch (BatchUpdateException ex) {
        throw createKettleDatabaseBatchException("Error updating batch", ex);
    } catch (SQLException ex) {
        if (isBatchUpdate) {
            // The failure happened while flushing a batch; surface it as a batch problem.
            throw createKettleDatabaseBatchException("Error updating batch", ex);
        } else {
            throw new KettleDatabaseException("Error inserting/updating row", ex);
        }
    } catch (Exception e) {
        throw new KettleDatabaseException("Unexpected error inserting/updating row in part [" + debug + "]", e);
    }
}
From source file:com.flexive.ejb.beans.PhraseEngineBean.java
/** * {@inheritDoc}// w w w .j a v a 2 s .c o m */ @Override @TransactionAttribute(TransactionAttributeType.REQUIRED) public long savePhrase(int category, String phraseKey, FxString value, FxPhraseSearchValueConverter converter, Object tag, long mandator) throws FxNoAccessException { Connection con = null; PreparedStatement ps = null; final UserTicket userTicket = FxContext.getUserTicket(); checkMandatorAccess(mandator, userTicket); checkPhraseKey(phraseKey); try { // Obtain a database connection con = Database.getDbConnection(); long phraseId; ps = con.prepareStatement("SELECT ID FROM " + TBL_PHRASE + " WHERE PKEY=? AND MANDATOR=? AND CAT=?"); ps.setString(1, phraseKey); ps.setLong(2, mandator); ps.setInt(3, category); ResultSet rs = ps.executeQuery(); if (rs != null && rs.next()) { phraseId = rs.getLong(1); rs.close(); ps.close(); ps = con.prepareStatement("DELETE FROM " + TBL_PHRASE_VALUES + " WHERE ID=? AND MANDATOR=?"); ps.setLong(1, phraseId); ps.setLong(2, mandator); ps.executeUpdate(); } else { try { phraseId = fetchNextPhraseId(mandator); } catch (FxApplicationException e) { EJBUtils.rollback(ctx); throw e.asRuntimeException(); } ps.close(); ps = con.prepareStatement( "INSERT INTO " + TBL_PHRASE + "(ID,PKEY,MANDATOR,HID,CAT)VALUES(?,?,?,?,?)"); ps.setLong(1, phraseId); ps.setString(2, phraseKey); ps.setLong(3, mandator); ps.setBoolean(4, false); ps.setInt(5, category); ps.executeUpdate(); } if (!value.isEmpty()) { ps.close(); ps = con.prepareStatement( "INSERT INTO " + TBL_PHRASE_VALUES + "(ID,MANDATOR,LANG,PVAL,SVAL,TAG)VALUES(?,?,?,?,?,?)"); ps.setLong(1, phraseId); ps.setLong(2, mandator); FxString fxTag = tag instanceof FxString ? 
(FxString) tag : null; for (long lang : value.getTranslatedLanguages()) { ps.setLong(3, lang); final String translation = value.getTranslation(lang); if (StringUtils.isBlank(translation)) continue; ps.setString(4, translation); if (converter != null) ps.setString(5, converter.convert(translation, lang)); else ps.setString(5, translation.trim().toUpperCase()); if (fxTag != null) { if (!fxTag.isMultiLanguage() || fxTag.translationExists(lang)) ps.setString(6, fxTag.getTranslation(lang)); else ps.setNull(6, Types.VARCHAR); } else { if (tag != null && !StringUtils.isBlank(String.valueOf(tag))) ps.setString(6, String.valueOf(tag)); else ps.setNull(6, Types.VARCHAR); } ps.addBatch(); } ps.executeBatch(); } return phraseId; } catch (SQLException exc) { EJBUtils.rollback(ctx); throw new FxDbException(LOG, exc, "ex.db.sqlError", exc.getMessage()).asRuntimeException(); } finally { Database.closeObjects(PhraseEngineBean.class, con, ps); } }
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
/**
 * Logs a batch of pending log messages into the message table. All rows are
 * written on a single connection with auto-commit disabled and are committed
 * every MAX_DB_BATCH_SIZE statements (and once more at the end of the array).
 *
 * @param packets the pending log tasks to persist
 * @return true only if every message in the batch was logged successfully;
 *         false if logging is closed or any statement failed.
 */
private synchronized boolean logBatchDB(PendingLogTask[] packets) {
    if (isClosed())
        return false;
    if (!isLoggingEnabled() /* && !ENABLE_JOURNALING */)
        return true;

    boolean logged = true;
    PreparedStatement pstmt = null;
    Connection conn = null;
    String cmd = "insert into " + getMTable() + " values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    long t0 = System.nanoTime(), t0Millis = System.currentTimeMillis(), t1 = t0;
    int i = 0;
    try {
        for (i = 0; i < packets.length; i++) {
            if (conn == null) {
                // Lazily acquire the connection and disable auto-commit so each
                // sub-batch commits as one unit.
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(cmd);
            }
            PaxosPacket packet = packets[i].lmTask.logMsg;
            // accept and decision use a faster implementation
            int[] sb = AbstractPaxosLogger.getSlotBallot(packet);
            pstmt.setString(1, packet.getPaxosID());
            pstmt.setInt(2, packet.getVersion());
            pstmt.setInt(3, sb[0]);
            pstmt.setInt(4, sb[1]);
            pstmt.setInt(5, sb[2]);
            pstmt.setInt(6, packet.getType().getInt());
            pstmt.setString(7, packets[i].logfile);
            pstmt.setLong(8, packets[i].logfileOffset);

            // With journaling enabled the message body lives in the journal file,
            // so only an empty placeholder is stored in the DB; otherwise the
            // deflated serialized packet is stored.
            byte[] msgBytes = isJournalingEnabled() ? new byte[0] : deflate(toBytes(packet));

            if (getLogMessageBlobOption()) {
                pstmt.setInt(9, packets[i].length);// msgBytes.length);
                Blob blob = conn.createBlob();
                blob.setBytes(1, msgBytes);
                pstmt.setBlob(10, blob);
            } else {
                String packetString = packet.toString();
                pstmt.setInt(9, packetString.length());
                pstmt.setString(10, packetString);
            }

            pstmt.addBatch();
            // Flush and commit every MAX_DB_BATCH_SIZE rows, and at the end of the array.
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == packets.length) {
                int[] executed = pstmt.executeBatch();
                conn.commit();
                pstmt.clearBatch();
                // Every statement must have affected a row for the batch to count as logged.
                for (int j : executed)
                    logged = logged && (j > 0);
                if (logged)
                    log.log(Level.FINE, "{0}{1}{2}{3}{4}{5}",
                            new Object[] { this, " successfully logged the " + "last ",
                                    (i + 1) % MAX_DB_BATCH_SIZE == 0 ? MAX_DB_BATCH_SIZE
                                            : (i + 1) % MAX_DB_BATCH_SIZE,
                                    " messages in ", (System.nanoTime() - t1) / 1000, " us" });
                t1 = System.nanoTime();
            }
        }
    } catch (Exception sqle) {
        /* If any exception happens, we must return false to preserve
         * safety. We return true only if every message is logged
         * successfully. */
        sqle.printStackTrace();
        log.severe(this + " incurred " + sqle + " while logging batch of size:" + packets.length
                + "; packet_length = " + packets[i].toString().length());
        assert (packets[i].toString().length() < MAX_LOG_MESSAGE_SIZE);
        logged = false;
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }
    if (ENABLE_JOURNALING)
        DelayProfiler.updateDelayNano("index", t0, packets.length);
    else
        DelayProfiler.updateDelay("logBatchDB", t0Millis);
    // DelayProfiler.updateCount("#logged", packets.length);
    DelayProfiler.updateMovAvg("#potential_batched", packets.length);
    return logged;
}
From source file:com.flexive.ejb.beans.structure.AssignmentEngineBean.java
/** * Remove an assignment// ww w. ja v a2s . c o m * * @param assignmentId assignment to remove * @param removeSubAssignments if assignment is a group, remove all attached properties and groups? * @param removeDerivedAssignments if derivates of this assignment in derived types exist, remove them as well? * @param disableAssignment if false, find all derived assignments, flag them as 'regular' assignments and set them as new base * @param allowDerivedRemoval allow removal of derived assignments * @throws FxApplicationException on errors */ private void removeAssignment(long assignmentId, boolean removeSubAssignments, boolean removeDerivedAssignments, boolean disableAssignment, boolean allowDerivedRemoval) throws FxApplicationException { final UserTicket ticket = FxContext.getUserTicket(); FxPermissionUtils.checkRole(ticket, Role.StructureManagement); FxAssignment assignment; assignment = CacheAdmin.getEnvironment().getAssignment(assignmentId); assert assignment != null : "Assignment retrieved was null"; if (!disableAssignment) { //if removal, check if its a derived assignment which may not be removed if (!allowDerivedRemoval && assignment.isDerivedAssignment()) throw new FxRemoveException("ex.structure.assignment.delete.derived", assignment.getXPath()); } Connection con = null; PreparedStatement ps = null; StringBuilder sql = new StringBuilder(500); try { con = Database.getDbConnection(); List<FxAssignment> affectedAssignments = new ArrayList<FxAssignment>(10); affectedAssignments.add(assignment); if (assignment instanceof FxGroupAssignment && removeSubAssignments) { FxGroupAssignment ga = (FxGroupAssignment) assignment; _addSubAssignments(affectedAssignments, ga); } if (removeDerivedAssignments) { //find all derived assignments sql.append("SELECT ID FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(" WHERE BASE=?"); ps = con.prepareStatement(sql.toString()); long prevSize = 0; while (prevSize != affectedAssignments.size()) { //run until no derived assignments are 
found prevSize = affectedAssignments.size(); List<FxAssignment> adds = new ArrayList<FxAssignment>(5); for (FxAssignment check : affectedAssignments) { ps.setLong(1, check.getId()); ResultSet rs = ps.executeQuery(); if (rs != null && rs.next()) { FxAssignment derived = CacheAdmin.getEnvironment().getAssignment(rs.getLong(1)); if (!adds.contains(derived) && !affectedAssignments.contains(derived)) adds.add(derived); } } affectedAssignments.addAll(adds); } ps.close(); sql.setLength(0); } else if (!disableAssignment) { //find all (directly) derived assignments, flag them as 'regular' assignments and set them as new base breakAssignmentInheritance(con, sql, affectedAssignments.toArray(new FxAssignment[affectedAssignments.size()])); } //security checks if (!ticket.isGlobalSupervisor()) { //assignment permission StringBuilder assignmentList = new StringBuilder(200); for (FxAssignment check : affectedAssignments) { assignmentList.append(",").append(check.getId()); if (check instanceof FxPropertyAssignment && check.getAssignedType().isUsePropertyPermissions()) { FxPropertyAssignment pa = (FxPropertyAssignment) check; if (!ticket.mayDeleteACL(pa.getACL().getId(), 0/*owner is irrelevant here*/)) throw new FxNoAccessException("ex.acl.noAccess.delete", pa.getACL().getName()); } } //affected content permission sql.append("SELECT DISTINCT O.ACL FROM ").append(TBL_CONTENT) .append(" O WHERE O.ID IN(SELECT D.ID FROM ").append(TBL_CONTENT_DATA) .append(" D WHERE D.ASSIGN IN(").append(assignmentList.substring(1)).append("))"); java.lang.System.out.println("SQL==" + sql.toString()); ps = con.prepareStatement(sql.toString()); sql.setLength(0); ResultSet rs = ps.executeQuery(); while (rs != null && rs.next()) { if (!ticket.mayDeleteACL(rs.getInt(1), 0/*owner is irrelevant here*/)) throw new FxNoAccessException("ex.acl.noAccess.delete", CacheAdmin.getEnvironment().getACL(rs.getInt(1))); } ps.close(); } if (disableAssignment) sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS).append(" 
SET ENABLED=? WHERE ID=?"); else sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(" WHERE ID=?"); ps = con.prepareStatement(sql.toString()); //batch remove all multi language entries and content datas PreparedStatement psML = null; PreparedStatement psData = null; PreparedStatement psDataFT = null; PreparedStatement psBinaryGet = null; PreparedStatement psBinaryRemove = null; PreparedStatement psPropertyOptionRemove = null; PreparedStatement psGroupOptionRemove = null; try { sql.setLength(0); sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(ML).append(" WHERE ID=?"); psML = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("DELETE FROM ").append(TBL_STRUCT_PROPERTY_OPTIONS).append(" WHERE ASSID=?"); psPropertyOptionRemove = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("DELETE FROM ").append(TBL_STRUCT_GROUP_OPTIONS).append(" WHERE ASSID=?"); psGroupOptionRemove = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("DELETE FROM ").append(TBL_CONTENT_DATA).append(" WHERE ASSIGN=?"); psData = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("DELETE FROM ").append(TBL_CONTENT_DATA_FT).append(" WHERE ASSIGN=?"); psDataFT = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("SELECT DISTINCT FBLOB FROM ").append(TBL_CONTENT_DATA) .append(" WHERE ASSIGN=? 
AND FBLOB IS NOT NULL"); psBinaryGet = con.prepareStatement(sql.toString()); sql.setLength(0); sql.append("DELETE FROM ").append(TBL_CONTENT_BINARY).append(" WHERE ID=?"); psBinaryRemove = con.prepareStatement(sql.toString()); for (FxAssignment ml : affectedAssignments) { if (!disableAssignment) { psML.setLong(1, ml.getId()); psML.addBatch(); } psData.setLong(1, ml.getId()); psData.addBatch(); if (ml instanceof FxPropertyAssignment) { if (!disableAssignment) { if (((FxPropertyAssignment) ml).isFlatStorageEntry()) FxFlatStorageManager.getInstance().removeAssignmentMappings(con, ml.getId()); } psDataFT.setLong(1, ml.getId()); psDataFT.addBatch(); psPropertyOptionRemove.setLong(1, ml.getId()); psPropertyOptionRemove.addBatch(); //only need to remove binaries if its a binary type... switch (((FxPropertyAssignment) ml).getProperty().getDataType()) { case Binary: psBinaryGet.setLong(1, ml.getId()); ResultSet rs = psBinaryGet.executeQuery(); while (rs != null && rs.next()) { psBinaryRemove.setLong(1, rs.getLong(1)); psBinaryRemove.addBatch(); } } } else if (ml instanceof FxGroupAssignment) { psGroupOptionRemove.setLong(1, ml.getId()); psGroupOptionRemove.addBatch(); } } if (!disableAssignment) { psML.executeBatch(); psPropertyOptionRemove.executeBatch(); psGroupOptionRemove.executeBatch(); psBinaryRemove.executeBatch(); psDataFT.executeBatch(); psData.executeBatch(); } } finally { Database.closeObjects(AssignmentEngineBean.class, null, psML); Database.closeObjects(AssignmentEngineBean.class, null, psData); Database.closeObjects(AssignmentEngineBean.class, null, psDataFT); Database.closeObjects(AssignmentEngineBean.class, null, psBinaryGet); Database.closeObjects(AssignmentEngineBean.class, null, psBinaryRemove); Database.closeObjects(AssignmentEngineBean.class, null, psGroupOptionRemove); Database.closeObjects(AssignmentEngineBean.class, null, psPropertyOptionRemove); } if (disableAssignment) ps.setBoolean(1, false); if (affectedAssignments.size() > 1) affectedAssignments 
= FxStructureUtils.resolveRemoveDependencies(affectedAssignments); for (FxAssignment rm : affectedAssignments) { ps.setLong(disableAssignment ? 2 : 1, rm.getId()); ps.executeUpdate(); } FxStructureUtils.removeOrphanedProperties(con); FxStructureUtils.removeOrphanedGroups(con); StructureLoader.reload(con); htracker.track(assignment.getAssignedType(), disableAssignment ? "history.assignment.remove" : "history.assignment.disable", assignment.getXPath(), assignmentId, removeSubAssignments, removeDerivedAssignments); } catch (SQLException e) { EJBUtils.rollback(ctx); throw new FxRemoveException(LOG, e, "ex.db.sqlError", e.getMessage()); } catch (FxCacheException e) { EJBUtils.rollback(ctx); throw new FxRemoveException(LOG, e, "ex.cache", e.getMessage()); } catch (FxLoadException e) { EJBUtils.rollback(ctx); throw new FxRemoveException(e); } finally { Database.closeObjects(TypeEngineBean.class, con, ps); } }
From source file:com.pari.nm.utils.db.ReportDBHelper.java
public static void insertIntoTempPaginationTable(ServiceDescriptor descriptor, ServiceContainerImpl sImpl, String reportId, String sessionId, ArrayList<String> childColNameList) { Connection c = null;//from w ww . ja v a 2s . c o m PreparedStatement ps = null; if (sImpl != null) { try { StringBuffer sb = new StringBuffer(); String tblName = getTempPaginationTblName(reportId, sessionId); if (tblName == null) { throw new Exception( "No Table Exist with ReportId:\t" + reportId + "and SessionId:\t" + sessionId); } c = DBHelper.getConnection(); if (c == null) { logger.error("Unable to get Connection."); return; } Service[] allServs = sImpl.getAllServices(); if (allServs == null || allServs.length <= 0) { logger.info("Now Rows fetchd for ReportId:" + reportId + "and SessionId:\t" + sessionId); } else { logger.info("Number of Records are:\t" + allServs.length); } // Adding to check sb = new StringBuffer(); sb.append("INSERT INTO " + tblName + "\t Values("); ArrayList<String> colNameList = getColumnInfo(descriptor); if (colNameList != null && !colNameList.isEmpty()) { for (int i = 0; i < colNameList.size() - 1; i++) { sb.append("?,"); } sb.append("?)"); } try { ps = c.prepareStatement(sb.toString()); } catch (SQLException e1) { logger.error("Exception Occured while Executing statement:", e1); } Service[] rowsArr = sImpl.getAllServices(); if (rowsArr == null) { logger.info("Report Contians No Data."); } else { for (Service rowService : rowsArr) { int i = 1; if (colNameList != null && !colNameList.isEmpty()) { for (String colName : colNameList) { try { Object obj = rowService.getAttribute(colName); if (obj instanceof ServiceContainerImpl) { ps.setString(i++, String.valueOf(obj)); if (obj != null) { insertChildTableInfo(c, obj, reportId, sessionId, sImpl, childColNameList, null, colName); } } else if (obj == null) { ps.setString(i++, ""); } else { ps.setString(i++, String.valueOf(obj)); } } catch (SQLException e) { logger.error( "Exception Occured while Inserting Data into 
Temporary Pagination Table:", e); return; } } } try { ps.addBatch(); } catch (SQLException e) { e.printStackTrace(); } } } try { int[] count = ps.executeBatch(); logger.info("Number of Statements Executed was:\t" + count.length); } catch (SQLException e1) { logger.error("Exception Occured while Executing Batch Insert.", e1); } } catch (Exception e) { logger.error("Exception Occured while Inserting Data into Temporary Pagination Table:", e); } finally { if (ps != null) { try { ps.close(); } catch (SQLException e) { logger.error("Exception Occured while Closing the Prepared Statement Object.", e); } } if (c != null) { try { c.setAutoCommit(true); DBHelper.releaseConnection(c); } catch (SQLException e) { logger.error("Exception Occured while Closing the Connection Object.", e); } } } } }
From source file:org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java
/** * @param provisioningConnectors//from w w w.j a va2s . co m * @param dbConnection * @param idpId * @throws IdentityProviderManagementException * @throws SQLException */ private void addProvisioningConnectorConfigs(ProvisioningConnectorConfig[] provisioningConnectors, Connection dbConnection, int idpId, int tenantId) throws IdentityProviderManagementException, SQLException { PreparedStatement prepStmt = null; ResultSet rs = null; try { // SP_IDP_ID,SP_IDP_PROV_CONNECTOR_TYPE, SP_IDP_PROV_CONFIG_KEY, // SP_IDP_PROV_CONFIG_VALUE, SP_IDP_PROV_CONFIG_IS_SECRET // SP_IDP_PROV_CONFIG_PROPERTY // TENANT_ID, PROVISIONING_CONFIG_ID, PROPERTY_KEY, PROPERTY_VALUE, PROPERTY_TYPE, // IS_SECRET String sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_PROVISIONING_PROPERTY_SQL; prepStmt = dbConnection.prepareStatement(sqlStmt); String sqlBaseStmt = IdPManagementConstants.SQLQueries.ADD_IDP_PROVISIONING_CONFIG_SQL; String dbProductName = dbConnection.getMetaData().getDatabaseProductName(); PreparedStatement prepBaseStmt = dbConnection.prepareStatement(sqlBaseStmt, new String[] { DBUtils.getConvertedAutoGeneratedColumnName(dbProductName, "ID") }); if (provisioningConnectors != null) { for (ProvisioningConnectorConfig connector : provisioningConnectors) { Property[] connctorProperties = connector.getProvisioningProperties(); if (connctorProperties != null) { // SP_IDP_PROVISIONING_CONFIG // TENANT_ID, IDP_ID, PROVISIONING_CONNECTOR_TYPE, IS_ENABLED, IS_DEFAULT prepBaseStmt.setInt(1, tenantId); prepBaseStmt.setInt(2, idpId); prepBaseStmt.setString(3, CharacterEncoder.getSafeText(connector.getName())); if (connector.isEnabled()) { prepBaseStmt.setString(4, "1"); } else { prepBaseStmt.setString(4, "0"); } if (connector.isBlocking()) { prepBaseStmt.setString(5, "1"); } else { prepBaseStmt.setString(5, "0"); } prepBaseStmt.executeUpdate(); rs = prepBaseStmt.getGeneratedKeys(); if (rs.next()) { int provisioningConfigID = rs.getInt(1); if (connctorProperties.length > 0) { for 
(Property config : connctorProperties) { if (config == null) { continue; } // SP_IDP_PROV_CONFIG_PROPERTY //TENANT_ID, PROVISIONING_CONFIG_ID, PROPERTY_KEY, // PROPERTY_VALUE, PROPERTY_BLOB_VALUE, PROPERTY_TYPE, IS_SECRET prepStmt.setInt(1, tenantId); prepStmt.setInt(2, provisioningConfigID); prepStmt.setString(3, CharacterEncoder.getSafeText(config.getName())); // TODO : Sect property type accordingly if (IdentityApplicationConstants.ConfigElements.PROPERTY_TYPE_BLOB .equals(config.getType())) { prepStmt.setString(4, null); setBlobValue(config.getValue(), prepStmt, 5); prepStmt.setString(6, config.getType()); } else { prepStmt.setString(4, CharacterEncoder.getSafeText(config.getValue())); setBlobValue(null, prepStmt, 5); prepStmt.setString(6, IdentityApplicationConstants.ConfigElements.PROPERTY_TYPE_STRING); } if (config.isConfidential()) { prepStmt.setString(7, "1"); } else { prepStmt.setString(7, "0"); } prepStmt.addBatch(); } } } // Adding properties for base config prepStmt.executeBatch(); } } } } catch (IOException e) { throw new IdentityProviderManagementException("An error occurred while processing content stream.", e); } finally { IdentityApplicationManagementUtil.closeResultSet(rs); IdentityApplicationManagementUtil.closeStatement(prepStmt); } }
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
/**
 * Batched version of putCheckpointState. This is a complicated method with very
 * different behaviors for updates and inserts. If update is true, it attempts to
 * batch-update all the checkpoints and for those updates/inserts that failed, it
 * attempts to individually update/insert them through
 * {@link #putCheckpointState(String, int, Set, int, Ballot, String, int)}. It is
 * still possible that only a subset of the updates succeed, but that is okay as
 * checkpoint failure is not fatal except in the case of initial checkpoint
 * insertion.
 *
 * If update is false, it means that this is a batch-insertion of initial
 * checkpoints, and it is critical that this batch operation is atomic. If the
 * batch operation only partly succeeds, it should throw an exception so that the
 * caller can not proceed any further with the batch insertion but it should also
 * rollback the changes.
 *
 * The reason batched creation of initial checkpoints should be atomic is that
 * otherwise, the checkpoints that did get written essentially are created paxos
 * instances, but there is no easy way for the caller to know that they got
 * created and this could lead to nasty surprises later. If the caller always
 * follows up failed batch creations with sequential creation, then the rollback
 * is not critical as the sequential creation will simply "recover" from the
 * checkpoint if any left behind during a previous failed batch creation. If the
 * caller chooses to keep re-issuing the batch creation and expects to eventually
 * succeed (assuming that the instances in the batch didn't actually exist a
 * priori), then rolling back failed batch creations like below will not help in
 * the event of crashes. So, the caller should really just issue sequential
 * creation requests if a batch creation fails or times out for any reason.
 *
 * Note: this method just needs to be atomic, i.e., all or none, but not
 * synchronized. Synchronizing it will invert the invariant that messageLog is
 * always locked before (because of the getMinLogFile invocation) SQLPaxosLogger.
 *
 * @param tasks
 * @param update
 */
@Override
public boolean putCheckpointState(CheckpointTask[] tasks, boolean update) {
    if (isClosed() || DISABLE_CHECKPOINTING)
        return false;

    boolean batchSuccess = true;
    // committed[i] records whether task i has been durably written (individually or via batch).
    boolean[] committed = new boolean[tasks.length];
    long t1 = System.currentTimeMillis();
    String insertCmd = "insert into " + getCTable()
            + " (version,members,slot,ballotnum,coordinator,state,create_time, min_logfile, paxos_id) values (?,?,?,?,?,?,?,?,?)";

    String updateCmd = "update " + getCTable()
            + " set version=?,members=?, slot=?, ballotnum=?, coordinator=?, state=?, create_time=?, min_logfile=? where paxos_id=?";
    String cmd = update ? updateCmd : insertCmd;
    PreparedStatement insertCP = null;
    Connection conn = null;
    String minLogfile = null;
    // Maps batch position -> task index so executeBatch results can be attributed to tasks.
    ArrayList<Integer> batch = new ArrayList<Integer>();
    try {
        for (int i = 0; i < tasks.length; i++) {
            CheckpointTask task = tasks[i];
            assert (task != null);
            assert (update || task.slot == 0);
            // A task that doesn't match the requested mode (initial checkpoint vs. update)
            // is handled individually via the single-checkpoint path.
            if ((task.slot == 0) == update) {
                this.putCheckpointState(task.paxosID, task.version, (task.members), task.slot, task.ballot,
                        task.state, task.gcSlot, task.createTime);
                committed[i] = true;
                continue;
            }
            if (conn == null) {
                // Lazily acquire the connection; auto-commit off so sub-batches commit atomically.
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                insertCP = conn.prepareStatement(cmd);
            }
            insertCP.setInt(1, task.version);
            insertCP.setString(2, Util.toJSONString(task.members));
            insertCP.setInt(3, task.slot);
            insertCP.setInt(4, task.ballot.ballotNumber);
            insertCP.setInt(5, task.ballot.coordinatorID);
            if (getCheckpointBlobOption()) {
                Blob blob = conn.createBlob();
                blob.setBytes(1, task.state.getBytes(CHARSET));
                insertCP.setBlob(6, blob);
            } else
                insertCP.setString(6, task.state);
            insertCP.setLong(7, task.createTime);
            insertCP.setString(8, minLogfile = this.getSetGCAndGetMinLogfile(task.paxosID, task.version,
                    task.slot - task.gcSlot < 0 ? task.slot : task.gcSlot));
            insertCP.setString(9, task.paxosID);
            insertCP.addBatch();
            batch.add(i);
            incrTotalCheckpoints();

            if (shouldLogCheckpoint(1))
                log.log(Level.INFO, "{0} checkpointed> ({1}:{2}, {3}{4}, {5}, ({6}, {7}) [{8}]) {9}",
                        new Object[] { this, task.paxosID, task.version, (task.members), task.slot, task.ballot,
                                task.gcSlot, minLogfile,
                                Util.truncate(task.state, TRUNCATED_STATE_SIZE, TRUNCATED_STATE_SIZE),
                                (tasks.length > 1 ? "(batched=" + tasks.length + ")" : "") });

            // Flush and commit every MAX_DB_BATCH_SIZE statements and at the end.
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == tasks.length) {
                int[] executed = insertCP.executeBatch();
                conn.commit();
                insertCP.clearBatch();
                for (int j = 0; j < executed.length; j++)
                    batchSuccess = batchSuccess && (committed[batch.get(j)] = (executed[j] > 0));
                batch.clear();
            }
        }
        if (ENABLE_INSTRUMENTATION && Util.oneIn(10))
            DelayProfiler.updateDelay("checkpoint", t1, tasks.length);
    } catch (SQLException | UnsupportedEncodingException sqle) {
        log.log(Level.SEVERE, "{0} SQLException while batched checkpointing", new Object[] { this });
        sqle.printStackTrace();
    } finally {
        cleanup(insertCP);
        cleanup(conn);
    }

    if (!batchSuccess) {
        if (update) {
            // Updates: retry each failed task individually; partial success is acceptable.
            for (int i = 0; i < tasks.length; i++)
                if (!committed[i])
                    this.putCheckpointState(tasks[i].paxosID, tasks[i].version, tasks[i].members, tasks[i].slot,
                            tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
        } else {
            // rollback: initial checkpoint creation must be all-or-none, so undo
            // the tasks that did commit and signal failure to the caller.
            for (int i = 0; i < tasks.length; i++)
                if (committed[i])
                    this.deleteCheckpoint(tasks[i].paxosID, tasks[i].version, tasks[i].members, tasks[i].slot,
                            tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
            throw new PaxosInstanceCreationException(
                    "Rolled back failed batch-creation of " + tasks.length + " paxos instances");
        }
    }
    for (CheckpointTask task : tasks)
        this.deleteOutdatedMessages(task.paxosID, task.version, task.ballot, task.slot, task.ballot.ballotNumber,
                task.ballot.coordinatorID, task.gcSlot);
    return true;
}
From source file:com.oltpbenchmark.benchmarks.seats.SEATSLoader.java
/** * /*from w w w.java 2 s . co m*/ * @param catalog_tbl */ public void loadTable(Table catalog_tbl, Iterable<Object[]> iterable, int batch_size) { // Special Case: Airport Locations final boolean is_airport = catalog_tbl.getName().equals(SEATSConstants.TABLENAME_AIRPORT); if (LOG.isDebugEnabled()) LOG.debug(String.format("Generating new records for table %s [batchSize=%d]", catalog_tbl.getName(), batch_size)); final List<Column> columns = catalog_tbl.getColumns(); // Check whether we have any special mappings that we need to maintain Map<Integer, Integer> code_2_id = new HashMap<Integer, Integer>(); Map<Integer, Map<String, Long>> mapping_columns = new HashMap<Integer, Map<String, Long>>(); for (int col_code_idx = 0, cnt = columns.size(); col_code_idx < cnt; col_code_idx++) { Column catalog_col = columns.get(col_code_idx); String col_name = catalog_col.getName(); // Code Column -> Id Column Mapping // Check to see whether this table has columns that we need to map their // code values to tuple ids String col_id_name = this.profile.code_columns.get(col_name); if (col_id_name != null) { Column catalog_id_col = catalog_tbl.getColumnByName(col_id_name); assert (catalog_id_col != null) : "The id column " + catalog_tbl.getName() + "." 
+ col_id_name + " is missing"; int col_id_idx = catalog_tbl.getColumnIndex(catalog_id_col); code_2_id.put(col_code_idx, col_id_idx); } // Foreign Key Column to Code->Id Mapping // If this columns references a foreign key that is used in the Code->Id mapping // that we generating above, then we need to know when we should change the // column value from a code to the id stored in our lookup table if (this.profile.fkey_value_xref.containsKey(col_name)) { String col_fkey_name = this.profile.fkey_value_xref.get(col_name); mapping_columns.put(col_code_idx, this.profile.code_id_xref.get(col_fkey_name)); } } // FOR int row_idx = 0; int row_batch = 0; try { String insert_sql = SQLUtil.getInsertSQL(catalog_tbl); PreparedStatement insert_stmt = this.conn.prepareStatement(insert_sql); int sqlTypes[] = catalog_tbl.getColumnTypes(); for (Object tuple[] : iterable) { assert (tuple[0] != null) : "The primary key for " + catalog_tbl.getName() + " is null"; // AIRPORT if (is_airport) { // Skip any airport that does not have flights int col_code_idx = catalog_tbl.getColumnByName("AP_CODE").getIndex(); if (profile.hasFlights((String) tuple[col_code_idx]) == false) { if (LOG.isTraceEnabled()) LOG.trace(String.format("Skipping AIRPORT '%s' because it does not have any flights", tuple[col_code_idx])); continue; } // Update the row # so that it matches what we're actually loading int col_id_idx = catalog_tbl.getColumnByName("AP_ID").getIndex(); tuple[col_id_idx] = (long) (row_idx + 1); // Store Locations int col_lat_idx = catalog_tbl.getColumnByName("AP_LATITUDE").getIndex(); int col_lon_idx = catalog_tbl.getColumnByName("AP_LONGITUDE").getIndex(); Pair<Double, Double> coords = Pair.of((Double) tuple[col_lat_idx], (Double) tuple[col_lon_idx]); if (coords.first == null || coords.second == null) { LOG.error(Arrays.toString(tuple)); } assert (coords.first != null) : String.format("Unexpected null latitude for airport '%s' [%d]", tuple[col_code_idx], col_lat_idx); assert (coords.second != 
null) : String.format( "Unexpected null longitude for airport '%s' [%d]", tuple[col_code_idx], col_lon_idx); this.airport_locations.put(tuple[col_code_idx].toString(), coords); if (LOG.isTraceEnabled()) LOG.trace(String.format("Storing location for '%s': %s", tuple[col_code_idx], coords)); } // Code Column -> Id Column for (int col_code_idx : code_2_id.keySet()) { assert (tuple[col_code_idx] != null) : String.format( "The value of the code column at '%d' is null for %s\n%s", col_code_idx, catalog_tbl.getName(), Arrays.toString(tuple)); String code = tuple[col_code_idx].toString().trim(); if (code.length() > 0) { Column from_column = columns.get(col_code_idx); assert (from_column != null); Column to_column = columns.get(code_2_id.get(col_code_idx)); assert (to_column != null) : String.format("Invalid column %s.%s", catalog_tbl.getName(), code_2_id.get(col_code_idx)); long id = (Long) tuple[code_2_id.get(col_code_idx)]; if (LOG.isTraceEnabled()) LOG.trace(String.format("Mapping %s '%s' -> %s '%d'", from_column.fullName(), code, to_column.fullName(), id)); this.profile.code_id_xref.get(to_column.getName()).put(code, id); } } // FOR // Foreign Key Code -> Foreign Key Id for (int col_code_idx : mapping_columns.keySet()) { Column catalog_col = columns.get(col_code_idx); assert (tuple[col_code_idx] != null || catalog_col.isNullable()) : String.format( "The code %s column at '%d' is null for %s id=%s\n%s", catalog_col.fullName(), col_code_idx, catalog_tbl.getName(), tuple[0], Arrays.toString(tuple)); if (tuple[col_code_idx] != null) { String code = tuple[col_code_idx].toString(); tuple[col_code_idx] = mapping_columns.get(col_code_idx).get(code); if (LOG.isTraceEnabled()) LOG.trace(String.format("Mapped %s '%s' -> %s '%s'", catalog_col.fullName(), code, catalog_col.getForeignKey().fullName(), tuple[col_code_idx])); } } // FOR for (int i = 0; i < tuple.length; i++) { try { if (tuple[i] != null) { insert_stmt.setObject(i + 1, tuple[i]); } else { insert_stmt.setNull(i + 1, 
sqlTypes[i]); } } catch (SQLDataException ex) { LOG.error("INVALID " + catalog_tbl.getName() + " TUPLE: " + Arrays.toString(tuple)); throw new RuntimeException("Failed to set value for " + catalog_tbl.getColumn(i).fullName(), ex); } } // FOR insert_stmt.addBatch(); row_idx++; if (++row_batch >= batch_size) { LOG.debug(String.format("Loading %s batch [total=%d]", catalog_tbl.getName(), row_idx)); insert_stmt.executeBatch(); conn.commit(); insert_stmt.clearBatch(); row_batch = 0; } } // FOR if (row_batch > 0) { insert_stmt.executeBatch(); conn.commit(); } insert_stmt.close(); } catch (Exception ex) { throw new RuntimeException("Failed to load table " + catalog_tbl.getName(), ex); } if (is_airport) assert (this.profile.getAirportCount() == row_idx) : String.format("%d != %d", profile.getAirportCount(), row_idx); // Record the number of tuples that we loaded for this table in the profile if (catalog_tbl.getName().equals(SEATSConstants.TABLENAME_RESERVATION)) { this.profile.num_reservations = row_idx + 1; } LOG.info(String.format("Finished loading all %d tuples for %s [%d / %d]", row_idx, catalog_tbl.getName(), this.finished.incrementAndGet(), this.getCatalog().getTableCount())); return; }
From source file:com.flexive.ejb.beans.structure.TypeEngineBean.java
/**
 * Update an existing type.
 * <p>
 * Applies each changed attribute of the edited type to the database one section at a time
 * (name, label, type mode, relations, ACLs, workflow, category, language mode, state,
 * permissions, history settings, version limits, relation limits, icon, structure options),
 * writing a history-tracker entry for most changes and finally re-syncing the structure cache.
 * A single {@code PreparedStatement} variable is reused across all sections: each section
 * closes the previous statement before preparing its own, and the {@code finally} block
 * closes whatever remains. On SQL errors the surrounding container transaction is marked
 * for rollback via {@code EJBUtils.rollback(ctx)}.
 *
 * @param type the type to update
 * @return id of the type
 * @throws FxApplicationException on errors
 */
private long update(FxTypeEdit type) throws FxApplicationException {
    // Nothing changed: skip all database work.
    if (!type.isChanged())
        return type.getId();
    final UserTicket ticket = FxContext.getUserTicket();
    FxPermissionUtils.checkRole(ticket, Role.StructureManagement);
    final FxEnvironment environment = CacheAdmin.getEnvironment();
    if (StringUtils.isEmpty(type.getName()))
        throw new FxInvalidParameterException("NAME", "ex.structure.update.nameMissing");
    //security checks start
    if (!ticket.mayEditACL(type.getACL().getId(), 0))
        throw new FxNoAccessException("ex.acl.noAccess.edit", type.getACL().getName());
    //security checks end
    boolean needReload = false; //full reload only needed if assignments have changed
    Connection con = null;
    PreparedStatement ps = null; // reused by every section below; closed in finally
    FxType orgType = environment.getType(type.getId()); // snapshot of the persisted state, diffed against 'type'
    StringBuilder sql = new StringBuilder(500);
    try {
        con = Database.getDbConnection();
        long instanceCount = -1; //number of instances; lazily fetched the first time a section needs it
        //start name change
        if (!orgType.getName().equals(type.getName())) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET NAME=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setString(1, type.getName());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            //update all xpaths affected
            ps.close();
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS)
                    .append(" SET XPATH=REPLACE(XPATH, ?, ?) WHERE TYPEDEF=? AND ")
                    .append(StorageManager.getRegExpLikeOperator("XPATH", "?"));
            ps = con.prepareStatement(sql.toString());
            ps.setString(1, orgType.getName() + "/");
            ps.setString(2, type.getName() + "/");
            ps.setLong(3, type.getId());
            // anchor the regexp at the start so only XPaths rooted at the old type name match
            ps.setString(4, "^" + orgType.getName() + "/");
            int changed = ps.executeUpdate();
            if (changed > 0)
                needReload = true; // assignments changed -> full structure reload required
            htracker.track(orgType, "history.type.update.name", orgType.getName(), type.getName(),
                    type.getId(), changed);
        }
        //end name change
        //start description change
        if (!orgType.getLabel().equals(type.getLabel())) {
            Database.storeFxString(type.getLabel(), con, TBL_STRUCT_TYPES, "DESCRIPTION", "ID",
                    type.getId());
            htracker.track(orgType, "history.type.update.description", orgType.getLabel(),
                    type.getLabel());
        }
        //end description change
        //start type mode changes
        if (type.getMode().getId() != orgType.getMode().getId()) {
            if (instanceCount < 0)
                instanceCount = getInstanceCount(con, type.getId());
            //allow relation => content (removing all relation specific entries) but content => relation requires 0 instances!
            if ((type.getMode() == TypeMode.Relation && orgType.getMode() == TypeMode.Content
                    && instanceCount > 0) || orgType.getMode() == TypeMode.Preload
                    || type.getMode() == TypeMode.Preload)
                throw new FxUpdateException("ex.structure.type.typeMode.notUpgradeable",
                        orgType.getMode(), type.getMode(), orgType.getName());
            if (type.getMode() == TypeMode.Content) {
                if (type.getRelations().size() > 0) {
                    //TODO: remove relation mappings
                    throw new FxUpdateException("ex.notImplemented",
                            "Remove all relation mappings for type");
                }
                if (instanceCount > 0) {
                    //TODO: remove all relation specific entries for existing contents
                    throw new FxUpdateException("ex.notImplemented",
                            "Remove all relation specific entries for existing contents");
                }
            }
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET TYPE_MODE=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getMode().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.typeMode", orgType.getMode(), type.getMode());
        }
        //end type mode changes
        //start relation changes
        if (type.getAddedRelations().size() > 0) {
            sql.setLength(0);
            sql.append("INSERT INTO ").append(TBL_STRUCT_TYPERELATIONS)
                    .append(" (TYPEDEF,TYPESRC,TYPEDST,MAXSRC,MAXDST)VALUES(?,?,?,?,?)");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            // parameter 1 is constant for every batched row; set once outside the loop
            ps.setLong(1, type.getId());
            for (FxTypeRelation rel : type.getAddedRelations()) {
                if (rel.getSource().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getSource().getName());
                if (rel.getDestination().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getDestination().getName());
                ps.setLong(2, rel.getSource().getId());
                ps.setLong(3, rel.getDestination().getId());
                ps.setLong(4, rel.getMaxSource());
                ps.setLong(5, rel.getMaxDestination());
                ps.addBatch();
                htracker.track(type, "history.type.update.relation.add", type.getName(),
                        rel.getSource().getName(), rel.getMaxSource(),
                        rel.getDestination().getName(), rel.getMaxDestination());
            }
            ps.executeBatch();
        }
        if (type.getUpdatedRelations().size() > 0) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPERELATIONS).
                    //                 1         2               3            4
                    append(" SET MAXSRC=?,MAXDST=? WHERE TYPEDEF=? AND TYPESRC=? AND TYPEDST=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(3, type.getId());
            for (FxTypeRelation rel : type.getUpdatedRelations()) {
                if (rel.getSource().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getSource().getName());
                if (rel.getDestination().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getDestination().getName());
                //TODO: check if maxSource/maxDestination is not violated if > 0
                ps.setLong(4, rel.getSource().getId());
                ps.setLong(5, rel.getDestination().getId());
                ps.setLong(1, rel.getMaxSource());
                ps.setLong(2, rel.getMaxDestination());
                ps.addBatch();
                htracker.track(type, "history.type.update.relation.update", type.getName(),
                        rel.getSource().getName(), rel.getMaxSource(),
                        rel.getDestination().getName(), rel.getMaxDestination());
            }
            ps.executeBatch();
        }
        if (type.getRemovedRelations().size() > 0) {
            sql.setLength(0);
            sql.append("DELETE FROM ").append(TBL_STRUCT_TYPERELATIONS).
                    //              1             2             3
                    append(" WHERE TYPEDEF=? AND TYPESRC=? AND TYPEDST=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getId());
            for (FxTypeRelation rel : type.getRemovedRelations()) {
                if (rel.getSource().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getSource().getName());
                if (rel.getDestination().isRelation())
                    throw new FxInvalidParameterException("ex.structure.type.relation.wrongTarget",
                            type.getName(), rel.getDestination().getName());
                // refuse removal if instances exist, unless the caller opted into cascading removal
                int[] rels = getRelationCount(con, type.getId(), rel.getSource().getId(),
                        rel.getDestination().getId());
                if (!type.isRemoveInstancesWithRelationTypes() && rels[0] > 0)
                    throw new FxRemoveException("ex.structure.type.relation.relationsExist",
                            type.getName(), rel.getSource().getName(),
                            rel.getDestination().getName(), rels[0]);
                else if (type.isRemoveInstancesWithRelationTypes() && rels[0] > 0) {
                    removeRelationEntries(con, type.getId(), rel.getSource().getId(),
                            rel.getDestination().getId());
                }
                ps.setLong(2, rel.getSource().getId());
                ps.setLong(3, rel.getDestination().getId());
                ps.addBatch();
                htracker.track(type, "history.type.update.relation.remove", type.getName(),
                        rel.getSource().getName(), rel.getMaxSource(),
                        rel.getDestination().getName(), rel.getMaxDestination());
            }
            ps.executeBatch();
        }
        //end relation changes
        //start ACL changes
        if (!type.getACL().equals(orgType.getACL())) {
            if (type.getACL().getCategory() != ACLCategory.STRUCTURE)
                throw new FxInvalidParameterException("ACL", "ex.acl.category.invalid",
                        type.getACL().getCategory(), ACLCategory.STRUCTURE);
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET ACL=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getACL().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.acl", orgType.getACL(), type.getACL());
        }
        //end ACL changes
        //start default instance ACL changes
        if (type.hasDefaultInstanceACL() != orgType.hasDefaultInstanceACL()
                || !type.getDefaultInstanceACL().equals(orgType.getDefaultInstanceACL())) {
            if (type.hasDefaultInstanceACL()
                    && type.getDefaultInstanceACL().getCategory() != ACLCategory.INSTANCE)
                throw new FxInvalidParameterException("DEFACL", "ex.acl.category.invalid",
                        type.getDefaultInstanceACL().getCategory(), ACLCategory.INSTANCE);
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET DEFACL=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            if (type.hasDefaultInstanceACL())
                ps.setLong(1, type.getDefaultInstanceACL().getId());
            else
                ps.setNull(1, java.sql.Types.INTEGER); // no default instance ACL -> NULL column
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.defacl",
                    orgType.hasDefaultInstanceACL() ? orgType.getACL() : "-",
                    type.hasDefaultInstanceACL() ? type.getDefaultInstanceACL() : "-");
        }
        //end default instance ACL changes
        //start Workflow changes
        if (!type.getWorkflow().equals(orgType.getWorkflow())) {
            if (instanceCount < 0)
                instanceCount = getInstanceCount(con, type.getId());
            if (instanceCount > 0) {
                //Workflow can not be changed with existing instances -> there is no way to reliably
                //map steps of one workflow to steps of another (even not using stepdefinitions since they
                //can be used multiple times). A possible solution would be to provide a mapping when changing
                //workflows but this should be to seldom the case to bother implementing it
                throw new FxUpdateException("ex.notImplemented",
                        "Workflow changes with existing instance");
            }
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET WORKFLOW=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getWorkflow().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.workflow", orgType.getWorkflow(),
                    type.getWorkflow());
        }
        //end Workflow changes
        //start Category changes
        if (!type.getCategory().equals(orgType.getCategory())) {
            if (!ticket.isGlobalSupervisor())
                throw new FxUpdateException("ex.structure.type.category.notSupervisor",
                        orgType.getCategory(), type.getCategory(), orgType.getName());
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET CATEGORY=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setInt(1, type.getCategory().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            // NOTE(review): unlike the other sections this one writes no htracker entry — confirm intentional
        }
        //end Category changes
        //start language mode changes
        if (!type.getLanguage().equals(orgType.getLanguage())) {
            if (instanceCount < 0)
                instanceCount = getInstanceCount(con, type.getId());
            if (instanceCount <= 0 || orgType.getLanguage().isUpgradeable(type.getLanguage())) {
                sql.setLength(0);
                sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET LANG_MODE=? WHERE ID=?");
                if (ps != null)
                    ps.close();
                ps = con.prepareStatement(sql.toString());
                ps.setInt(1, type.getLanguage().getId());
                ps.setLong(2, type.getId());
                ps.executeUpdate();
                htracker.track(type, "history.type.update.languageMode",
                        orgType.getLanguage().name(), type.getLanguage().name());
            } else
                throw new FxUpdateException("ex.structure.type.languageMode.notUpgradeable",
                        orgType.getLanguage(), type.getLanguage(), orgType.getName());
        }
        //end language mode changes
        //start state changes
        if (type.getState().getId() != orgType.getState().getId()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET TYPE_STATE=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setInt(1, type.getState().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.state", orgType.getState().name(),
                    type.getState().name());
        }
        //end state changes
        //start permission changes
        if (type.getBitCodedPermissions() != orgType.getBitCodedPermissions()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET SECURITY_MODE=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setInt(1, type.getBitCodedPermissions());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.perm",
                    FxPermissionUtils.toString(orgType.getBitCodedPermissions()),
                    FxPermissionUtils.toString(type.getBitCodedPermissions()));
        }
        //end permission changes
        //start multiple ACL setting changes
        if (type.isMultipleContentACLs() != orgType.isMultipleContentACLs()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES)
                    .append(" SET MULTIPLE_CONTENT_ACLS=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setBoolean(1, type.isMultipleContentACLs());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.multipleContentACLs",
                    orgType.isMultipleContentACLs(), type.isMultipleContentACLs());
        }
        //end multiple ACL setting changes
        //start isIncludedInSupertypeQueries setting changes
        if (type.isIncludedInSupertypeQueries() != orgType.isIncludedInSupertypeQueries()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES)
                    .append(" SET INSUPERTYPEQUERY=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setBoolean(1, type.isIncludedInSupertypeQueries());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.inSupertypeQueries",
                    orgType.isIncludedInSupertypeQueries(), type.isIncludedInSupertypeQueries());
        }
        //end isIncludedInSupertypeQueries setting changes
        //start history track/age changes
        if (type.isTrackHistory() != orgType.isTrackHistory()
                || type.getHistoryAge() != orgType.getHistoryAge()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES)
                    .append(" SET TRACKHISTORY=?, HISTORY_AGE=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setBoolean(1, type.isTrackHistory());
            ps.setLong(2, type.getHistoryAge());
            ps.setLong(3, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.history", orgType.isTrackHistory(),
                    type.isTrackHistory(), orgType.getHistoryAge(), type.getHistoryAge());
        }
        //end history track/age changes
        //start max.ver changes
        if (type.getMaxVersions() != orgType.getMaxVersions()) {
            //TODO: remove any versions that would exceed this count
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET MAX_VERSIONS=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getMaxVersions());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.maxVersions", orgType.getMaxVersions(),
                    type.getMaxVersions());
        }
        //end max.ver changes
        //start isAutoVersion setting changes
        if (type.isAutoVersion() != orgType.isAutoVersion()) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET AUTO_VERSION=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setBoolean(1, type.isAutoVersion());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.isAutoVersion", orgType.isAutoVersion(),
                    type.isAutoVersion());
        }
        //end isAutoVersion setting changes
        //start max source relations changes
        if (type.isRelation() && type.getMaxRelSource() != orgType.getMaxRelSource()) {
            //TODO: check if the new condition is not violated by existing data
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES)
                    .append(" SET REL_TOTAL_MAXSRC=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getMaxRelSource());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.maxRelSource", orgType.getMaxRelSource(),
                    type.getMaxRelSource());
        }
        //end max source relations changes
        //start max destination relations changes
        if (type.isRelation() && type.getMaxRelDestination() != orgType.getMaxRelDestination()) {
            //TODO: check if the new condition is not violated by existing data
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES)
                    .append(" SET REL_TOTAL_MAXDST=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            ps.setLong(1, type.getMaxRelDestination());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            htracker.track(type, "history.type.update.maxRelDest", orgType.getMaxRelDestination(),
                    type.getMaxRelDestination());
        }
        //end max destination relations changes
        //start icon
        if (!type.getIcon().equals(orgType.getIcon())) {
            sql.setLength(0);
            sql.append("UPDATE ").append(TBL_STRUCT_TYPES).append(" SET ICON_REF=? WHERE ID=?");
            if (ps != null)
                ps.close();
            ps = con.prepareStatement(sql.toString());
            if (type.getIcon().isEmpty())
                ps.setNull(1, java.sql.Types.INTEGER);
            else
                ps.setLong(1, type.getIcon().getDefaultTranslation().getId());
            ps.setLong(2, type.getId());
            ps.executeUpdate();
            needReload = true;
            // NOTE(review): the tracking call below invokes getDefaultTranslation() even when the
            // icon is empty (the SQL branch above guards against that) — confirm this is safe for
            // an empty icon reference.
            htracker.track(type, "history.type.update.icon",
                    orgType.getIcon().getDefaultTranslation().getId(),
                    type.getIcon().getDefaultTranslation().getId());
        }
        //end icon
        // structure option changes
        boolean optionsChanged = updateTypeOptions(con, type, orgType);
        // check if any type options must be propagated to derived types
        if (type.getDerivedTypes().size() > 0) {
            final List<FxStructureOption> inherit = new ArrayList<FxStructureOption>(
                    type.getOptions().size());
            for (FxStructureOption o : type.getOptions()) {
                if (o.getIsInherited()) {
                    inherit.add(o);
                }
            }
            if (inherit.size() > 0) {
                for (FxType derived : type.getDerivedTypes()) {
                    updateDerivedTypeOptions(con, derived, inherit);
                }
            }
        }
        //sync back to cache
        try {
            if (needReload)
                StructureLoader.reload(con);
            else {
                StructureLoader.updateType(FxContext.get().getDivisionId(),
                        loadType(con, type.getId()));
                // reload any derived types if type options have changed
                if (optionsChanged && type.getDerivedTypes().size() > 0) {
                    for (FxType derivedType : type.getDerivedTypes()) {
                        StructureLoader.updateType(FxContext.get().getDivisionId(),
                                loadType(con, derivedType.getId()));
                    }
                }
            }
        } catch (FxLoadException e) {
            throw new FxUpdateException(e);
        } catch (FxCacheException e) {
            // cache update failure is logged but does not abort the (already persisted) update
            LOG.fatal(e.getMessage(), e);
        }
    } catch (SQLException e) {
        EJBUtils.rollback(ctx);
        throw new FxUpdateException(LOG, e, "ex.db.sqlError", e.getMessage());
    } finally {
        Database.closeObjects(TypeEngineBean.class, con, ps);
    }
    return type.getId();
}
From source file:com.oltpbenchmark.benchmarks.tpcc.TPCCLoader.java
protected int loadCust(int whseKount, int distWhseKount, int custDistKount) { int k = 0;//from ww w. j a v a 2 s . c o m int t = 0; Customer customer = new Customer(); History history = new History(); PrintWriter outHist = null; try { PreparedStatement custPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_CUSTOMER); PreparedStatement histPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_HISTORY); now = new java.util.Date(); if (outputFiles == true) { out = new PrintWriter(new FileOutputStream(fileLocation + "customer.csv")); LOG.debug("\nWriting Customer file to: " + fileLocation + "customer.csv"); outHist = new PrintWriter(new FileOutputStream(fileLocation + "cust-hist.csv")); LOG.debug("\nWriting Customer History file to: " + fileLocation + "cust-hist.csv"); } t = (whseKount * distWhseKount * custDistKount * 2); LOG.debug("\nStart Cust-Hist Load for " + t + " Cust-Hists @ " + now + " ..."); for (int w = 1; w <= whseKount; w++) { for (int d = 1; d <= distWhseKount; d++) { for (int c = 1; c <= custDistKount; c++) { Timestamp sysdate = new java.sql.Timestamp(System.currentTimeMillis()); customer.c_id = c; customer.c_d_id = d; customer.c_w_id = w; // discount is random between [0.0000 ... 
0.5000] customer.c_discount = (float) (TPCCUtil.randomNumber(1, 5000, gen) / 10000.0); if (TPCCUtil.randomNumber(1, 100, gen) <= 10) { customer.c_credit = "BC"; // 10% Bad Credit } else { customer.c_credit = "GC"; // 90% Good Credit } if (c <= 1000) { customer.c_last = TPCCUtil.getLastName(c - 1); } else { customer.c_last = TPCCUtil.getNonUniformRandomLastNameForLoad(gen); } customer.c_first = TPCCUtil.randomStr(TPCCUtil.randomNumber(8, 16, gen)); customer.c_credit_lim = 50000; customer.c_balance = -10; customer.c_ytd_payment = 10; customer.c_payment_cnt = 1; customer.c_delivery_cnt = 0; customer.c_street_1 = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen)); customer.c_street_2 = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen)); customer.c_city = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen)); customer.c_state = TPCCUtil.randomStr(3).toUpperCase(); // TPC-C 4.3.2.7: 4 random digits + "11111" customer.c_zip = TPCCUtil.randomNStr(4) + "11111"; customer.c_phone = TPCCUtil.randomNStr(16); customer.c_since = sysdate; customer.c_middle = "OE"; customer.c_data = TPCCUtil.randomStr(TPCCUtil.randomNumber(300, 500, gen)); history.h_c_id = c; history.h_c_d_id = d; history.h_c_w_id = w; history.h_d_id = d; history.h_w_id = w; history.h_date = sysdate; history.h_amount = 10; history.h_data = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 24, gen)); k = k + 2; if (outputFiles == false) { custPrepStmt.setLong(1, customer.c_w_id); custPrepStmt.setLong(2, customer.c_d_id); custPrepStmt.setLong(3, customer.c_id); custPrepStmt.setDouble(4, customer.c_discount); custPrepStmt.setString(5, customer.c_credit); custPrepStmt.setString(6, customer.c_last); custPrepStmt.setString(7, customer.c_first); custPrepStmt.setDouble(8, customer.c_credit_lim); custPrepStmt.setDouble(9, customer.c_balance); custPrepStmt.setDouble(10, customer.c_ytd_payment); custPrepStmt.setLong(11, customer.c_payment_cnt); custPrepStmt.setLong(12, customer.c_delivery_cnt); custPrepStmt.setString(13, 
customer.c_street_1); custPrepStmt.setString(14, customer.c_street_2); custPrepStmt.setString(15, customer.c_city); custPrepStmt.setString(16, customer.c_state); custPrepStmt.setString(17, customer.c_zip); custPrepStmt.setString(18, customer.c_phone); custPrepStmt.setTimestamp(19, customer.c_since); custPrepStmt.setString(20, customer.c_middle); custPrepStmt.setString(21, customer.c_data); custPrepStmt.addBatch(); histPrepStmt.setInt(1, history.h_c_id); histPrepStmt.setInt(2, history.h_c_d_id); histPrepStmt.setInt(3, history.h_c_w_id); histPrepStmt.setInt(4, history.h_d_id); histPrepStmt.setInt(5, history.h_w_id); histPrepStmt.setTimestamp(6, history.h_date); histPrepStmt.setDouble(7, history.h_amount); histPrepStmt.setString(8, history.h_data); histPrepStmt.addBatch(); if ((k % configCommitCount) == 0) { long tmpTime = new java.util.Date().getTime(); String etStr = " Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " "; LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t); lastTimeMS = tmpTime; custPrepStmt.executeBatch(); histPrepStmt.executeBatch(); custPrepStmt.clearBatch(); custPrepStmt.clearBatch(); transCommit(); } } else { String str = ""; str = str + customer.c_id + ","; str = str + customer.c_d_id + ","; str = str + customer.c_w_id + ","; str = str + customer.c_discount + ","; str = str + customer.c_credit + ","; str = str + customer.c_last + ","; str = str + customer.c_first + ","; str = str + customer.c_credit_lim + ","; str = str + customer.c_balance + ","; str = str + customer.c_ytd_payment + ","; str = str + customer.c_payment_cnt + ","; str = str + customer.c_delivery_cnt + ","; str = str + customer.c_street_1 + ","; str = str + customer.c_street_2 + ","; str = str + customer.c_city + ","; str = str + customer.c_state + ","; str = str + customer.c_zip + ","; str = str + customer.c_phone; out.println(str); str = ""; str = str + history.h_c_id + ","; str = str + history.h_c_d_id + ","; str = str + history.h_c_w_id + ","; 
str = str + history.h_d_id + ","; str = str + history.h_w_id + ","; str = str + history.h_date + ","; str = str + history.h_amount + ","; str = str + history.h_data; outHist.println(str); if ((k % configCommitCount) == 0) { long tmpTime = new java.util.Date().getTime(); String etStr = " Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " "; LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t); lastTimeMS = tmpTime; } } } // end for [c] } // end for [d] } // end for [w] long tmpTime = new java.util.Date().getTime(); String etStr = " Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " "; LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t); lastTimeMS = tmpTime; custPrepStmt.executeBatch(); histPrepStmt.executeBatch(); custPrepStmt.clearBatch(); histPrepStmt.clearBatch(); transCommit(); now = new java.util.Date(); if (outputFiles == true) { outHist.close(); } LOG.debug("End Cust-Hist Data Load @ " + now); } catch (SQLException se) { LOG.debug(se.getMessage()); transRollback(); if (outputFiles == true) { outHist.close(); } } catch (Exception e) { e.printStackTrace(); transRollback(); if (outputFiles == true) { outHist.close(); } } return (k); }