List of usage examples for java.sql PreparedStatement clearBatch
void clearBatch() throws SQLException;
Empties this Statement object's current list of SQL commands.
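All of the project examples below share the same shape: queue rows with addBatch(), flush them with executeBatch(), commit, then call clearBatch() before reusing the statement. The JDBC specification expects executeBatch() to reset the command list itself, but the explicit clearBatch() seen in every example here makes the intent obvious and guards against drivers that historically did not reset it. As a minimal sketch of the pattern (the dataSource variable and the person table are hypothetical, not taken from the examples below):

try (Connection conn = dataSource.getConnection();
        PreparedStatement ps = conn.prepareStatement("insert into person (id, name) values (?, ?)")) {
    conn.setAutoCommit(false);
    for (int id = 1; id <= 3; id++) {
        ps.setInt(1, id);
        ps.setString(2, "name-" + id);
        ps.addBatch(); // queue the command; nothing is sent yet
    }
    int[] counts = ps.executeBatch(); // one update count per queued command
    conn.commit();
    ps.clearBatch(); // empty the command list before the statement is reused
}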
From source file: org.pentaho.di.core.database.Database.java

/**
 * Insert a row into the database using a prepared statement that has all values set.
 *
 * @param ps
 *          The prepared statement
 * @param batch
 *          True if you want to use batch inserts (size = commit size)
 * @param handleCommit
 *          True if you want to handle the commit here after the commit size (False e.g. in case the step
 *          handles this, see TableOutput)
 * @return true if the rows are safe: if a batch of rows was sent to the database OR if a commit was done.
 * @throws KettleDatabaseException
 */
public boolean insertRow(PreparedStatement ps, boolean batch, boolean handleCommit)
        throws KettleDatabaseException {
    String debug = "insertRow start";
    boolean rowsAreSafe = false;
    boolean isBatchUpdate = false;

    try {
        // Unique connections and batch inserts don't mix when you want to roll
        // back on certain databases. That's why we disable the batch insert in that case.
        boolean useBatchInsert = batch && getDatabaseMetaData().supportsBatchUpdates()
                && databaseMeta.supportsBatchUpdates() && Const.isEmpty(connectionGroup);

        // Add support for batch inserts...
        if (!isAutoCommit()) {
            if (useBatchInsert) {
                debug = "insertRow add batch";
                ps.addBatch(); // Add the batch, but don't forget to run the batch
            } else {
                debug = "insertRow exec update";
                ps.executeUpdate();
            }
        } else {
            ps.executeUpdate();
        }

        written++;

        if (handleCommit) {
            // some steps handle the commit themselves (see e.g. TableOutput step)
            if (!isAutoCommit() && (written % commitsize) == 0) {
                if (useBatchInsert) {
                    isBatchUpdate = true;
                    debug = "insertRow executeBatch commit";
                    ps.executeBatch();
                    commit();
                    ps.clearBatch();
                } else {
                    debug = "insertRow normal commit";
                    commit();
                }
                written = 0;
                rowsAreSafe = true;
            }
        }

        return rowsAreSafe;
    } catch (BatchUpdateException ex) {
        throw createKettleDatabaseBatchException("Error updating batch", ex);
    } catch (SQLException ex) {
        if (isBatchUpdate) {
            throw createKettleDatabaseBatchException("Error updating batch", ex);
        } else {
            throw new KettleDatabaseException("Error inserting/updating row", ex);
        }
    } catch (Exception e) {
        // System.out.println("Unexpected exception in ["+debug+"] : "+e.getMessage());
        throw new KettleDatabaseException("Unexpected error inserting/updating row in part [" + debug + "]", e);
    }
}
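The Pentaho example above wraps BatchUpdateException in a Kettle-specific exception. When handling it directly, BatchUpdateException.getUpdateCounts() reports the per-command results: depending on the driver, it either stops at the first failure (returning a shorter array) or continues and marks failed commands with Statement.EXECUTE_FAILED. A hedged sketch of that handling, with ps and conn as in the sketch near the top of this page:

try {
    ps.executeBatch();
    conn.commit();
} catch (BatchUpdateException bue) {
    int[] counts = bue.getUpdateCounts();
    for (int i = 0; i < counts.length; i++) {
        if (counts[i] == Statement.EXECUTE_FAILED) {
            System.err.println("command " + i + " in the batch failed");
        }
    }
    conn.rollback(); // undo the partially applied batch
    ps.clearBatch(); // drop anything still queued before retrying
}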
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java

private synchronized boolean logBatchDB(PendingLogTask[] packets) {
    if (isClosed())
        return false;
    if (!isLoggingEnabled() /* && !ENABLE_JOURNALING */)
        return true;

    boolean logged = true;
    PreparedStatement pstmt = null;
    Connection conn = null;
    String cmd = "insert into " + getMTable() + " values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    long t0 = System.nanoTime(), t0Millis = System.currentTimeMillis(), t1 = t0;
    int i = 0;
    try {
        for (i = 0; i < packets.length; i++) {
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(cmd);
            }
            PaxosPacket packet = packets[i].lmTask.logMsg;
            // accept and decision use a faster implementation
            int[] sb = AbstractPaxosLogger.getSlotBallot(packet);

            pstmt.setString(1, packet.getPaxosID());
            pstmt.setInt(2, packet.getVersion());
            pstmt.setInt(3, sb[0]);
            pstmt.setInt(4, sb[1]);
            pstmt.setInt(5, sb[2]);
            pstmt.setInt(6, packet.getType().getInt());
            pstmt.setString(7, packets[i].logfile);
            pstmt.setLong(8, packets[i].logfileOffset);

            byte[] msgBytes = isJournalingEnabled() ? new byte[0] : deflate(toBytes(packet));

            if (getLogMessageBlobOption()) {
                pstmt.setInt(9, packets[i].length); // msgBytes.length);
                Blob blob = conn.createBlob();
                blob.setBytes(1, msgBytes);
                pstmt.setBlob(10, blob);
            } else {
                String packetString = packet.toString();
                pstmt.setInt(9, packetString.length());
                pstmt.setString(10, packetString);
            }

            pstmt.addBatch();
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == packets.length) {
                int[] executed = pstmt.executeBatch();
                conn.commit();
                pstmt.clearBatch();
                for (int j : executed)
                    logged = logged && (j > 0);
                if (logged)
                    log.log(Level.FINE, "{0}{1}{2}{3}{4}{5}",
                            new Object[] { this, " successfully logged the " + "last ",
                                    (i + 1) % MAX_DB_BATCH_SIZE == 0 ? MAX_DB_BATCH_SIZE
                                            : (i + 1) % MAX_DB_BATCH_SIZE,
                                    " messages in ", (System.nanoTime() - t1) / 1000, " us" });
                t1 = System.nanoTime();
            }
        }
    } catch (Exception sqle) {
        /* If any exception happens, we must return false to preserve
         * safety. We return true only if every message is logged
         * successfully. */
        sqle.printStackTrace();
        log.severe(this + " incurred " + sqle + " while logging batch of size:" + packets.length
                + "; packet_length = " + packets[i].toString().length());
        assert (packets[i].toString().length() < MAX_LOG_MESSAGE_SIZE);
        logged = false;
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }
    if (ENABLE_JOURNALING)
        DelayProfiler.updateDelayNano("index", t0, packets.length);
    else
        DelayProfiler.updateDelay("logBatchDB", t0Millis);
    // DelayProfiler.updateCount("#logged", packets.length);
    DelayProfiler.updateMovAvg("#potential_batched", packets.length);
    return logged;
}
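The chunking above, flushing whenever (i + 1) % MAX_DB_BATCH_SIZE == 0 and once more for the remainder, recurs in most examples on this page; it bounds the memory the driver holds for queued commands. Distilled into a standalone sketch (Row, rows, and CHUNK are illustrative names, not from the gigapaxos source):

final int CHUNK = 1000; // tune for the driver and row size
int pending = 0;
for (Row row : rows) {
    ps.setLong(1, row.id);
    ps.setString(2, row.value);
    ps.addBatch();
    if (++pending == CHUNK) {
        ps.executeBatch();
        conn.commit();
        ps.clearBatch(); // start the next chunk from an empty list
        pending = 0;
    }
}
if (pending > 0) { // flush the tail
    ps.executeBatch();
    conn.commit();
}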
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java

/**
 * Batched version of putCheckpointState. This is a complicated method with
 * very different behaviors for updates and inserts. If update is true, it
 * attempts to batch-update all the checkpoints, and for those
 * updates/inserts that failed, it attempts to individually update/insert
 * them through
 * {@link #putCheckpointState(String, int, Set, int, Ballot, String, int)}.
 * It is still possible that only a subset of the updates succeed, but that
 * is okay as checkpoint failure is not fatal except in the case of initial
 * checkpoint insertion.
 *
 * If update is false, it means that this is a batch-insertion of initial
 * checkpoints, and it is critical that this batch operation is atomic. If
 * the batch operation only partly succeeds, it should throw an exception so
 * that the caller cannot proceed any further with the batch insertion, but
 * it should also roll back the changes.
 *
 * The reason batched creation of initial checkpoints should be atomic is
 * that otherwise, the checkpoints that did get written essentially are
 * created paxos instances, but there is no easy way for the caller to know
 * that they got created, and this could lead to nasty surprises later. If
 * the caller always follows up failed batch creations with sequential
 * creation, then the rollback is not critical, as the sequential creation
 * will simply "recover" from any checkpoint left behind during a
 * previous failed batch creation. If the caller chooses to keep re-issuing
 * the batch creation and expects to eventually succeed (assuming that the
 * instances in the batch didn't actually exist a priori), then rolling back
 * failed batch creations like below will not help in the event of crashes.
 * So, the caller should really just issue sequential creation requests if a
 * batch creation fails or times out for any reason.
 *
 * Note: this method just needs to be atomic, i.e., all or none, but not
 * synchronized. Synchronizing it will invert the invariant that messageLog
 * is always locked before (because of the getMinLogFile invocation)
 * SQLPaxosLogger.
 *
 * @param tasks
 * @param update
 */
@Override
public boolean putCheckpointState(CheckpointTask[] tasks, boolean update) {
    if (isClosed() || DISABLE_CHECKPOINTING)
        return false;

    boolean batchSuccess = true;
    boolean[] committed = new boolean[tasks.length];
    long t1 = System.currentTimeMillis();

    String insertCmd = "insert into " + getCTable()
            + " (version,members,slot,ballotnum,coordinator,state,create_time, min_logfile, paxos_id) values (?,?,?,?,?,?,?,?,?)";
    String updateCmd = "update " + getCTable()
            + " set version=?,members=?, slot=?, ballotnum=?, coordinator=?, state=?, create_time=?, min_logfile=? where paxos_id=?";
    String cmd = update ? updateCmd : insertCmd;

    PreparedStatement insertCP = null;
    Connection conn = null;
    String minLogfile = null;
    ArrayList<Integer> batch = new ArrayList<Integer>();
    try {
        for (int i = 0; i < tasks.length; i++) {
            CheckpointTask task = tasks[i];
            assert (task != null);
            assert (update || task.slot == 0);
            if ((task.slot == 0) == update) {
                this.putCheckpointState(task.paxosID, task.version, (task.members), task.slot, task.ballot,
                        task.state, task.gcSlot, task.createTime);
                committed[i] = true;
                continue;
            }
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                insertCP = conn.prepareStatement(cmd);
            }
            insertCP.setInt(1, task.version);
            insertCP.setString(2, Util.toJSONString(task.members));
            insertCP.setInt(3, task.slot);
            insertCP.setInt(4, task.ballot.ballotNumber);
            insertCP.setInt(5, task.ballot.coordinatorID);
            if (getCheckpointBlobOption()) {
                Blob blob = conn.createBlob();
                blob.setBytes(1, task.state.getBytes(CHARSET));
                insertCP.setBlob(6, blob);
            } else
                insertCP.setString(6, task.state);
            insertCP.setLong(7, task.createTime);
            insertCP.setString(8, minLogfile = this.getSetGCAndGetMinLogfile(task.paxosID, task.version,
                    task.slot - task.gcSlot < 0 ? task.slot : task.gcSlot));
            insertCP.setString(9, task.paxosID);
            insertCP.addBatch();
            batch.add(i);
            incrTotalCheckpoints();

            if (shouldLogCheckpoint(1))
                log.log(Level.INFO, "{0} checkpointed> ({1}:{2}, {3}{4}, {5}, ({6}, {7}) [{8}]) {9}",
                        new Object[] { this, task.paxosID, task.version, (task.members), task.slot,
                                task.ballot, task.gcSlot, minLogfile,
                                Util.truncate(task.state, TRUNCATED_STATE_SIZE, TRUNCATED_STATE_SIZE),
                                (tasks.length > 1 ? "(batched=" + tasks.length + ")" : "") });

            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == tasks.length) {
                int[] executed = insertCP.executeBatch();
                conn.commit();
                insertCP.clearBatch();
                for (int j = 0; j < executed.length; j++)
                    batchSuccess = batchSuccess && (committed[batch.get(j)] = (executed[j] > 0));
                batch.clear();
            }
        }
        if (ENABLE_INSTRUMENTATION && Util.oneIn(10))
            DelayProfiler.updateDelay("checkpoint", t1, tasks.length);
    } catch (SQLException | UnsupportedEncodingException sqle) {
        log.log(Level.SEVERE, "{0} SQLException while batched checkpointing", new Object[] { this });
        sqle.printStackTrace();
    } finally {
        cleanup(insertCP);
        cleanup(conn);
    }

    if (!batchSuccess) {
        if (update) {
            for (int i = 0; i < tasks.length; i++)
                if (!committed[i])
                    this.putCheckpointState(tasks[i].paxosID, tasks[i].version, tasks[i].members,
                            tasks[i].slot, tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
        } else {
            // rollback
            for (int i = 0; i < tasks.length; i++)
                if (committed[i])
                    this.deleteCheckpoint(tasks[i].paxosID, tasks[i].version, tasks[i].members, tasks[i].slot,
                            tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
            throw new PaxosInstanceCreationException(
                    "Rolled back failed batch-creation of " + tasks.length + " paxos instances");
        }
    }
    for (CheckpointTask task : tasks)
        this.deleteOutdatedMessages(task.paxosID, task.version, task.ballot, task.slot,
                task.ballot.ballotNumber, task.ballot.coordinatorID, task.gcSlot);
    return true;
}
From source file: com.oltpbenchmark.benchmarks.seats.SEATSLoader.java

/**
 * @param catalog_tbl
 */
public void loadTable(Table catalog_tbl, Iterable<Object[]> iterable, int batch_size) {
    // Special Case: Airport Locations
    final boolean is_airport = catalog_tbl.getName().equals(SEATSConstants.TABLENAME_AIRPORT);
    if (LOG.isDebugEnabled())
        LOG.debug(String.format("Generating new records for table %s [batchSize=%d]", catalog_tbl.getName(),
                batch_size));
    final List<Column> columns = catalog_tbl.getColumns();

    // Check whether we have any special mappings that we need to maintain
    Map<Integer, Integer> code_2_id = new HashMap<Integer, Integer>();
    Map<Integer, Map<String, Long>> mapping_columns = new HashMap<Integer, Map<String, Long>>();
    for (int col_code_idx = 0, cnt = columns.size(); col_code_idx < cnt; col_code_idx++) {
        Column catalog_col = columns.get(col_code_idx);
        String col_name = catalog_col.getName();

        // Code Column -> Id Column Mapping
        // Check to see whether this table has columns whose
        // code values we need to map to tuple ids
        String col_id_name = this.profile.code_columns.get(col_name);
        if (col_id_name != null) {
            Column catalog_id_col = catalog_tbl.getColumnByName(col_id_name);
            assert (catalog_id_col != null) : "The id column " + catalog_tbl.getName() + "." + col_id_name
                    + " is missing";
            int col_id_idx = catalog_tbl.getColumnIndex(catalog_id_col);
            code_2_id.put(col_code_idx, col_id_idx);
        }

        // Foreign Key Column to Code->Id Mapping
        // If this column references a foreign key that is used in the Code->Id mapping
        // that we generated above, then we need to know when we should change the
        // column value from a code to the id stored in our lookup table
        if (this.profile.fkey_value_xref.containsKey(col_name)) {
            String col_fkey_name = this.profile.fkey_value_xref.get(col_name);
            mapping_columns.put(col_code_idx, this.profile.code_id_xref.get(col_fkey_name));
        }
    } // FOR

    int row_idx = 0;
    int row_batch = 0;
    try {
        String insert_sql = SQLUtil.getInsertSQL(catalog_tbl);
        PreparedStatement insert_stmt = this.conn.prepareStatement(insert_sql);
        int sqlTypes[] = catalog_tbl.getColumnTypes();

        for (Object tuple[] : iterable) {
            assert (tuple[0] != null) : "The primary key for " + catalog_tbl.getName() + " is null";

            // AIRPORT
            if (is_airport) {
                // Skip any airport that does not have flights
                int col_code_idx = catalog_tbl.getColumnByName("AP_CODE").getIndex();
                if (profile.hasFlights((String) tuple[col_code_idx]) == false) {
                    if (LOG.isTraceEnabled())
                        LOG.trace(String.format("Skipping AIRPORT '%s' because it does not have any flights",
                                tuple[col_code_idx]));
                    continue;
                }

                // Update the row # so that it matches what we're actually loading
                int col_id_idx = catalog_tbl.getColumnByName("AP_ID").getIndex();
                tuple[col_id_idx] = (long) (row_idx + 1);

                // Store Locations
                int col_lat_idx = catalog_tbl.getColumnByName("AP_LATITUDE").getIndex();
                int col_lon_idx = catalog_tbl.getColumnByName("AP_LONGITUDE").getIndex();
                Pair<Double, Double> coords = Pair.of((Double) tuple[col_lat_idx], (Double) tuple[col_lon_idx]);
                if (coords.first == null || coords.second == null) {
                    LOG.error(Arrays.toString(tuple));
                }
                assert (coords.first != null) : String.format("Unexpected null latitude for airport '%s' [%d]",
                        tuple[col_code_idx], col_lat_idx);
                assert (coords.second != null) : String.format(
                        "Unexpected null longitude for airport '%s' [%d]", tuple[col_code_idx], col_lon_idx);
                this.airport_locations.put(tuple[col_code_idx].toString(), coords);
                if (LOG.isTraceEnabled())
                    LOG.trace(String.format("Storing location for '%s': %s", tuple[col_code_idx], coords));
            }

            // Code Column -> Id Column
            for (int col_code_idx : code_2_id.keySet()) {
                assert (tuple[col_code_idx] != null) : String.format(
                        "The value of the code column at '%d' is null for %s\n%s", col_code_idx,
                        catalog_tbl.getName(), Arrays.toString(tuple));
                String code = tuple[col_code_idx].toString().trim();
                if (code.length() > 0) {
                    Column from_column = columns.get(col_code_idx);
                    assert (from_column != null);
                    Column to_column = columns.get(code_2_id.get(col_code_idx));
                    assert (to_column != null) : String.format("Invalid column %s.%s", catalog_tbl.getName(),
                            code_2_id.get(col_code_idx));
                    long id = (Long) tuple[code_2_id.get(col_code_idx)];
                    if (LOG.isTraceEnabled())
                        LOG.trace(String.format("Mapping %s '%s' -> %s '%d'", from_column.fullName(), code,
                                to_column.fullName(), id));
                    this.profile.code_id_xref.get(to_column.getName()).put(code, id);
                }
            } // FOR

            // Foreign Key Code -> Foreign Key Id
            for (int col_code_idx : mapping_columns.keySet()) {
                Column catalog_col = columns.get(col_code_idx);
                assert (tuple[col_code_idx] != null || catalog_col.isNullable()) : String.format(
                        "The code %s column at '%d' is null for %s id=%s\n%s", catalog_col.fullName(),
                        col_code_idx, catalog_tbl.getName(), tuple[0], Arrays.toString(tuple));
                if (tuple[col_code_idx] != null) {
                    String code = tuple[col_code_idx].toString();
                    tuple[col_code_idx] = mapping_columns.get(col_code_idx).get(code);
                    if (LOG.isTraceEnabled())
                        LOG.trace(String.format("Mapped %s '%s' -> %s '%s'", catalog_col.fullName(), code,
                                catalog_col.getForeignKey().fullName(), tuple[col_code_idx]));
                }
            } // FOR

            for (int i = 0; i < tuple.length; i++) {
                try {
                    if (tuple[i] != null) {
                        insert_stmt.setObject(i + 1, tuple[i]);
                    } else {
                        insert_stmt.setNull(i + 1, sqlTypes[i]);
                    }
                } catch (SQLDataException ex) {
                    LOG.error("INVALID " + catalog_tbl.getName() + " TUPLE: " + Arrays.toString(tuple));
                    throw new RuntimeException(
                            "Failed to set value for " + catalog_tbl.getColumn(i).fullName(), ex);
                }
            } // FOR

            insert_stmt.addBatch();
            row_idx++;

            if (++row_batch >= batch_size) {
                LOG.debug(String.format("Loading %s batch [total=%d]", catalog_tbl.getName(), row_idx));
                insert_stmt.executeBatch();
                conn.commit();
                insert_stmt.clearBatch();
                row_batch = 0;
            }
        } // FOR

        if (row_batch > 0) {
            insert_stmt.executeBatch();
            conn.commit();
        }
        insert_stmt.close();
    } catch (Exception ex) {
        throw new RuntimeException("Failed to load table " + catalog_tbl.getName(), ex);
    }

    if (is_airport)
        assert (this.profile.getAirportCount() == row_idx) : String.format("%d != %d",
                profile.getAirportCount(), row_idx);

    // Record the number of tuples that we loaded for this table in the profile
    if (catalog_tbl.getName().equals(SEATSConstants.TABLENAME_RESERVATION)) {
        this.profile.num_reservations = row_idx + 1;
    }

    LOG.info(String.format("Finished loading all %d tuples for %s [%d / %d]", row_idx, catalog_tbl.getName(),
            this.finished.incrementAndGet(), this.getCatalog().getTableCount()));
    return;
}
From source file: com.oltpbenchmark.benchmarks.tpcc.TPCCLoader.java

protected int loadCust(int whseKount, int distWhseKount, int custDistKount) {
    int k = 0;
    int t = 0;
    Customer customer = new Customer();
    History history = new History();
    PrintWriter outHist = null;

    try {
        PreparedStatement custPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_CUSTOMER);
        PreparedStatement histPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_HISTORY);

        now = new java.util.Date();
        if (outputFiles == true) {
            out = new PrintWriter(new FileOutputStream(fileLocation + "customer.csv"));
            LOG.debug("\nWriting Customer file to: " + fileLocation + "customer.csv");
            outHist = new PrintWriter(new FileOutputStream(fileLocation + "cust-hist.csv"));
            LOG.debug("\nWriting Customer History file to: " + fileLocation + "cust-hist.csv");
        }

        t = (whseKount * distWhseKount * custDistKount * 2);
        LOG.debug("\nStart Cust-Hist Load for " + t + " Cust-Hists @ " + now + " ...");

        for (int w = 1; w <= whseKount; w++) {
            for (int d = 1; d <= distWhseKount; d++) {
                for (int c = 1; c <= custDistKount; c++) {
                    Timestamp sysdate = new java.sql.Timestamp(System.currentTimeMillis());

                    customer.c_id = c;
                    customer.c_d_id = d;
                    customer.c_w_id = w;

                    // discount is random between [0.0000 ... 0.5000]
                    customer.c_discount = (float) (TPCCUtil.randomNumber(1, 5000, gen) / 10000.0);

                    if (TPCCUtil.randomNumber(1, 100, gen) <= 10) {
                        customer.c_credit = "BC"; // 10% Bad Credit
                    } else {
                        customer.c_credit = "GC"; // 90% Good Credit
                    }
                    if (c <= 1000) {
                        customer.c_last = TPCCUtil.getLastName(c - 1);
                    } else {
                        customer.c_last = TPCCUtil.getNonUniformRandomLastNameForLoad(gen);
                    }
                    customer.c_first = TPCCUtil.randomStr(TPCCUtil.randomNumber(8, 16, gen));
                    customer.c_credit_lim = 50000;
                    customer.c_balance = -10;
                    customer.c_ytd_payment = 10;
                    customer.c_payment_cnt = 1;
                    customer.c_delivery_cnt = 0;
                    customer.c_street_1 = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen));
                    customer.c_street_2 = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen));
                    customer.c_city = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 20, gen));
                    customer.c_state = TPCCUtil.randomStr(3).toUpperCase();
                    // TPC-C 4.3.2.7: 4 random digits + "11111"
                    customer.c_zip = TPCCUtil.randomNStr(4) + "11111";
                    customer.c_phone = TPCCUtil.randomNStr(16);
                    customer.c_since = sysdate;
                    customer.c_middle = "OE";
                    customer.c_data = TPCCUtil.randomStr(TPCCUtil.randomNumber(300, 500, gen));

                    history.h_c_id = c;
                    history.h_c_d_id = d;
                    history.h_c_w_id = w;
                    history.h_d_id = d;
                    history.h_w_id = w;
                    history.h_date = sysdate;
                    history.h_amount = 10;
                    history.h_data = TPCCUtil.randomStr(TPCCUtil.randomNumber(10, 24, gen));

                    k = k + 2;
                    if (outputFiles == false) {
                        custPrepStmt.setLong(1, customer.c_w_id);
                        custPrepStmt.setLong(2, customer.c_d_id);
                        custPrepStmt.setLong(3, customer.c_id);
                        custPrepStmt.setDouble(4, customer.c_discount);
                        custPrepStmt.setString(5, customer.c_credit);
                        custPrepStmt.setString(6, customer.c_last);
                        custPrepStmt.setString(7, customer.c_first);
                        custPrepStmt.setDouble(8, customer.c_credit_lim);
                        custPrepStmt.setDouble(9, customer.c_balance);
                        custPrepStmt.setDouble(10, customer.c_ytd_payment);
                        custPrepStmt.setLong(11, customer.c_payment_cnt);
                        custPrepStmt.setLong(12, customer.c_delivery_cnt);
                        custPrepStmt.setString(13, customer.c_street_1);
                        custPrepStmt.setString(14, customer.c_street_2);
                        custPrepStmt.setString(15, customer.c_city);
                        custPrepStmt.setString(16, customer.c_state);
                        custPrepStmt.setString(17, customer.c_zip);
                        custPrepStmt.setString(18, customer.c_phone);
                        custPrepStmt.setTimestamp(19, customer.c_since);
                        custPrepStmt.setString(20, customer.c_middle);
                        custPrepStmt.setString(21, customer.c_data);
                        custPrepStmt.addBatch();

                        histPrepStmt.setInt(1, history.h_c_id);
                        histPrepStmt.setInt(2, history.h_c_d_id);
                        histPrepStmt.setInt(3, history.h_c_w_id);
                        histPrepStmt.setInt(4, history.h_d_id);
                        histPrepStmt.setInt(5, history.h_w_id);
                        histPrepStmt.setTimestamp(6, history.h_date);
                        histPrepStmt.setDouble(7, history.h_amount);
                        histPrepStmt.setString(8, history.h_data);
                        histPrepStmt.addBatch();

                        if ((k % configCommitCount) == 0) {
                            long tmpTime = new java.util.Date().getTime();
                            String etStr = " Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " ";
                            LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t);
                            lastTimeMS = tmpTime;
                            custPrepStmt.executeBatch();
                            histPrepStmt.executeBatch();
                            // clear both batches before the next chunk
                            custPrepStmt.clearBatch();
                            histPrepStmt.clearBatch();
                            transCommit();
                        }
                    } else {
                        String str = "";
                        str = str + customer.c_id + ",";
                        str = str + customer.c_d_id + ",";
                        str = str + customer.c_w_id + ",";
                        str = str + customer.c_discount + ",";
                        str = str + customer.c_credit + ",";
                        str = str + customer.c_last + ",";
                        str = str + customer.c_first + ",";
                        str = str + customer.c_credit_lim + ",";
                        str = str + customer.c_balance + ",";
                        str = str + customer.c_ytd_payment + ",";
                        str = str + customer.c_payment_cnt + ",";
                        str = str + customer.c_delivery_cnt + ",";
                        str = str + customer.c_street_1 + ",";
                        str = str + customer.c_street_2 + ",";
                        str = str + customer.c_city + ",";
                        str = str + customer.c_state + ",";
                        str = str + customer.c_zip + ",";
                        str = str + customer.c_phone;
                        out.println(str);

                        str = "";
                        str = str + history.h_c_id + ",";
                        str = str + history.h_c_d_id + ",";
                        str = str + history.h_c_w_id + ",";
                        str = str + history.h_d_id + ",";
                        str = str + history.h_w_id + ",";
                        str = str + history.h_date + ",";
                        str = str + history.h_amount + ",";
                        str = str + history.h_data;
                        outHist.println(str);

                        if ((k % configCommitCount) == 0) {
                            long tmpTime = new java.util.Date().getTime();
                            String etStr = " Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " ";
                            LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t);
                            lastTimeMS = tmpTime;
                        }
                    }
                } // end for [c]
            } // end for [d]
        } // end for [w]

        long tmpTime = new java.util.Date().getTime();
        String etStr = " Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " ";
        LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t);
        lastTimeMS = tmpTime;
        custPrepStmt.executeBatch();
        histPrepStmt.executeBatch();
        custPrepStmt.clearBatch();
        histPrepStmt.clearBatch();
        transCommit();
        now = new java.util.Date();
        if (outputFiles == true) {
            outHist.close();
        }
        LOG.debug("End Cust-Hist Data Load @ " + now);
    } catch (SQLException se) {
        LOG.debug(se.getMessage());
        transRollback();
        if (outputFiles == true) {
            outHist.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
        transRollback();
        if (outputFiles == true) {
            outHist.close();
        }
    }
    return (k);
}
From source file: org.seasar.dbflute.logic.replaceschema.loaddata.impl.DfDelimiterDataWriterImpl.java

public void writeData(DfDelimiterDataResultInfo resultInfo) throws IOException {
    _log.info("/= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = ");
    _log.info("writeData(" + _fileName + ")");
    _log.info("= = = = = = =/");
    FileInputStream fis = null;
    InputStreamReader ir = null;
    BufferedReader br = null;

    final String dataDirectory = Srl.substringLastFront(_fileName, "/");
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final String tableDbName;
    {
        String tmp = _fileName.substring(_fileName.lastIndexOf("/") + 1, _fileName.lastIndexOf("."));
        if (tmp.indexOf("-") >= 0) {
            tmp = tmp.substring(tmp.indexOf("-") + "-".length());
        }
        tableDbName = tmp;
    }
    final Map<String, DfColumnMeta> columnInfoMap = getColumnMetaMap(tableDbName);
    if (columnInfoMap.isEmpty()) {
        throwTableNotFoundException(_fileName, tableDbName);
    }

    // process before handling table
    beforeHandlingTable(tableDbName, columnInfoMap);

    String lineString = null;
    String preContinueString = null;
    String executedSql = null;
    final List<String> columnNameList = new ArrayList<String>();
    final List<String> additionalColumnList = new ArrayList<String>();
    final List<String> valueList = new ArrayList<String>();

    final File dataFile = new File(_fileName);
    Connection conn = null;
    PreparedStatement ps = null;
    try {
        fis = new FileInputStream(dataFile);
        ir = new InputStreamReader(fis, _encoding);
        br = new BufferedReader(ir);

        FirstLineInfo firstLineInfo = null;
        int loopIndex = -1;
        int rowNumber = 0;
        int addedBatchSize = 0;
        while (true) {
            ++loopIndex;

            lineString = br.readLine();
            if (lineString == null) {
                break;
            }

            // /- - - - - - - - - - - - - - - - - - - - - -
            // initialize column definition from first line
            // - - - - - - - - - -/
            if (loopIndex == 0) {
                firstLineInfo = getFirstLineInfo(_delimiter, lineString);
                columnNameList.addAll(firstLineInfo.getColumnNameList());
                if (columnNameList.isEmpty()) {
                    throwDelimiterDataColumnDefNotFoundException(_fileName, tableDbName);
                }
                final StringSet columnSet = StringSet.createAsFlexible();
                columnSet.addAll(columnNameList);
                for (String defaultColumn : _defaultValueMap.keySet()) {
                    if (columnSet.contains(defaultColumn)) {
                        continue;
                    }
                    additionalColumnList.add(defaultColumn);
                }
                columnNameList.addAll(additionalColumnList);
                continue;
            }

            // /- - - - - - - - - - - - - - -
            // analyze values in line strings
            // - - - - - - - - - -/
            lineString = filterLineString(lineString);
            {
                if (preContinueString != null && !preContinueString.equals("")) {
                    lineString = preContinueString + "\n" + lineString;
                }
                final ValueLineInfo valueLineInfo = arrangeValueList(lineString, _delimiter);
                final List<String> ls = valueLineInfo.getValueList();
                if (valueLineInfo.isContinueNextLine()) {
                    preContinueString = ls.remove(ls.size() - 1);
                    valueList.addAll(ls);
                    continue;
                }
                valueList.addAll(ls);
            }
            // *one record is prepared here

            // /- - - - - - - - - - - - - -
            // check definition differences
            // - - - - - - - - - -/
            if (isDifferentColumnValueCount(firstLineInfo, valueList)) {
                String msg = "The count of values wasn't correct:";
                msg = msg + " column=" + firstLineInfo.getColumnNameList().size();
                msg = msg + " value=" + valueList.size();
                msg = msg + " -> " + valueList;
                resultInfo.registerWarningFile(_fileName, msg);

                // clear temporary variables
                valueList.clear();
                preContinueString = null;
                continue;
            }
            // *valid record is prepared here
            ++rowNumber;

            // /- - - - - - - - - - - - - - - -
            // process registration to database
            // - - - - - - - - - -/
            final DfDelimiterDataWriteSqlBuilder sqlBuilder = new DfDelimiterDataWriteSqlBuilder();
            sqlBuilder.setTableDbName(tableDbName);
            sqlBuilder.setColumnInfoMap(columnInfoMap);
            sqlBuilder.setColumnNameList(columnNameList);
            sqlBuilder.setValueList(valueList);
            sqlBuilder.setNotFoundColumnMap(resultInfo.getNotFoundColumnMap());
            sqlBuilder.setConvertValueMap(_convertValueMap);
            sqlBuilder.setDefaultValueMap(_defaultValueMap);
            sqlBuilder.setBindTypeProvider(new DfColumnBindTypeProvider() {
                public Class<?> provideBindType(String tableName, DfColumnMeta columnMeta) {
                    return getBindType(tableName, columnMeta);
                }
            });
            if (conn == null) {
                conn = _dataSource.getConnection();
            }
            if (ps == null) {
                executedSql = sqlBuilder.buildSql();
                ps = conn.prepareStatement(executedSql);
            }
            final Map<String, Object> columnValueMap = sqlBuilder.setupParameter();
            handleLoggingInsert(tableDbName, columnNameList, columnValueMap, loggingInsertType, rowNumber);

            int bindCount = 1;
            final Set<Entry<String, Object>> entrySet = columnValueMap.entrySet();
            for (Entry<String, Object> entry : entrySet) {
                final String columnName = entry.getKey();
                final Object obj = entry.getValue();

                // /- - - - - - - - - - - - - - - - - -
                // process Null (against Null Headache)
                // - - - - - - - - - -/
                if (processNull(tableDbName, columnName, obj, ps, bindCount, columnInfoMap)) {
                    bindCount++;
                    continue;
                }

                // /- - - - - - - - - - - - - - -
                // process NotNull and NotString
                // - - - - - - - - - -/
                // If the value is not null and the value has the own type except string,
                // it registers the value to statement by the type.
                if (processNotNullNotString(tableDbName, columnName, obj, conn, ps, bindCount,
                        columnInfoMap)) {
                    bindCount++;
                    continue;
                }

                // /- - - - - - - - - - - - - - - - - -
                // process NotNull and StringExpression
                // - - - - - - - - - -/
                final String value = (String) obj;
                processNotNullString(dataFile, tableDbName, columnName, value, conn, ps, bindCount,
                        columnInfoMap);
                bindCount++;
            }
            if (isMergedSuppressBatchUpdate(dataDirectory)) {
                ps.execute();
            } else {
                ps.addBatch();
                ++addedBatchSize;
                if (addedBatchSize == 100000) {
                    // this is supported in only delimiter data writer
                    // because delimiter data can treat large data
                    ps.executeBatch(); // to avoid OutOfMemory
                    ps.clearBatch(); // for next batch
                    addedBatchSize = 0;
                }
            }
            // *one record is finished here

            // clear temporary variables
            // if an exception occurs from execute() or addBatch(),
            // this valueList is to be information for debug
            valueList.clear();
            preContinueString = null;
        }
        if (ps != null && addedBatchSize > 0) {
            ps.executeBatch();
        }
        noticeLoadedRowSize(tableDbName, rowNumber);
        checkImplicitClassification(dataFile, tableDbName, columnNameList, conn);
    } catch (FileNotFoundException e) {
        throw e;
    } catch (IOException e) {
        throw e;
    } catch (SQLException e) {
        final SQLException nextEx = e.getNextException();
        if (nextEx != null && !e.equals(nextEx)) { // focus on next exception
            _log.warn("*Failed to register: " + e.getMessage());
            String msg = buildRegExpMessage(_fileName, tableDbName, executedSql, valueList, nextEx);
            throw new DfDelimiterDataRegistrationFailureException(msg, nextEx); // switch!
        } else {
            String msg = buildRegExpMessage(_fileName, tableDbName, executedSql, valueList, e);
            throw new DfDelimiterDataRegistrationFailureException(msg, e);
        }
    } catch (RuntimeException e) {
        String msg = buildRegExpMessage(_fileName, tableDbName, executedSql, valueList, e);
        throw new DfDelimiterDataRegistrationFailureException(msg, e);
    } finally {
        try {
            if (fis != null) {
                fis.close();
            }
            if (ir != null) {
                ir.close();
            }
            if (br != null) {
                br.close();
            }
        } catch (java.io.IOException ignored) {
            _log.warn("File-close threw the exception: ", ignored);
        }
        if (ps != null) {
            try {
                ps.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnInfoMap);
    }
}
From source file: com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java

protected long _reorganizeSpace(Connection con, SequencerEngine seq, FxTreeMode sourceMode, FxTreeMode destMode,
        long nodeId, boolean includeNodeId, BigInteger overrideSpacing, BigInteger overrideLeft,
        FxTreeNodeInfo insertParent, int insertPosition, BigInteger insertSpace, BigInteger insertBoundaries[],
        int depthDelta, Long destinationNode, boolean createMode, boolean createKeepIds,
        boolean disableSpaceOptimization) throws FxTreeException {
    long firstCreatedNodeId = -1;
    FxTreeNodeInfoSpreaded nodeInfo;
    try {
        nodeInfo = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, sourceMode, nodeId);
    } catch (Exception e) {
        return -1;
    }

    if (!nodeInfo.isSpaceOptimizable() && !disableSpaceOptimization) {
        // The root node can't be optimized any more ... so all we can do is fail :-/
        // This should never really happen
        if (nodeId == ROOT_NODE) {
            return -1;
        }
        //System.out.println("### UP we go, depthDelta=" + depthDelta);
        return _reorganizeSpace(con, seq, sourceMode, destMode, nodeInfo.getParentId(), includeNodeId,
                overrideSpacing, overrideLeft, insertParent, insertPosition, insertSpace, insertBoundaries,
                depthDelta, destinationNode, createMode, createKeepIds, false);
    }

    BigInteger spacing = nodeInfo.getDefaultSpacing();
    if (overrideSpacing != null && (overrideSpacing.compareTo(spacing) < 0 || overrideLeft != null)) {
        // override spacing unless it is greater OR overrideLeft is specified (in that case we
        // have to use the spacing for valid tree ranges)
        spacing = overrideSpacing;
    } else {
        if (spacing.compareTo(GO_UP) < 0 && !createMode && !disableSpaceOptimization) {
            return _reorganizeSpace(con, seq, sourceMode, destMode, nodeInfo.getParentId(), includeNodeId,
                    overrideSpacing, overrideLeft, insertParent, insertPosition, insertSpace,
                    insertBoundaries, depthDelta, destinationNode, createMode, createKeepIds, false);
        }
    }

    if (insertBoundaries != null && insertPosition == -1) {
        insertPosition = 0; // insertPosition cannot be negative
    }

    Statement stmt = null;
    PreparedStatement ps = null;
    ResultSet rs;
    BigInteger left = overrideLeft == null ? nodeInfo.getLeft() : overrideLeft;
    BigInteger right = null;
    String includeNode = includeNodeId ? "=" : "";
    long counter = 0;
    long newId = -1;
    try {
        final long start = System.currentTimeMillis();
        String createProps = createMode ? ",PARENT,REF,NAME,TEMPLATE" : "";
        String sql = " SELECT ID," + StorageManager.getIfFunction(
                // compute total child count only when the node has children
                "CHILDCOUNT = 0", "0",
                "(SELECT COUNT(*) FROM " + getTable(sourceMode) + " WHERE LFT > NODE.LFT AND RGT < NODE.RGT)")
                // 3 4 5 6
                + ", CHILDCOUNT, LFT AS LFTORD,RGT,DEPTH" + createProps
                + " FROM (SELECT ID,CHILDCOUNT,LFT,RGT,DEPTH" + createProps + " FROM " + getTable(sourceMode)
                + " WHERE " + "LFT>" + includeNode + nodeInfo.getLeft() + " AND LFT<" + includeNode
                + nodeInfo.getRight() + ") NODE " + "ORDER BY LFTORD ASC";
        stmt = con.createStatement();
        rs = stmt.executeQuery(sql);
        if (createMode) {
            //                                      1  2      3     4     5   6        7   8
            ps = con.prepareStatement("INSERT INTO " + getTable(destMode)
                    + " (ID,PARENT,DEPTH,DIRTY,REF,TEMPLATE,LFT,RGT,"
                    //  9          10   11
                    + "CHILDCOUNT,NAME,MODIFIED_AT) " + "VALUES (?,?,?,?,?,?,?,?,?,?,?)");
        } else {
            ps = con.prepareStatement(
                    "UPDATE " + getTable(sourceMode) + " SET LFT=?,RGT=?,DEPTH=? WHERE ID=?");
        }

        long id;
        int total_childs;
        int direct_childs;
        BigInteger nextLeft;
        int lastDepth = nodeInfo.getDepth() + (includeNodeId ? 0 : 1);
        int depth;
        BigInteger _rgt;
        BigInteger _lft;
        Long ref = null;
        String data = null;
        String name = "";

        Stack<Long> currentParent = null;
        if (createMode) {
            currentParent = new Stack<Long>();
            currentParent.push(destinationNode);
        }

        //System.out.println("Spacing:"+SPACING);
        while (rs.next()) {
            //System.out.println("------------------");
            id = rs.getLong(1);
            total_childs = rs.getInt(2);
            direct_childs = rs.getInt(3);
            _lft = getNodeBounds(rs, 4);
            _rgt = getNodeBounds(rs, 5);
            depth = rs.getInt(6);
            if (createMode) {
                // Reading these properties is slow, only do it when needed
                ref = rs.getLong(8);
                if (rs.wasNull())
                    ref = null;
                name = rs.getString(9);
                data = rs.getString(10);
                if (rs.wasNull())
                    data = null;
            }
            left = left.add(spacing).add(BigInteger.ONE);

            // Handle depth differences
            if (lastDepth - depth > 0) {
                BigInteger depthDifference = spacing.add(BigInteger.ONE);
                left = left.add(depthDifference.multiply(BigInteger.valueOf(lastDepth - depth)));
            }
            if (createMode) {
                if (lastDepth < depth) {
                    currentParent.push(newId);
                } else if (lastDepth > depth) {
                    for (int p = 0; p < (lastDepth - depth); p++)
                        currentParent.pop();
                }
            }

            right = left.add(spacing).add(BigInteger.ONE);

            // add child space if needed
            if (total_childs > 0) {
                BigInteger childSpace = spacing.multiply(BigInteger.valueOf(total_childs * 2));
                childSpace = childSpace.add(BigInteger.valueOf((total_childs * 2) - 1));
                right = right.add(childSpace);
                nextLeft = left;
            } else {
                nextLeft = right;
            }

            if (insertBoundaries != null) {
                // insert gap at requested position
                // If we're past the gap, keep adding the insert space to left/right because the added
                // space is never "injected" into the loop, i.e. without adding it the left/right
                // boundaries of nodes after the gap would be too far to the left.
                if (_lft.compareTo(insertBoundaries[0]) > 0) {
                    left = left.add(insertSpace);
                }
                if (_rgt.compareTo(insertBoundaries[0]) > 0) {
                    right = right.add(insertSpace);
                }
            }

            // sanity checks
            if (left.compareTo(right) >= 0) {
                throw new FxTreeException(LOG, "ex.tree.reorganize.failed", counter, left, right,
                        "left greater than right");
            }
            if (insertParent != null && right.compareTo((BigInteger) insertParent.getRight()) > 0) {
                throw new FxTreeException(LOG, "ex.tree.reorganize.failed", counter, left, right,
                        "wrote past parent node bounds");
            }

            // Update the node
            if (createMode) {
                newId = createKeepIds ? id : seq.getId(destMode.getSequencer());
                if (firstCreatedNodeId == -1)
                    firstCreatedNodeId = newId;

                // Create the main entry
                ps.setLong(1, newId);
                ps.setLong(2, currentParent.peek());
                ps.setLong(3, depth + depthDelta);
                ps.setBoolean(4, destMode != FxTreeMode.Live); // only flag non-live trees dirty
                if (ref == null) {
                    ps.setNull(5, java.sql.Types.NUMERIC);
                } else {
                    ps.setLong(5, ref);
                }
                if (data == null) {
                    ps.setNull(6, java.sql.Types.VARCHAR);
                } else {
                    ps.setString(6, data);
                }
                //System.out.println("=> id:"+newId+" left:"+left+" right:"+right);
                setNodeBounds(ps, 7, left);
                setNodeBounds(ps, 8, right);
                ps.setInt(9, direct_childs);
                ps.setString(10, name);
                ps.setLong(11, System.currentTimeMillis());
                ps.addBatch();
            } else {
                setNodeBounds(ps, 1, left);
                setNodeBounds(ps, 2, right);
                ps.setInt(3, depth + depthDelta);
                ps.setLong(4, id);
                ps.addBatch();
                // ps.executeBatch();
                // ps.clearBatch();
            }

            // Prepare variables for the next node
            left = nextLeft;
            lastDepth = depth;
            counter++;

            // Execute batch every 10000 items to avoid out of memory
            if (counter % 10000 == 0) {
                ps.executeBatch();
                ps.clearBatch();
            }
        }
        rs.close();
        stmt.close();
        stmt = null;
        ps.executeBatch();

        if (LOG.isDebugEnabled()) {
            final long time = System.currentTimeMillis() - start;
            LOG.debug("Tree reorganization of " + counter + " items completed in " + time + " ms (spaceLen="
                    + spacing + ")");
        }
        return firstCreatedNodeId;
    } catch (FxApplicationException e) {
        throw e instanceof FxTreeException ? (FxTreeException) e : new FxTreeException(e);
    } catch (SQLException e) {
        String next = "";
        if (e.getNextException() != null)
            next = " next:" + e.getNextException().getMessage();
        if (StorageManager.isDuplicateKeyViolation(e))
            throw new FxTreeException(LOG, e, "ex.tree.reorganize.duplicateKey");
        throw new FxTreeException(LOG, e, "ex.tree.reorganize.failed", counter, left, right,
                e.getMessage() + next);
    } catch (Exception e) {
        throw new FxTreeException(e);
    } finally {
        try {
            if (stmt != null)
                stmt.close();
        } catch (Throwable t) { /*ignore*/ }
        try {
            if (ps != null)
                ps.close();
        } catch (Throwable t) { /*ignore*/ }
    }
}
From source file: com.oltpbenchmark.benchmarks.tpcc.TPCCLoader.java

protected int loadOrder(int whseKount, int distWhseKount, int custDistKount) {
    int k = 0;
    int t = 0;
    PrintWriter outLine = null;
    PrintWriter outNewOrder = null;

    try {
        PreparedStatement ordrPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_OPENORDER);
        PreparedStatement nworPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_NEWORDER);
        PreparedStatement orlnPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_ORDERLINE);

        if (outputFiles == true) {
            out = new PrintWriter(new FileOutputStream(fileLocation + "order.csv"));
            LOG.debug("\nWriting Order file to: " + fileLocation + "order.csv");
            outLine = new PrintWriter(new FileOutputStream(fileLocation + "order-line.csv"));
            LOG.debug("\nWriting Order Line file to: " + fileLocation + "order-line.csv");
            outNewOrder = new PrintWriter(new FileOutputStream(fileLocation + "new-order.csv"));
            LOG.debug("\nWriting New Order file to: " + fileLocation + "new-order.csv");
        }

        now = new java.util.Date();
        Oorder oorder = new Oorder();
        NewOrder new_order = new NewOrder();
        OrderLine order_line = new OrderLine();
        jdbcIO myJdbcIO = new jdbcIO();

        t = (whseKount * distWhseKount * custDistKount);
        t = (t * 11) + (t / 3);
        LOG.debug("whse=" + whseKount + ", dist=" + distWhseKount + ", cust=" + custDistKount);
        LOG.debug("\nStart Order-Line-New Load for approx " + t + " rows @ " + now + " ...");

        for (int w = 1; w <= whseKount; w++) {
            for (int d = 1; d <= distWhseKount; d++) {
                // TPC-C 4.3.3.1: o_c_id must be a permutation of [1, 3000]
                int[] c_ids = new int[custDistKount];
                for (int i = 0; i < custDistKount; ++i) {
                    c_ids[i] = i + 1;
                }
                // Collections.shuffle exists, but there is no Arrays.shuffle
                for (int i = 0; i < c_ids.length - 1; ++i) {
                    int remaining = c_ids.length - i - 1;
                    int swapIndex = gen.nextInt(remaining) + i + 1;
                    assert i < swapIndex;
                    int temp = c_ids[swapIndex];
                    c_ids[swapIndex] = c_ids[i];
                    c_ids[i] = temp;
                }

                for (int c = 1; c <= custDistKount; c++) {
                    oorder.o_id = c;
                    oorder.o_w_id = w;
                    oorder.o_d_id = d;
                    oorder.o_c_id = c_ids[c - 1];
                    // o_carrier_id is set *only* for orders with ids < 2101 [4.3.3.1]
                    if (oorder.o_id < FIRST_UNPROCESSED_O_ID) {
                        oorder.o_carrier_id = TPCCUtil.randomNumber(1, 10, gen);
                    } else {
                        oorder.o_carrier_id = null;
                    }
                    oorder.o_ol_cnt = TPCCUtil.randomNumber(5, 15, gen);
                    oorder.o_all_local = 1;
                    oorder.o_entry_d = System.currentTimeMillis();

                    k++;
                    if (outputFiles == false) {
                        myJdbcIO.insertOrder(ordrPrepStmt, oorder);
                    } else {
                        String str = "";
                        str = str + oorder.o_id + ",";
                        str = str + oorder.o_w_id + ",";
                        str = str + oorder.o_d_id + ",";
                        str = str + oorder.o_c_id + ",";
                        str = str + oorder.o_carrier_id + ",";
                        str = str + oorder.o_ol_cnt + ",";
                        str = str + oorder.o_all_local + ",";
                        Timestamp entry_d = new java.sql.Timestamp(oorder.o_entry_d);
                        str = str + entry_d;
                        out.println(str);
                    }

                    // 900 rows in the NEW-ORDER table corresponding to the last
                    // 900 rows in the ORDER table for that district (i.e., with
                    // NO_O_ID between 2,101 and 3,000)
                    if (c >= FIRST_UNPROCESSED_O_ID) {
                        new_order.no_w_id = w;
                        new_order.no_d_id = d;
                        new_order.no_o_id = c;

                        k++;
                        if (outputFiles == false) {
                            myJdbcIO.insertNewOrder(nworPrepStmt, new_order);
                        } else {
                            String str = "";
                            str = str + new_order.no_w_id + ",";
                            str = str + new_order.no_d_id + ",";
                            str = str + new_order.no_o_id;
                            outNewOrder.println(str);
                        }
                    } // end new order

                    for (int l = 1; l <= oorder.o_ol_cnt; l++) {
                        order_line.ol_w_id = w;
                        order_line.ol_d_id = d;
                        order_line.ol_o_id = c;
                        order_line.ol_number = l; // ol_number
                        order_line.ol_i_id = TPCCUtil.randomNumber(1, 100000, gen);
                        if (order_line.ol_o_id < FIRST_UNPROCESSED_O_ID) {
                            order_line.ol_delivery_d = oorder.o_entry_d;
                            order_line.ol_amount = 0;
                        } else {
                            order_line.ol_delivery_d = null;
                            // random within [0.01 .. 9,999.99]
                            order_line.ol_amount = (float) (TPCCUtil.randomNumber(1, 999999, gen) / 100.0);
                        }
                        order_line.ol_supply_w_id = order_line.ol_w_id;
                        order_line.ol_quantity = 5;
                        order_line.ol_dist_info = TPCCUtil.randomStr(24);

                        k++;
                        if (outputFiles == false) {
                            myJdbcIO.insertOrderLine(orlnPrepStmt, order_line);
                        } else {
                            String str = "";
                            str = str + order_line.ol_w_id + ",";
                            str = str + order_line.ol_d_id + ",";
                            str = str + order_line.ol_o_id + ",";
                            str = str + order_line.ol_number + ",";
                            str = str + order_line.ol_i_id + ",";
                            Timestamp delivery_d = new Timestamp(order_line.ol_delivery_d);
                            str = str + delivery_d + ",";
                            str = str + order_line.ol_amount + ",";
                            str = str + order_line.ol_supply_w_id + ",";
                            str = str + order_line.ol_quantity + ",";
                            str = str + order_line.ol_dist_info;
                            outLine.println(str);
                        }

                        if ((k % configCommitCount) == 0) {
                            long tmpTime = new java.util.Date().getTime();
                            String etStr = " Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + " ";
                            LOG.debug(etStr.substring(0, 30) + " Writing record " + k + " of " + t);
                            lastTimeMS = tmpTime;
                            if (outputFiles == false) {
                                ordrPrepStmt.executeBatch();
                                nworPrepStmt.executeBatch();
                                orlnPrepStmt.executeBatch();
                                ordrPrepStmt.clearBatch();
                                nworPrepStmt.clearBatch();
                                orlnPrepStmt.clearBatch();
                                transCommit();
                            }
                        }
                    } // end for [l]
                } // end for [c]
            } // end for [d]
        } // end for [w]

        LOG.debug(" Writing final records " + k + " of " + t);
        if (outputFiles == false) {
            ordrPrepStmt.executeBatch();
            nworPrepStmt.executeBatch();
            orlnPrepStmt.executeBatch();
        } else {
            outLine.close();
            outNewOrder.close();
        }
        transCommit();
        now = new java.util.Date();
        LOG.debug("End Orders Load @ " + now);
    } catch (Exception e) {
        e.printStackTrace();
        transRollback();
        if (outputFiles == true) {
            outLine.close();
            outNewOrder.close();
        }
    }
    return (k);
}
From source file: org.wso2.carbon.apimgt.impl.dao.ApiMgtDAO.java

/**
 * Update external API Store details to which APIs are published.
 *
 * @param apiIdentifier API Identifier
 * @throws APIManagementException if it fails to update the external API Store details
 */
public void updateExternalAPIStoresDetails(APIIdentifier apiIdentifier, Set<APIStore> apiStoreSet,
        Connection conn) throws APIManagementException, SQLException {
    PreparedStatement ps = null;
    try {
        conn.setAutoCommit(false);
        // This query adds external API Stores to the database table
        String sqlQuery = SQLConstants.UPDATE_EXTERNAL_API_STORE_SQL;
        ps = conn.prepareStatement(sqlQuery);

        // Get API Id
        int apiId;
        apiId = getAPIID(apiIdentifier, conn);
        if (apiId == -1) {
            String msg = "Could not load API record for: " + apiIdentifier.getApiName();
            log.error(msg);
        }

        for (Object storeObject : apiStoreSet) {
            APIStore store = (APIStore) storeObject;
            ps.setString(1, store.getEndpoint());
            ps.setString(2, store.getType());
            ps.setInt(3, apiId);
            ps.setString(4, store.getName());
            ps.addBatch();
        }
        ps.executeBatch();
        ps.clearBatch();
        conn.commit();
    } catch (SQLException e) {
        log.error("Error while updating External APIStore details to the database for API : ", e);
    } finally {
        APIMgtDBUtil.closeAllConnections(ps, null, null);
    }
}
From source file: org.wso2.carbon.apimgt.impl.dao.ApiMgtDAO.java

/**
 * Adds the URI templates defined for an API.
 *
 * @param apiId
 * @param api
 * @param connection
 * @throws APIManagementException
 */
public void addURLTemplates(int apiId, API api, Connection connection) throws APIManagementException {
    if (apiId == -1) {
        // application addition has failed
        return;
    }
    PreparedStatement prepStmt = null;
    PreparedStatement scopePrepStmt = null;

    String query = SQLConstants.ADD_URL_MAPPING_SQL;
    String scopeQuery = SQLConstants.ADD_OAUTH2_RESOURCE_SCOPE_SQL;
    try {
        //connection = APIMgtDBUtil.getConnection();
        prepStmt = connection.prepareStatement(query);
        scopePrepStmt = connection.prepareStatement(scopeQuery);

        Iterator<URITemplate> uriTemplateIterator = api.getUriTemplates().iterator();
        URITemplate uriTemplate;
        for (; uriTemplateIterator.hasNext();) {
            uriTemplate = uriTemplateIterator.next();

            prepStmt.setInt(1, apiId);
            prepStmt.setString(2, uriTemplate.getHTTPVerb());
            prepStmt.setString(3, uriTemplate.getAuthType());
            prepStmt.setString(4, uriTemplate.getUriTemplate());
            // If an API-level policy is available then set it for all the resources
            if (StringUtils.isEmpty(api.getApiLevelPolicy())) {
                prepStmt.setString(5, uriTemplate.getThrottlingTier());
            } else {
                prepStmt.setString(5, api.getApiLevelPolicy());
            }
            InputStream is;
            if (uriTemplate.getMediationScript() != null) {
                is = new ByteArrayInputStream(
                        uriTemplate.getMediationScript().getBytes(Charset.defaultCharset()));
            } else {
                is = null;
            }
            if (connection.getMetaData().getDriverName().contains("PostgreSQL")
                    || connection.getMetaData().getDatabaseProductName().contains("DB2")) {
                if (uriTemplate.getMediationScript() != null) {
                    prepStmt.setBinaryStream(6, is,
                            uriTemplate.getMediationScript().getBytes(Charset.defaultCharset()).length);
                } else {
                    prepStmt.setBinaryStream(6, is, 0);
                }
            } else {
                prepStmt.setBinaryStream(6, is);
            }
            prepStmt.addBatch();
            if (uriTemplate.getScope() != null) {
                scopePrepStmt.setString(1, APIUtil.getResourceKey(api, uriTemplate));
                if (uriTemplate.getScope().getId() == 0) {
                    String scopeKey = uriTemplate.getScope().getKey();
                    Scope scopeByKey = APIUtil.findScopeByKey(api.getScopes(), scopeKey);
                    if (scopeByKey != null) {
                        if (scopeByKey.getId() > 0) {
                            uriTemplate.getScopes().setId(scopeByKey.getId());
                        }
                    }
                }
                scopePrepStmt.setInt(2, uriTemplate.getScope().getId());
                scopePrepStmt.addBatch();
            }
        }
        prepStmt.executeBatch();
        prepStmt.clearBatch();
        scopePrepStmt.executeBatch();
        scopePrepStmt.clearBatch();
    } catch (SQLException e) {
        handleException("Error while adding URL template(s) to the database for API : " + api.getId(), e);
    } finally {
        APIMgtDBUtil.closeAllConnections(prepStmt, null, null);
        APIMgtDBUtil.closeAllConnections(scopePrepStmt, null, null);
    }
}