List of usage examples for java.sql Connection createBlob
Blob createBlob() throws SQLException;
Constructs an object that implements the Blob interface; the returned object initially contains no data.
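Before the longer examples below, here is a minimal, self-contained sketch of the typical usage pattern: create an empty Blob from the Connection, fill it with setBytes (Blob offsets are 1-based), bind it with setBlob, and release it with free once the statement has executed. The table and column names here are hypothetical.

import java.nio.charset.StandardCharsets;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class CreateBlobExample {
    // Inserts a UTF-8 encoded message into a hypothetical "messages" table.
    public static void insertMessage(Connection conn, String message) throws SQLException {
        try (PreparedStatement pstmt = conn.prepareStatement("insert into messages (body) values (?)")) {
            Blob blob = conn.createBlob(); // empty Blob backed by this connection
            try {
                blob.setBytes(1, message.getBytes(StandardCharsets.UTF_8)); // offsets are 1-based
                pstmt.setBlob(1, blob);
                pstmt.executeUpdate();
            } finally {
                blob.free(); // release the Blob's resources (JDBC 4.0+)
            }
        }
    }
}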
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java
@Deprecated
private boolean log(String paxosID, int version, int slot, int ballotnum, int coordinator,
        PaxosPacketType type, String message) {
    if (isClosed())
        return false;
    if (!isLoggingEnabled())
        return true;
    boolean logged = false;
    String cmd = "insert into " + getMTable() + " values (?, ?, ?, ?, ?, ?, ?)";
    PreparedStatement localLogMsgStmt = null;
    Connection conn = null;
    try {
        conn = this.getDefaultConn();
        localLogMsgStmt = conn.prepareStatement(cmd); // no re-use option
        localLogMsgStmt.setString(1, paxosID);
        localLogMsgStmt.setInt(2, version);
        localLogMsgStmt.setInt(3, slot);
        localLogMsgStmt.setInt(4, ballotnum);
        localLogMsgStmt.setInt(5, coordinator);
        localLogMsgStmt.setInt(6, type.getInt());
        if (getLogMessageBlobOption()) {
            // localLogMsgStmt.setBlob(7, new StringReader(message));
            Blob blob = conn.createBlob();
            blob.setBytes(1, message.getBytes(CHARSET));
            localLogMsgStmt.setBlob(7, blob);
        } else
            localLogMsgStmt.setString(7, message);
        int rowcount = localLogMsgStmt.executeUpdate();
        assert (rowcount == 1);
        logged = true;
        log.log(Level.FINEST, "{0} inserted {1}, {2}, {3}, {4}, {5}",
                new Object[] { this, paxosID, slot, ballotnum, coordinator, message });
    } catch (SQLException sqle) {
        if (SQL.DUPLICATE_KEY.contains(sqle.getSQLState())) {
            log.log(Level.FINE, "{0} log message {1} previously logged", new Object[] { this, message });
            logged = true;
        } else {
            log.severe("SQLException while logging as " + cmd + " : " + sqle);
            sqle.printStackTrace();
        }
    } catch (UnsupportedEncodingException e) {
        e.printStackTrace();
    } finally {
        cleanup(localLogMsgStmt);
        cleanup(conn);
    } // no cleanup if statement is re-used
    return logged;
}
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java
@Override
public /* synchronized */ boolean pause(String paxosID, String serializedState) {
    if (isClosed() /* || !isLoggingEnabled() */)
        return false;
    boolean paused = false;
    String insertCmd = "insert into " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " (serialized, has_serialized, logindex, paxos_id) values (?,true,?,?)";
    String insertCmdNoLogIndex = "insert into "
            + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " (serialized, has_serialized, paxos_id) values (?,true,?)";
    String updateCmd = "update " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " set serialized=?, has_serialized=true, logindex=? where paxos_id=?";
    String updateCmdNoLogIndex = "update " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " set serialized=?, has_serialized=true where paxos_id=?";
    PreparedStatement pstmt = null;
    Connection conn = null;
    synchronized (this.messageLog) {
        try {
            LogIndex logIndex = this.messageLog.getLogIndex(paxosID);
            boolean pauseLogIndex = (logIndex != null);
            Blob blob = null;
            byte[] logIndexBytes = null;
            conn = this.getDefaultConn();
            // try update first; if exception, try insert
            pstmt = conn.prepareStatement(pauseLogIndex ? updateCmd : updateCmdNoLogIndex);
            pstmt.setString(1, serializedState);
            if (pauseLogIndex) {
                // we pause logIndex as well with older MessageLogPausable
                logIndexBytes = deflate(this.messageLog.getLogIndex(paxosID).toString().getBytes(CHARSET));
                blob = conn.createBlob();
                blob.setBytes(1, logIndexBytes);
                pstmt.setBlob(2, blob);
                assert (new String(inflate(logIndexBytes), CHARSET)
                        .equals(this.messageLog.getLogIndex(paxosID).toString()));
            }
            pstmt.setString(pauseLogIndex ? 3 : 2, paxosID);
            try {
                pstmt.executeUpdate();
            } catch (SQLException e) {
                pstmt.close();
                // try insert
                pstmt = conn.prepareStatement(pauseLogIndex ? insertCmd : insertCmdNoLogIndex);
                pstmt.setString(1, serializedState);
                if (pauseLogIndex) {
                    blob = conn.createBlob();
                    blob.setBytes(1, logIndexBytes);
                    pstmt.setBlob(2, blob);
                }
                pstmt.setString(pauseLogIndex ? 3 : 2, paxosID);
                pstmt.executeUpdate();
            }
            log.log(Level.FINE, "{0} paused [{1}] ,[{2}]", new Object[] { this, serializedState, logIndex });
            paused = true;
        } catch (SQLException | IOException e) {
            log.severe(this + " failed to pause instance " + paxosID);
            this.deletePaused(paxosID);
            e.printStackTrace();
        } finally {
            cleanup(pstmt);
            cleanup(conn);
        }
        // needed with older MessageLogPausable
        this.messageLog.uncache(paxosID);
    }
    return paused;
}
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private /* synchronized */ Set<String> pauseLogIndex(Map<String, LogIndex> toCommit) {
    if (isClosed())
        return null;
    if (!USE_CHECKPOINTS_AS_PAUSE_TABLE)
        return this.pauseLogIndexIndividually(toCommit);
    String updateCmd = "update " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " set logindex=? where paxos_id=?";
    PreparedStatement pstmt = null;
    Connection conn = null;
    Set<String> paused = new HashSet<String>();
    Set<String> batch = new HashSet<String>();
    synchronized (this.messageLog) {
        try {
            int i = 0;
            for (String paxosID : toCommit.keySet()) {
                LogIndex logIndex = toCommit.get(paxosID);
                if (conn == null) {
                    conn = this.getDefaultConn();
                    conn.setAutoCommit(false);
                    pstmt = conn.prepareStatement(updateCmd);
                }
                byte[] logIndexBytes = logIndex != null ? deflate(logIndex.toString().getBytes(CHARSET))
                        : null;
                if (logIndexBytes != null && ENABLE_INSTRUMENTATION && Util.oneIn(Integer.MAX_VALUE))
                    DelayProfiler.updateMovAvg("logindex_size", logIndexBytes.length);
                Blob blob = conn.createBlob();
                if (logIndexBytes != null)
                    blob.setBytes(1, logIndexBytes);
                pstmt.setBlob(1, logIndexBytes != null ? blob : null);
                pstmt.setString(2, paxosID);
                pstmt.addBatch();
                batch.add(paxosID);
                if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == toCommit.size()) {
                    pstmt.executeBatch();
                    conn.commit();
                    pstmt.clearBatch();
                    paused.addAll(batch);
                    log.log(Level.FINE, "{0} paused logIndex batch {1}",
                            new Object[] { this, Util.truncatedLog(batch, 16) });
                    batch.clear();
                }
                i++;
            }
        } catch (SQLException | IOException sqle) {
            log.severe(this + " failed to pause logIndex batch");
            sqle.printStackTrace();
        } finally {
            cleanup(pstmt);
            cleanup(conn);
        }
        // free up memory
        for (String paxosID : paused)
            this.messageLog.uncache(paxosID);
    }
    if (paused.size() != toCommit.size())
        paused.addAll(this.pauseLogIndexIndividually(diffLI(toCommit, paused)));
    return paused;
}
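The deflate and inflate calls above are private helpers in SQLPaxosLogger that compress the serialized log index before storing it in the blob. Their implementation is not shown on this page; a minimal sketch of such helpers using java.util.zip, assuming the whole payload fits in memory, might look like this:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

public class CompressionUtil {
    // Compresses a byte array with DEFLATE; suited to small, in-memory payloads.
    public static byte[] deflate(byte[] data) {
        Deflater deflater = new Deflater();
        deflater.setInput(data);
        deflater.finish();
        ByteArrayOutputStream out = new ByteArrayOutputStream(data.length);
        byte[] buffer = new byte[4096];
        while (!deflater.finished())
            out.write(buffer, 0, deflater.deflate(buffer));
        deflater.end();
        return out.toByteArray();
    }

    // Reverses deflate(); wraps corrupt-input errors in IOException.
    public static byte[] inflate(byte[] compressed) throws IOException {
        Inflater inflater = new Inflater();
        inflater.setInput(compressed);
        ByteArrayOutputStream out = new ByteArrayOutputStream(compressed.length * 2);
        byte[] buffer = new byte[4096];
        try {
            while (!inflater.finished())
                out.write(buffer, 0, inflater.inflate(buffer));
        } catch (DataFormatException e) {
            throw new IOException(e);
        } finally {
            inflater.end();
        }
        return out.toByteArray();
    }
}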
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java
/**
 * Batched version of putCheckpointState. This is a complicated method with
 * very different behaviors for updates and inserts. If update is true, it
 * attempts to batch-update all the checkpoints, and for those
 * updates/inserts that failed, it attempts to individually update/insert
 * them through
 * {@link #putCheckpointState(String, int, Set, int, Ballot, String, int)}.
 * It is still possible that only a subset of the updates succeed, but that
 * is okay as checkpoint failure is not fatal except in the case of initial
 * checkpoint insertion.
 *
 * If update is false, it means that this is a batch-insertion of initial
 * checkpoints, and it is critical that this batch operation is atomic. If
 * the batch operation only partly succeeds, it should throw an exception
 * so that the caller cannot proceed any further with the batch insertion,
 * but it should also roll back the changes.
 *
 * The reason batched creation of initial checkpoints should be atomic is
 * that otherwise the checkpoints that did get written are essentially
 * created paxos instances, but there is no easy way for the caller to know
 * that they got created, and this could lead to nasty surprises later. If
 * the caller always follows up failed batch creations with sequential
 * creation, then the rollback is not critical, as the sequential creation
 * will simply "recover" from any checkpoint left behind during a previous
 * failed batch creation. If the caller chooses to keep re-issuing the
 * batch creation and expects to eventually succeed (assuming that the
 * instances in the batch didn't actually exist a priori), then rolling
 * back failed batch creations like below will not help in the event of
 * crashes. So the caller should really just issue sequential creation
 * requests if a batch creation fails or times out for any reason.
 *
 * Note: this method just needs to be atomic, i.e., all or none, but not
 * synchronized. Synchronizing it would invert the invariant that
 * messageLog is always locked before SQLPaxosLogger (because of the
 * getMinLogFile invocation).
 *
 * @param tasks
 * @param update
 */
@Override
public boolean putCheckpointState(CheckpointTask[] tasks, boolean update) {
    if (isClosed() || DISABLE_CHECKPOINTING)
        return false;
    boolean batchSuccess = true;
    boolean[] committed = new boolean[tasks.length];
    long t1 = System.currentTimeMillis();
    String insertCmd = "insert into " + getCTable()
            + " (version,members,slot,ballotnum,coordinator,state,create_time, min_logfile, paxos_id) values (?,?,?,?,?,?,?,?,?)";
    String updateCmd = "update " + getCTable()
            + " set version=?,members=?, slot=?, ballotnum=?, coordinator=?, state=?, create_time=?, min_logfile=? where paxos_id=?";
    String cmd = update ? updateCmd : insertCmd;
    PreparedStatement insertCP = null;
    Connection conn = null;
    String minLogfile = null;
    ArrayList<Integer> batch = new ArrayList<Integer>();
    try {
        for (int i = 0; i < tasks.length; i++) {
            CheckpointTask task = tasks[i];
            assert (task != null);
            assert (update || task.slot == 0);
            if ((task.slot == 0) == update) {
                this.putCheckpointState(task.paxosID, task.version, (task.members), task.slot, task.ballot,
                        task.state, task.gcSlot, task.createTime);
                committed[i] = true;
                continue;
            }
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                insertCP = conn.prepareStatement(cmd);
            }
            insertCP.setInt(1, task.version);
            insertCP.setString(2, Util.toJSONString(task.members));
            insertCP.setInt(3, task.slot);
            insertCP.setInt(4, task.ballot.ballotNumber);
            insertCP.setInt(5, task.ballot.coordinatorID);
            if (getCheckpointBlobOption()) {
                Blob blob = conn.createBlob();
                blob.setBytes(1, task.state.getBytes(CHARSET));
                insertCP.setBlob(6, blob);
            } else
                insertCP.setString(6, task.state);
            insertCP.setLong(7, task.createTime);
            insertCP.setString(8, minLogfile = this.getSetGCAndGetMinLogfile(task.paxosID, task.version,
                    task.slot - task.gcSlot < 0 ? task.slot : task.gcSlot));
            insertCP.setString(9, task.paxosID);
            insertCP.addBatch();
            batch.add(i);
            incrTotalCheckpoints();
            if (shouldLogCheckpoint(1))
                log.log(Level.INFO, "{0} checkpointed> ({1}:{2}, {3}{4}, {5}, ({6}, {7}) [{8}]) {9}",
                        new Object[] { this, task.paxosID, task.version, (task.members), task.slot,
                                task.ballot, task.gcSlot, minLogfile,
                                Util.truncate(task.state, TRUNCATED_STATE_SIZE, TRUNCATED_STATE_SIZE),
                                (tasks.length > 1 ? "(batched=" + tasks.length + ")" : "") });
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == tasks.length) {
                int[] executed = insertCP.executeBatch();
                conn.commit();
                insertCP.clearBatch();
                for (int j = 0; j < executed.length; j++)
                    batchSuccess = batchSuccess && (committed[batch.get(j)] = (executed[j] > 0));
                batch.clear();
            }
        }
        if (ENABLE_INSTRUMENTATION && Util.oneIn(10))
            DelayProfiler.updateDelay("checkpoint", t1, tasks.length);
    } catch (SQLException | UnsupportedEncodingException sqle) {
        log.log(Level.SEVERE, "{0} SQLException while batched checkpointing", new Object[] { this });
        sqle.printStackTrace();
    } finally {
        cleanup(insertCP);
        cleanup(conn);
    }
    if (!batchSuccess) {
        if (update) {
            for (int i = 0; i < tasks.length; i++)
                if (!committed[i])
                    this.putCheckpointState(tasks[i].paxosID, tasks[i].version, tasks[i].members,
                            tasks[i].slot, tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
        } else {
            // rollback
            for (int i = 0; i < tasks.length; i++)
                if (committed[i])
                    this.deleteCheckpoint(tasks[i].paxosID, tasks[i].version, tasks[i].members,
                            tasks[i].slot, tasks[i].ballot, tasks[i].state, tasks[i].gcSlot);
            throw new PaxosInstanceCreationException(
                    "Rolled back failed batch-creation of " + tasks.length + " paxos instances");
        }
    }
    for (CheckpointTask task : tasks)
        this.deleteOutdatedMessages(task.paxosID, task.version, task.ballot, task.slot,
                task.ballot.ballotNumber, task.ballot.coordinatorID, task.gcSlot);
    return true;
}
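The atomicity argument in the javadoc above reduces to a simple pattern: commit per chunk, record what committed, and compensate with deletes on partial failure instead of leaving orphaned rows behind. A stripped-down sketch of that pattern, using a hypothetical table instances(id) and none of the gigapaxos types:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class AtomicBatchInsert {
    // Inserts all ids or none: chunks that already committed are compensated
    // with deletes if a later chunk fails. Table "instances(id)" is hypothetical.
    public static void insertAllOrNone(Connection conn, List<String> ids, int chunkSize) throws SQLException {
        List<String> committed = new ArrayList<>();
        conn.setAutoCommit(false);
        try (PreparedStatement insert = conn.prepareStatement("insert into instances (id) values (?)")) {
            for (int i = 0; i < ids.size(); i++) {
                insert.setString(1, ids.get(i));
                insert.addBatch();
                if ((i + 1) % chunkSize == 0 || (i + 1) == ids.size()) {
                    insert.executeBatch();
                    conn.commit(); // this chunk is now durable
                    committed.addAll(ids.subList(committed.size(), i + 1));
                }
            }
        } catch (SQLException e) {
            // roll back the uncommitted chunk, then compensate committed chunks
            conn.rollback();
            try (PreparedStatement delete = conn.prepareStatement("delete from instances where id=?")) {
                for (String id : committed) {
                    delete.setString(1, id);
                    delete.executeUpdate();
                }
                conn.commit();
            }
            throw e;
        }
    }
}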
From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private synchronized boolean logBatchDB(PendingLogTask[] packets) {
    if (isClosed())
        return false;
    if (!isLoggingEnabled() /* && !ENABLE_JOURNALING */)
        return true;
    boolean logged = true;
    PreparedStatement pstmt = null;
    Connection conn = null;
    String cmd = "insert into " + getMTable() + " values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    long t0 = System.nanoTime(), t0Millis = System.currentTimeMillis(), t1 = t0;
    int i = 0;
    try {
        for (i = 0; i < packets.length; i++) {
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(cmd);
            }
            PaxosPacket packet = packets[i].lmTask.logMsg;
            // accept and decision use a faster implementation
            int[] sb = AbstractPaxosLogger.getSlotBallot(packet);
            pstmt.setString(1, packet.getPaxosID());
            pstmt.setInt(2, packet.getVersion());
            pstmt.setInt(3, sb[0]);
            pstmt.setInt(4, sb[1]);
            pstmt.setInt(5, sb[2]);
            pstmt.setInt(6, packet.getType().getInt());
            pstmt.setString(7, packets[i].logfile);
            pstmt.setLong(8, packets[i].logfileOffset);
            byte[] msgBytes = isJournalingEnabled() ? new byte[0] : deflate(toBytes(packet));
            if (getLogMessageBlobOption()) {
                pstmt.setInt(9, packets[i].length); // msgBytes.length
                Blob blob = conn.createBlob();
                blob.setBytes(1, msgBytes);
                pstmt.setBlob(10, blob);
            } else {
                String packetString = packet.toString();
                pstmt.setInt(9, packetString.length());
                pstmt.setString(10, packetString);
            }
            pstmt.addBatch();
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == packets.length) {
                int[] executed = pstmt.executeBatch();
                conn.commit();
                pstmt.clearBatch();
                for (int j : executed)
                    logged = logged && (j > 0);
                if (logged)
                    log.log(Level.FINE, "{0}{1}{2}{3}{4}{5}",
                            new Object[] { this, " successfully logged the " + "last ",
                                    (i + 1) % MAX_DB_BATCH_SIZE == 0 ? MAX_DB_BATCH_SIZE
                                            : (i + 1) % MAX_DB_BATCH_SIZE,
                                    " messages in ", (System.nanoTime() - t1) / 1000, " us" });
                t1 = System.nanoTime();
            }
        }
    } catch (Exception sqle) {
        /* If any exception happens, we must return false to preserve safety.
         * We return true only if every message is logged successfully. */
        sqle.printStackTrace();
        log.severe(this + " incurred " + sqle + " while logging batch of size:" + packets.length
                + "; packet_length = " + packets[i].toString().length());
        assert (packets[i].toString().length() < MAX_LOG_MESSAGE_SIZE);
        logged = false;
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }
    if (ENABLE_JOURNALING)
        DelayProfiler.updateDelayNano("index", t0, packets.length);
    else
        DelayProfiler.updateDelay("logBatchDB", t0Millis);
    // DelayProfiler.updateCount("#logged", packets.length);
    DelayProfiler.updateMovAvg("#potential_batched", packets.length);
    return logged;
}
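All of the batched examples on this page share the same core JDBC idiom: disable auto-commit, accumulate rows with addBatch, and executeBatch/commit once per chunk so that neither memory use nor transaction size grows with the input. A generic sketch combining that idiom with createBlob, using a hypothetical table log(id, payload); it also calls Blob.free() per chunk to release Blob resources eagerly:

import java.sql.Blob;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class BlobBatchWriter {
    static final int MAX_DB_BATCH_SIZE = 10000; // same role as the constant above

    // Writes one row per payload into a hypothetical table "log(id, payload)",
    // committing every MAX_DB_BATCH_SIZE rows and freeing Blobs after each chunk.
    public static void writeAll(Connection conn, List<byte[]> payloads) throws SQLException {
        conn.setAutoCommit(false);
        List<Blob> pendingBlobs = new ArrayList<>();
        try (PreparedStatement pstmt = conn.prepareStatement("insert into log (id, payload) values (?, ?)")) {
            for (int i = 0; i < payloads.size(); i++) {
                Blob blob = conn.createBlob();
                blob.setBytes(1, payloads.get(i));
                pendingBlobs.add(blob);
                pstmt.setInt(1, i);
                pstmt.setBlob(2, blob);
                pstmt.addBatch();
                if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == payloads.size()) {
                    pstmt.executeBatch();
                    conn.commit();
                    pstmt.clearBatch();
                    for (Blob b : pendingBlobs)
                        b.free(); // release per-chunk Blob resources
                    pendingBlobs.clear();
                }
            }
        }
    }
}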