List of usage examples for java.sql.PreparedStatement.executeBatch()
int[] executeBatch() throws SQLException;
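executeBatch() submits every command queued on the statement with addBatch() and returns an int[] containing one update count per command; a value may also be Statement.SUCCESS_NO_INFO (-2) when the driver cannot report a count. A minimal, self-contained sketch of the pattern follows; the in-memory H2 URL and the item table are illustrative assumptions, not taken from any example below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class BatchInsertSketch {
    public static void main(String[] args) throws SQLException {
        // Assumes the H2 driver is on the classpath; any JDBC URL works here.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement st = conn.createStatement()) {
                st.execute("CREATE TABLE item (id INT, name VARCHAR(64))");
            }
            try (PreparedStatement ps = conn.prepareStatement("INSERT INTO item (id, name) VALUES (?, ?)")) {
                for (int i = 1; i <= 3; i++) {
                    ps.setInt(1, i);
                    ps.setString(2, "item-" + i);
                    ps.addBatch();                    // queue one command
                }
                int[] counts = ps.executeBatch();     // run all queued commands
                System.out.println(counts.length + " commands executed");
            }
        }
    }
}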
From source file: org.apache.sqoop.mapreduce.SQLServerExportDBExecThread.java

/**
 * Execute the provided PreparedStatement. By default this assumes batch
 * execution, but subclasses may override it for a different mode of
 * execution, which should match the getPreparedStatement implementation.
 */
@Override
protected void executeStatement(PreparedStatement stmt, List<SqoopRecord> records)
        throws SQLException {
    // On failures in the commit step, we cannot guarantee that transactions
    // have been successfully committed to the database. This can result in
    // access violation issues for columns with unique constraints.
    // One way to handle this is to check whether records are committed in the
    // database before propagating the failure for retry. However, in case of
    // connection loss, we won't be able to access the database.
    // An alternative is to ignore violation issues in the next retry in case
    // the records have already been committed.
    Connection conn = getConnection();
    try {
        stmt.executeBatch();
    } catch (SQLException execSqlEx) {
        LOG.warn("Error executing statement: " + execSqlEx);
        if (failedCommit && canIgnoreForFailedCommit(execSqlEx.getSQLState())) {
            LOG.info("Ignoring error after failed commit");
        } else {
            throw execSqlEx;
        }
    }

    // If the batch of records is executed successfully, commit before
    // processing the next batch of records
    try {
        conn.commit();
        failedCommit = false;
    } catch (SQLException commitSqlEx) {
        LOG.warn("Error while committing transactions: " + commitSqlEx);
        failedCommit = true;
        throw commitSqlEx;
    }
}
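On most drivers the SQLException caught around stmt.executeBatch() above is actually a java.sql.BatchUpdateException, whose getUpdateCounts() array shows how far the batch progressed before failing. A hedged fragment, not Sqoop code; stmt and LOG stand in for the fields of the surrounding class:

// Fragment for illustration: inspecting a failed batch via the standard
// java.sql.BatchUpdateException; stmt and LOG are assumed from context.
try {
    stmt.executeBatch();
} catch (BatchUpdateException bue) {
    int[] counts = bue.getUpdateCounts();
    for (int i = 0; i < counts.length; i++) {
        if (counts[i] == Statement.EXECUTE_FAILED) {
            LOG.warn("Batch command " + i + " failed");
        }
    }
    throw bue;
}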
From source file: org.apache.hive.hplsql.Copy.java

/**
 * Copy the query results to another table
 * @throws Exception
 */
void copyToTable(HplsqlParser.Copy_stmtContext ctx, Query query) throws Exception {
    ResultSet rs = query.getResultSet();
    if (rs == null) {
        return;
    }
    ResultSetMetaData rm = rs.getMetaData();
    int cols = rm.getColumnCount();
    int rows = 0;
    if (trace) {
        trace(ctx, "SELECT executed: " + cols + " columns");
    }
    Connection conn = exec.getConnection(targetConn);
    StringBuilder sql = new StringBuilder();
    sql.append("INSERT INTO " + sqlInsertName + " VALUES (");
    for (int i = 0; i < cols; i++) {
        sql.append("?");
        if (i + 1 < cols) {
            sql.append(",");
        }
    }
    sql.append(")");
    PreparedStatement ps = conn.prepareStatement(sql.toString());
    long start = timer.start();
    long prev = start;
    boolean batchOpen = false;
    while (rs.next()) {
        for (int i = 1; i <= cols; i++) {
            ps.setObject(i, rs.getObject(i));
        }
        rows++;
        if (batchSize > 1) {
            ps.addBatch();
            batchOpen = true;
            if (rows % batchSize == 0) {
                ps.executeBatch();
                batchOpen = false;
            }
        } else {
            ps.executeUpdate();
        }
        if (trace && rows % 100 == 0) {
            long cur = timer.current();
            if (cur - prev > 10000) {
                trace(ctx, "Copying rows: " + rows + " (" + rows / ((cur - start) / 1000) + " rows/sec)");
                prev = cur;
            }
        }
    }
    // Flush any rows left in a partially filled batch
    if (batchOpen) {
        ps.executeBatch();
    }
    ps.close();
    exec.returnConnection(targetConn, conn);
    exec.setRowCount(rows);
    long elapsed = timer.stop();
    if (info) {
        // Math.max guards against division by zero for sub-second copies
        info(ctx, "COPY completed: " + rows + " row(s), " + timer.format() + ", "
                + rows / Math.max(elapsed / 1000, 1) + " rows/sec");
    }
}
From source file: org.wso2.carbon.device.mgt.core.device.details.mgt.dao.impl.DeviceDetailsDAOImpl.java

@Override
public void addDeviceProperties(Map<String, String> propertyMap, int deviceId, int enrolmentId)
        throws DeviceDetailsMgtDAOException {
    if (propertyMap.isEmpty()) {
        if (log.isDebugEnabled()) {
            log.debug("Property map of device id :" + deviceId + " is empty.");
        }
        return;
    }
    Connection conn;
    PreparedStatement stmt = null;
    try {
        conn = this.getConnection();
        stmt = conn.prepareStatement(
                "INSERT INTO DM_DEVICE_INFO (DEVICE_ID, KEY_FIELD, VALUE_FIELD, ENROLMENT_ID) "
                        + "VALUES (?, ?, ?, ?)");
        for (Map.Entry<String, String> entry : propertyMap.entrySet()) {
            stmt.setInt(1, deviceId);
            stmt.setString(2, entry.getKey());
            stmt.setString(3, entry.getValue());
            stmt.setInt(4, enrolmentId);
            stmt.addBatch();
        }
        stmt.executeBatch();
    } catch (SQLException e) {
        throw new DeviceDetailsMgtDAOException("Error occurred while inserting device properties to database.", e);
    } finally {
        DeviceManagementDAOUtil.cleanupResources(stmt, null);
    }
}
From source file: fitmon.WorkoutData.java

public void addData(String workout, String intensity, int minutes, double calories, String date, int userId)
        throws IOException, NoSuchAlgorithmException, InvalidKeyException, JSONException,
        SQLException, ClassNotFoundException {
    PreparedStatement st = null;
    Connection conn = null;
    try {
        Class.forName("com.mysql.jdbc.Driver");
        conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/fitmon", "root", "april-23");
        String query = "INSERT into workout (type,calories,date,intensity,duration,userId) values (?,?,?,?,?,?);";
        st = conn.prepareStatement(query);
        conn.setAutoCommit(false);
        st.setString(1, workout);
        st.setDouble(2, calories);
        st.setString(3, date);
        st.setString(4, intensity);
        st.setInt(5, minutes);
        st.setInt(6, userId);
        st.addBatch();
        st.executeBatch();
        conn.commit();
        System.out.println("Record is inserted into workout table!");
    } catch (SQLException e) {
        System.out.println(e.getMessage());
        if (conn != null) {
            // Roll back only if the connection was actually opened
            conn.rollback();
        }
    } finally {
        // Close resources once, here, rather than both in the try and finally blocks
        if (st != null) {
            st.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
}
From source file: com.nabla.wapp.server.auth.UserManager.java

public boolean updateRoleDefinition(final Integer roleId, final SelectionDelta delta) throws SQLException {
    Assert.argumentNotNull(roleId);
    Assert.argumentNotNull(delta);
    final LockTableGuard lock = new LockTableGuard(conn, LOCK_USER_TABLES);
    try {
        final ConnectionTransactionGuard guard = new ConnectionTransactionGuard(conn);
        try {
            if (delta.isRemovals())
                Database.executeUpdate(conn,
                        "DELETE FROM role_definition WHERE role_id=? AND child_role_id IN (?);",
                        roleId, delta.getRemovals());
            if (delta.isAdditions()) {
                final PreparedStatement stmt = conn
                        .prepareStatement("INSERT INTO role_definition (role_id, child_role_id) VALUES(?,?);");
                try {
                    stmt.clearBatch();
                    stmt.setInt(1, roleId);
                    for (final Integer childId : delta.getAdditions()) {
                        stmt.setInt(2, childId);
                        stmt.addBatch();
                    }
                    if (!Database.isBatchCompleted(stmt.executeBatch()))
                        return false;
                } finally {
                    stmt.close();
                }
            }
            return guard.setSuccess(updateUserRoleTable());
        } finally {
            guard.close();
        }
    } finally {
        lock.close();
    }
}
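Database.isBatchCompleted is a project-specific helper; the JDK itself only defines the return-array convention. A plausible minimal reconstruction, assuming the helper merely validates the executeBatch() result, might look like this (a guess at its intent, not the actual Nabla source):

// Hypothetical reconstruction of a helper like Database.isBatchCompleted:
// the batch is considered complete when every command reports a row count
// or Statement.SUCCESS_NO_INFO (-2).
static boolean isBatchCompleted(int[] updateCounts) {
    for (int count : updateCounts) {
        if (count < 0 && count != java.sql.Statement.SUCCESS_NO_INFO) {
            return false; // Statement.EXECUTE_FAILED or an unknown status
        }
    }
    return true;
}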
From source file: org.wso2.carbon.analytics.datasource.rdbms.RDBMSAnalyticsFileSystem.java

private void writeChunksMerge(String path, List<DataChunk> chunks, String query) throws IOException {
    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = this.getConnection(false);
        stmt = conn.prepareStatement(query);
        for (DataChunk chunk : chunks) {
            this.populateStatementWithDataChunk(stmt, path, chunk);
            stmt.addBatch();
        }
        stmt.executeBatch();
        conn.commit();
    } catch (SQLException e) {
        RDBMSUtils.rollbackConnection(conn);
        throw new IOException("Error in fs write chunk merge: " + e.getMessage(), e);
    } finally {
        RDBMSUtils.cleanupConnection(null, stmt, conn);
    }
}
From source file: QueryRunner.java

/**
 * Execute a batch of SQL INSERT, UPDATE, or DELETE queries.
 *
 * @param conn The Connection to use to run the query. The caller is
 * responsible for closing this Connection.
 * @param sql The SQL to execute.
 * @param params An array of query replacement parameters. Each row in
 * this array is one set of batch replacement values.
 * @return The number of rows updated per statement.
 * @throws SQLException if a database access error occurs
 * @since DbUtils 1.1
 */
public int[] batch(Connection conn, String sql, Object[][] params) throws SQLException {
    PreparedStatement stmt = null;
    int[] rows = null;
    try {
        stmt = this.prepareStatement(conn, sql);
        for (int i = 0; i < params.length; i++) {
            this.fillStatement(stmt, params[i]);
            stmt.addBatch();
        }
        rows = stmt.executeBatch();
    } catch (SQLException e) {
        this.rethrow(e, sql, params);
    } finally {
        if (stmt != null) {
            // Guard against an NPE when prepareStatement itself failed
            stmt.close();
        }
    }
    return rows;
}
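A minimal, hypothetical caller for the batch(...) method above; the runner instance, the users table, and its columns are assumptions for illustration:

// Hypothetical usage; "runner" is an instance of the class above and the
// "users" table is illustrative. Each inner array is one set of parameters.
Object[][] params = {
    { "alice", "alice@example.com" },
    { "bob",   "bob@example.com" }
};
int[] updated = runner.batch(conn, "INSERT INTO users (name, email) VALUES (?, ?)", params);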
From source file: netflow.DatabaseProxy.java

private void updateAggregationResults(List<AggregationRecord> records) throws SQLException {
    if (records.isEmpty()) {
        log.debug("Nothing to update");
        return;
    }
    log.debug("updateAggregationResults(): <<<<");
    log.debug(records.size() + " to update");
    PreparedStatement pstmt = con.prepareStatement(getQuery("aggregation.summary.update"));
    for (AggregationRecord record : records) {
        pstmt.setLong(1, record.getInput());
        pstmt.setLong(2, record.getOutput());
        pstmt.setInt(3, record.getClientId());
        pstmt.setDate(4, record.getDate());
        pstmt.addBatch();
    }
    final int[] ints = pstmt.executeBatch();
    log.debug(ints.length + " records updated");
    log.debug("updateAggregationResults(): >>>>");
}
From source file: netflow.DatabaseProxy.java

private void addAggregationResults(List<AggregationRecord> records) throws SQLException {
    if (records.isEmpty()) {
        log.debug("Nothing to insert");
        return;
    }
    log.debug("insertAggregationResults(): <<<<");
    log.debug(records.size() + " to insert");
    PreparedStatement pstmt = con.prepareStatement(getQuery("aggregation.summary.insert"));
    for (AggregationRecord record : records) {
        pstmt.setLong(1, record.getInput());
        pstmt.setLong(2, record.getOutput());
        pstmt.setInt(3, record.getClientId());
        pstmt.setDate(4, record.getDate());
        pstmt.addBatch();
    }
    final int[] ints = pstmt.executeBatch();
    log.debug(ints.length + " records inserted");
    log.debug("insertAggregationResults(): >>>>");
}
From source file: eu.celarcloud.celar_ms.ServerPack.Database.MySQL.DBHandlerWithConnPool.java

public void insertBatchMetricValues(ArrayList<MetricObj> metriclist) {
    PreparedStatement stmt = null;
    Connection c = null;
    try {
        c = this.getConnection();
        stmt = c.prepareStatement(INSERT_METRIC_VALUE);
        for (MetricObj metric : metriclist) {
            stmt.setString(1, metric.getMetricID());
            stmt.setTimestamp(2, new java.sql.Timestamp(metric.getTimestamp()));
            stmt.setString(3, metric.getValue());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } catch (SQLException e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler insertBatchMetricValues>> " + e);
    } catch (Exception e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler insertBatchMetricValues>> " + e);
    } finally {
        this.release(stmt, c);
    }
}
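Most of the examples above accumulate the full input list before calling executeBatch() once. For very large inputs it is common to flush periodically so the driver does not buffer the whole batch in memory; a minimal sketch of that pattern, with an illustrative table name and batch size (requires the java.sql imports shown in the first example):

// Illustrative pattern, not from any project above: flush every BATCH_SIZE
// rows instead of accumulating the entire list.
static final int BATCH_SIZE = 1000;

static void insertAll(Connection conn, List<String[]> rows) throws SQLException {
    try (PreparedStatement ps = conn.prepareStatement("INSERT INTO t (a, b) VALUES (?, ?)")) {
        int pending = 0;
        for (String[] row : rows) {
            ps.setString(1, row[0]);
            ps.setString(2, row[1]);
            ps.addBatch();
            if (++pending == BATCH_SIZE) {
                ps.executeBatch(); // send the accumulated commands now
                pending = 0;
            }
        }
        if (pending > 0) {
            ps.executeBatch(); // flush the final partial batch
        }
    }
}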