List of usage examples for java.sql.PreparedStatement.addBatch()

void addBatch() throws SQLException;

Adds a set of parameters to this PreparedStatement object's batch of commands.
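Before the examples taken from real projects below, here is a minimal, self-contained sketch of the typical addBatch()/executeBatch() pattern. It assumes an H2 driver on the classpath purely for the sake of a runnable demo; the JDBC URL, table, and column names are hypothetical placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class AddBatchExample {
    public static void main(String[] args) throws SQLException {
        // hypothetical in-memory database; substitute your own URL and schema
        String url = "jdbc:h2:mem:demo";
        String sql = "INSERT INTO person (id, name) VALUES (?, ?)";
        String[] names = { "alice", "bob", "carol" };
        try (Connection conn = DriverManager.getConnection(url)) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE person (id INT PRIMARY KEY, name VARCHAR(50))");
            }
            conn.setAutoCommit(false); // group the whole batch into one transaction
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                for (int i = 0; i < names.length; i++) {
                    stmt.setInt(1, i + 1);      // bind parameters for one row
                    stmt.setString(2, names[i]);
                    stmt.addBatch();            // queue this parameter set
                }
                int[] counts = stmt.executeBatch(); // send all queued sets in one round trip
                conn.commit();
                System.out.println("Inserted " + counts.length + " rows");
            }
        }
    }
}

Each element of the returned int[] is the update count for the corresponding queued parameter set; a driver may report Statement.SUCCESS_NO_INFO when individual counts are unavailable.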
From source file: QueryRunner.java

/**
 * Execute a batch of SQL INSERT, UPDATE, or DELETE queries.
 *
 * @param conn The Connection to use to run the query. The caller is
 *             responsible for closing this Connection.
 * @param sql The SQL to execute.
 * @param params An array of query replacement parameters. Each row in
 *               this array is one set of batch replacement values.
 * @return The number of rows updated per statement.
 * @throws SQLException if a database access error occurs
 * @since DbUtils 1.1
 */
public int[] batch(Connection conn, String sql, Object[][] params) throws SQLException {
    PreparedStatement stmt = null;
    int[] rows = null;
    try {
        stmt = this.prepareStatement(conn, sql);
        for (int i = 0; i < params.length; i++) {
            this.fillStatement(stmt, params[i]);
            stmt.addBatch();
        }
        rows = stmt.executeBatch();
    } catch (SQLException e) {
        this.rethrow(e, sql, params);
    } finally {
        if (stmt != null) { // guard against NPE when prepareStatement itself failed
            stmt.close();
        }
    }
    return rows;
}
From source file: org.wso2.carbon.analytics.datasource.rdbms.RDBMSAnalyticsFileSystem.java

private void writeChunksMerge(String path, List<DataChunk> chunks, String query) throws IOException {
    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = this.getConnection(false);
        stmt = conn.prepareStatement(query);
        for (DataChunk chunk : chunks) {
            this.populateStatementWithDataChunk(stmt, path, chunk);
            stmt.addBatch();
        }
        stmt.executeBatch();
        conn.commit();
    } catch (SQLException e) {
        RDBMSUtils.rollbackConnection(conn);
        throw new IOException("Error in fs write chunk merge: " + e.getMessage(), e);
    } finally {
        RDBMSUtils.cleanupConnection(null, stmt, conn);
    }
}
From source file: HSqlManager.java

private static void checkPhage(Connection connection) throws SQLException, IOException {
    List<String[]> all = INSTANCE.readFileAllStrains(INSTANCE.path);
    List<String> clusters = all.stream().map(x -> x[0]).collect(Collectors.toList());
    Set<String> phages = all.stream().map(x -> x[1]).collect(Collectors.toSet());
    List<String> strains = all.stream().map(x -> x[2]).collect(Collectors.toList());
    List<String> phageslist = all.stream().map(x -> x[1]).collect(Collectors.toList());
    Set<String> dbphages = new HashSet<>();
    Statement st = connection.createStatement();
    PreparedStatement insertPhages = connection
            .prepareStatement("INSERT INTO Primerdb.Phages(Name, Cluster, Strain) values(?,?,?);");
    String sql = "SELECT * FROM Primerdb.Phages;";
    ResultSet rs = st.executeQuery(sql);
    while (rs.next()) {
        dbphages.add(rs.getString("Name"));
    }
    phages.removeAll(dbphages);
    List<String[]> phageinfo = new ArrayList<>();
    if (phages.size() > 0) {
        System.out.println("Phages Added:");
        phages.forEach(x -> {
            String[] ar = new String[3];
            System.out.println(x);
            String cluster = clusters.get(phageslist.indexOf(x));
            String strain = strains.get(phageslist.indexOf(x));
            try {
                insertPhages.setString(1, x);
                insertPhages.setString(2, cluster);
                insertPhages.setString(3, strain);
                insertPhages.addBatch();
            } catch (SQLException e) {
                e.printStackTrace();
            }
            ar[0] = x;
            ar[1] = cluster;
            ar[2] = strain;
            phageinfo.add(ar);
        });
        // execute the accumulated batch once, after all rows have been queued
        try {
            insertPhages.executeBatch();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        newPhages = phageinfo;
    } else {
        System.out.println("No Phages added");
    }
    st.close();
    insertPhages.close();
}
From source file: eu.celarcloud.celar_ms.ServerPack.Database.MySQL.DBHandlerWithConnPool.java

public void insertBatchMetricValues(ArrayList<MetricObj> metriclist) {
    PreparedStatement stmt = null;
    Connection c = null;
    try {
        c = this.getConnection();
        stmt = c.prepareStatement(INSERT_METRIC_VALUE);
        for (MetricObj metric : metriclist) {
            stmt.setString(1, metric.getMetricID());
            stmt.setTimestamp(2, new java.sql.Timestamp(metric.getTimestamp()));
            stmt.setString(3, metric.getValue());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } catch (SQLException e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler insertBatchMetricValues>> " + e);
    } catch (Exception e) {
        server.writeToLog(Level.SEVERE, "MySQL Handler insertBatchMetricValues>> " + e);
    } finally {
        this.release(stmt, c);
    }
}
From source file: com.nabla.dc.server.xml.settings.XmlCompany.java

public boolean save(final Connection conn, final Map<String, Integer> companyIds, final SaveContext ctx)
        throws SQLException, DispatchException {
    Integer companyId = companyIds.get(getName());
    if (companyId != null) {
        if (ctx.getOption() == SqlInsertOptions.APPEND)
            return true;
        Database.executeUpdate(conn, "UPDATE company SET active=? WHERE id=?;", active, companyId);
        Database.executeUpdate(conn, "DELETE FROM financial_year WHERE company_id=?;", companyId);
        if (accounts != null) {
            if (log.isDebugEnabled())
                log.debug("deleting all accounts of company '" + getName() + "'");
            accounts.clear(conn, companyId);
        }
        if (asset_categories != null)
            asset_categories.clear(conn, companyId);
        if (users != null)
            users.clear(conn, companyId);
    } else {
        companyId = Database.addRecord(conn, "INSERT INTO company (name,uname,active) VALUES(?,?,?);",
                getName(), getName().toUpperCase(), active);
        if (companyId == null)
            throw new InternalErrorException(Util.formatInternalErrorDescription("failed to insert company"));
        companyIds.put(getName(), companyId);
    }
    final Integer financialYearId = Database.addRecord(conn,
            "INSERT INTO financial_year (company_id, name) VALUES(?,?);", companyId, financial_year);
    final PreparedStatement stmt = conn
            .prepareStatement("INSERT INTO period_end (financial_year_id,name,end_date) VALUES(?,?,?);");
    try {
        stmt.setInt(1, financialYearId);
        final Calendar dt = new GregorianCalendar();
        dt.setTime(start_date);
        final SimpleDateFormat financialYearFormat = new SimpleDateFormat("MMM yyyy");
        for (int m = 0; m < 12; ++m) {
            dt.set(GregorianCalendar.DAY_OF_MONTH, dt.getActualMaximum(GregorianCalendar.DAY_OF_MONTH));
            final Date end = new Date(dt.getTime().getTime());
            stmt.setString(2, financialYearFormat.format(end));
            stmt.setDate(3, end);
            stmt.addBatch();
            dt.add(GregorianCalendar.MONTH, 1);
        }
        if (!Database.isBatchCompleted(stmt.executeBatch()))
            throw new InternalErrorException(Util.formatInternalErrorDescription(
                    "fail to insert periods for company '" + getName() + "'"));
    } finally {
        stmt.close();
    }
    if (accounts != null)
        accounts.save(conn, companyId);
    return (asset_categories == null || asset_categories.save(conn, companyId, ctx))
            && (users == null || users.save(conn, companyId, ctx));
}
From source file: fitmon.WorkoutData.java

public void addData(String workout, String intensity, int minutes, double calories, String date, int userId)
        throws IOException, NoSuchAlgorithmException, InvalidKeyException, JSONException, SQLException,
        ClassNotFoundException {
    PreparedStatement st = null;
    Connection conn = null;
    try {
        Class.forName("com.mysql.jdbc.Driver");
        conn = (Connection) DriverManager.getConnection("jdbc:mysql://localhost:3306/fitmon", "root",
                "april-23");
        String query = "INSERT into workout (type,calories,date,intensity,duration,userId) values (?,?,?,?,?,?);";
        st = conn.prepareStatement(query);
        conn.setAutoCommit(false);
        st.setString(1, workout);
        st.setDouble(2, calories);
        st.setString(3, date);
        st.setString(4, intensity);
        st.setInt(5, minutes);
        st.setInt(6, userId);
        st.addBatch();
        st.executeBatch();
        conn.commit();
        System.out.println("Record is inserted into workout table!");
    } catch (SQLException e) {
        System.out.println(e.getMessage());
        if (conn != null) { // guard against NPE when the connection was never obtained
            conn.rollback();
        }
    } finally {
        if (st != null) {
            st.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
}
From source file: org.wso2.carbon.device.mgt.core.archival.dao.impl.ArchivalDAOImpl.java

@Override
public void copyOperationIDsForArchival(List<Integer> operationIds) throws ArchivalDAOException {
    PreparedStatement stmt = null;
    try {
        Connection conn = ArchivalSourceDAOFactory.getConnection();
        String sql = "INSERT INTO DM_ARCHIVED_OPERATIONS(ID,CREATED_TIMESTAMP) VALUES (?,NOW())";
        stmt = conn.prepareStatement(sql);
        int count = 0;
        for (int i = 0; i < operationIds.size(); i++) {
            stmt.setInt(1, operationIds.get(i));
            stmt.addBatch();
            // flush the batch every batchSize rows to bound memory usage
            if (++count % this.batchSize == 0) {
                stmt.executeBatch();
            }
        }
        stmt.executeBatch(); // flush any remaining rows
        if (log.isDebugEnabled()) {
            log.debug(count + " Records copied to the temporary table.");
        }
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error while copying operation Ids for archival", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt);
    }
}
From source file: org.accada.epcis.repository.capture.CaptureOperationsBackendSQL.java

/**
 * {@inheritDoc}
 */
public void insertEpcsForEvent(final CaptureOperationsSession session, final long eventId,
        final String eventType, final List<String> epcs) throws SQLException {
    // preparing statement for insertion of associated EPCs
    String insert = "INSERT INTO event_" + eventType + "_EPCs (event_id, epc) VALUES (?, ?)";
    PreparedStatement ps = session.getBatchInsert(insert);
    LOG.debug("INSERT: " + insert);
    // insert all EPCs in the EPCs array
    for (String epc : epcs) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(" insert param 1: " + eventId);
            LOG.debug(" insert param 2: " + epc);
        }
        ps.setLong(1, eventId);
        ps.setString(2, epc);
        ps.addBatch();
    }
    // note: no executeBatch() here; the statement obtained via getBatchInsert()
    // appears to have its batch executed later by the session's batch handling
}
From source file: com.gs.obevo.db.impl.core.jdbc.JdbcHelper.java

public int[] batchUpdate(Connection conn, String sql, Object[][] argsArray) {
    PreparedStatement ps = null;
    try {
        this.jdbcHandler.preUpdate(conn, this);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Executing batch update on connection {}: {} with args: {}", displayConnection(conn),
                    sql, argsArray);
        }
        ps = conn.prepareStatement(sql);
        for (Object[] args : argsArray) {
            for (int j = 0; j < args.length; j++) {
                if (!parameterTypeEnabled || args[j] != null) {
                    ps.setObject(j + 1, args[j]);
                } else {
                    ps.setNull(j + 1, ps.getParameterMetaData().getParameterType(j + 1));
                }
            }
            ps.addBatch();
        }
        return ps.executeBatch();
    } catch (SQLException e) {
        LOG.error("Error during batch execution; will print out the full batch stack trace: ");
        this.logSqlBatchException(e, 0);
        throw new DataAccessException(e);
    } finally {
        DbUtils.closeQuietly(ps);
    }
}
From source file: org.wso2.carbon.identity.openidconnect.dao.RequestObjectDAOImpl.java

private void insertRequestObjectClaimValues(Map<Integer, List<String>> claimValues, Connection connection)
        throws IdentityOAuth2Exception {
    String sqlStmt = SQLQueries.STORE_IDN_OIDC_REQ_OBJECT_CLAIM_VALUES;
    PreparedStatement prepStmt = null;
    try {
        prepStmt = connection.prepareStatement(sqlStmt);
        for (Map.Entry<Integer, List<String>> entry : claimValues.entrySet()) {
            List<String> claimValuesList = entry.getValue();
            if (CollectionUtils.isNotEmpty(claimValuesList)) {
                for (String value : claimValuesList) {
                    prepStmt.setInt(1, entry.getKey());
                    prepStmt.setString(2, value);
                    prepStmt.addBatch();
                    if (log.isDebugEnabled()) {
                        log.debug("Claim value :" + value + " is added to the batch.");
                    }
                }
            }
        }
        prepStmt.executeBatch();
        connection.commit();
    } catch (SQLException e) {
        String errorMessage = "Error when storing the request object claim values.";
        log.error(errorMessage, e);
        throw new IdentityOAuth2Exception(errorMessage, e);
    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
    }
}