List of usage examples for java.sql PreparedStatement addBatch
void addBatch() throws SQLException;

Adds a set of parameters to this PreparedStatement object's batch of commands.

From source file: com.mtgi.analytics.sql.BehaviorTrackingDataSourceTest.java
@Test public void testPreparedStatement() throws Exception { //test tracking through the prepared statement API, which should also //log parameters in the events. PreparedStatement stmt = conn.prepareStatement("insert into TEST_TRACKING values (?, ?, ?)"); stmt.setLong(1, 1);/*from w w w .j ava2 s . c o m*/ stmt.setString(2, "hello"); stmt.setObject(3, null, Types.VARCHAR); assertEquals(1, stmt.executeUpdate()); //test support for batching. each batch should log 1 event. stmt.setLong(1, 3); stmt.setString(2, "batch"); stmt.setObject(3, "1", Types.VARCHAR); stmt.addBatch(); stmt.setLong(1, 4); stmt.setString(2, "batch"); stmt.setObject(3, "2", Types.VARCHAR); stmt.addBatch(); stmt.executeBatch(); //back to a regular old update. stmt.setLong(1, 2); stmt.setObject(2, "goodbye", Types.VARCHAR); stmt.setNull(3, Types.VARCHAR); assertEquals(1, stmt.executeUpdate()); stmt = conn.prepareStatement("update TEST_TRACKING set DESCRIPTION = 'world'"); assertEquals(4, stmt.executeUpdate()); stmt = conn.prepareStatement("select ID from TEST_TRACKING order by ID"); ResultSet rs = stmt.executeQuery(); int index = 0; long[] keys = { 1L, 2L, 3L, 4L }; while (rs.next()) assertEquals(keys[index++], rs.getLong(1)); rs.close(); assertEquals(4, index); manager.flush(); assertEventDataMatches("BehaviorTrackingDataSourceTest.testPreparedStatement-result.xml"); }
From source file:com.tacitknowledge.util.migration.jdbc.SqlLoadMigrationTask.java
/** * {@inheritDoc}/*ww w .j a v a 2 s .co m*/ */ public void migrate(MigrationContext ctx) throws MigrationException { DataSourceMigrationContext context = (DataSourceMigrationContext) ctx; Connection conn = null; PreparedStatement stmt = null; try { conn = context.getConnection(); stmt = conn.prepareStatement(getStatmentSql()); List rows = getData(getResourceAsStream()); int rowCount = rows.size(); for (int i = 0; i < rowCount; i++) { String data = (String) rows.get(i); boolean loadRowFlag = insert(data, stmt); if (loadRowFlag) { stmt.addBatch(); if (i % 50 == 0) { stmt.executeBatch(); } } } stmt.executeBatch(); context.commit(); } catch (Exception e) { String message = getName() + ": Error running SQL \"" + getStatmentSql() + "\""; log.error(message, e); if (e instanceof SQLException) { if (((SQLException) e).getNextException() != null) { log.error("Chained SQL Exception", ((SQLException) e).getNextException()); } } context.rollback(); throw new MigrationException(message, e); } finally { SqlUtil.close(conn, stmt, null); } }
From source file:com.dsf.dbxtract.cdc.AppJournalWindowTest.java
/** * Rigourous Test :-)//from ww w. j a va 2 s .com * * @throws Exception * in case of any error */ @Test(dependsOnMethods = "setUp", timeOut = 120000) public void testAppWithJournalWindow() throws Exception { final Config config = new Config(configFile); BasicDataSource ds = new BasicDataSource(); Source source = config.getDataSources().getSources().get(0); ds.setDriverClassName(source.getDriver()); ds.setUsername(source.getUser()); ds.setPassword(source.getPassword()); ds.setUrl(source.getConnection()); // prepara os dados Connection conn = ds.getConnection(); conn.createStatement().execute("truncate table test"); conn.createStatement().execute("truncate table j$test"); // Carrega os dados de origem PreparedStatement ps = conn.prepareStatement("insert into test (key1,key2,data) values (?,?,?)"); for (int i = 0; i < TEST_SIZE; i++) { if ((i % 100) == 0) { ps.executeBatch(); } ps.setInt(1, i); ps.setInt(2, i); ps.setInt(3, (int) Math.random() * 500); ps.addBatch(); } ps.executeBatch(); ps.close(); // Popula as tabelas de journal ps = conn.prepareStatement("insert into j$test (key1,key2) values (?,?)"); for (int i = 0; i < TEST_SIZE; i++) { if ((i % 500) == 0) { ps.executeBatch(); } ps.setInt(1, i); ps.setInt(2, i); ps.addBatch(); } ps.executeBatch(); ps.close(); Long maxWindowId = 0L; ResultSet rs = conn.createStatement().executeQuery("select max(window_id) from j$test"); if (rs.next()) { maxWindowId = rs.getLong(1); System.out.println("maximum window_id loaded: " + maxWindowId); } rs.close(); conn.close(); ds.close(); // Clear any previous test String zkKey = "/dbxtract/cdc/" + source.getName() + "/J$TEST/lastWindowId"; if (client.checkExists().forPath(zkKey) != null) client.delete().forPath(zkKey); // starts monitor Monitor.getInstance(config); // start app app = new App(config); System.out.println(config.toString()); app.start(); Assert.assertEquals(config.getHandlers().iterator().next().getStrategy(), JournalStrategy.WINDOW); while (true) { 
TimeUnit.MILLISECONDS.sleep(500); try { Long lastWindowId = Long.parseLong(new String(client.getData().forPath(zkKey))); System.out.println("lastWindowId = " + lastWindowId); if (maxWindowId.longValue() == lastWindowId.longValue()) { System.out.println("expected window_id reached"); break; } } catch (NoNodeException nne) { System.out.println("ZooKeeper - no node exception :: " + zkKey); } } }
From source file:org.apache.sqoop.repository.derby.DerbyUpgradeGenericJdbcConnectorConfigAndInputNames.java
private void renameConfigInputs(long configId, Map<String, String> inputNameMap) { PreparedStatement statement = null; try {/*from w w w . ja v a 2s. c o m*/ statement = connection.prepareStatement(DerbySchemaUpgradeQuery.QUERY_UPDATE_TABLE_SQ_INPUT_SQI_NAME); for (String inputName : inputNameMap.keySet()) { statement.setString(1, inputNameMap.get(inputName)); statement.setString(2, inputName); statement.setLong(3, configId); statement.addBatch(); LOG.debug("QUERY(" + DerbySchemaUpgradeQuery.QUERY_UPDATE_TABLE_SQ_INPUT_SQI_NAME + ") args [" + inputNameMap.get(inputName) + "," + inputName + "," + configId + "]"); } int[] updateCounts = statement.executeBatch(); LOG.debug("QUERY(" + DerbySchemaUpgradeQuery.QUERY_UPDATE_TABLE_SQ_INPUT_SQI_NAME + ") update count: " + StringUtils.join(ArrayUtils.toObject(updateCounts), ",")); } catch (SQLException e) { throw new SqoopException(DerbyRepoError.DERBYREPO_0002, e); } finally { handler.closeStatements(statement); } }
From source file:de.is24.infrastructure.gridfs.http.metadata.generation.PrimaryDbGenerator.java
private void writePrimaryFiles(PreparedStatement ps, YumPackage yumPackage, int pkgKey, Matcher<YumPackageFile> matcher) throws SQLException { int counter = 0; for (YumPackageDir dir : yumPackage.getPackageDirs()) { for (YumPackageFile file : select(dir.getFiles(), matcher)) { ps.setString(1, file.getDir() + file.getName()); ps.setString(2, file.getType().toString().toLowerCase()); ps.setInt(3, pkgKey);//from www . j a v a 2s.c o m ps.addBatch(); counter++; } } if (counter > 0) { ps.executeBatch(); } }
From source file:org.wso2.carbon.is.migration.service.v540.migrator.PermissionMigrator.java
private void fillPreparedStatement(List<Permission> permissions, PreparedStatement statement) throws SQLException { for (Permission permission : permissions) { statement.setInt(1, permission.getUniqueId()); statement.setInt(2, permission.getId()); statement.setInt(3, permission.getTenantId()); statement.addBatch(); }/*from w ww .jav a 2 s .c om*/ }
From source file:org.schedoscope.metascope.tasks.repository.mysql.impl.ParameterValueEntityMySQLRepository.java
@Override public void insertOrUpdate(Connection connection, List<ParameterValueEntity> parameterValues) { String insertParameterSql = "insert into parameter_value_entity (" + JDBCUtil.getDatabaseColumnsForClass(ParameterValueEntity.class) + ") values (" + JDBCUtil.getValuesCountForClass(ParameterValueEntity.class) + ") " + "on duplicate key update " + MySQLUtil.getOnDuplicateKeyString(ParameterValueEntity.class); PreparedStatement stmt = null; try {//from w ww. j a va 2s .c om int batch = 0; connection.setAutoCommit(false); stmt = connection.prepareStatement(insertParameterSql); for (ParameterValueEntity parameterValueEntity : parameterValues) { stmt.setString(1, parameterValueEntity.getUrlPath()); stmt.setString(2, parameterValueEntity.getKey()); stmt.setString(3, parameterValueEntity.getValue()); stmt.setString(4, parameterValueEntity.getTableFqdn()); stmt.addBatch(); batch++; if (batch % 1024 == 0) { stmt.executeBatch(); } } stmt.executeBatch(); connection.commit(); connection.setAutoCommit(true); } catch (SQLException e) { LOG.error("Could not save parameter value", e); } finally { DbUtils.closeQuietly(stmt); } }
From source file:org.schedoscope.metascope.tasks.repository.mysql.impl.SuccessorEntityMySQLRepository.java
@Override public void insertOrUpdate(Connection connection, List<SuccessorEntity> successors) { String insertSuccessorSql = "insert into successor_entity (" + JDBCUtil.getDatabaseColumnsForClass(SuccessorEntity.class) + ") values (" + JDBCUtil.getValuesCountForClass(SuccessorEntity.class) + ") " + "on duplicate key update " + MySQLUtil.getOnDuplicateKeyString(SuccessorEntity.class); PreparedStatement stmt = null; try {//from w w w. j a v a 2 s . c o m int batch = 0; connection.setAutoCommit(false); stmt = connection.prepareStatement(insertSuccessorSql); for (SuccessorEntity successorEntity : successors) { stmt.setString(1, successorEntity.getUrlPath()); stmt.setString(2, successorEntity.getSuccessorUrlPath()); stmt.setString(3, successorEntity.getSuccessorFqdn()); stmt.setString(4, successorEntity.getInternalViewId()); stmt.addBatch(); batch++; if (batch % 1024 == 0) { stmt.executeBatch(); } } stmt.executeBatch(); connection.commit(); connection.setAutoCommit(true); } catch (SQLException e) { LOG.error("Could not save successor", e); } finally { DbUtils.closeQuietly(stmt); } }
From source file:com.rajaram.bookmark.dao.BookmarkDaoImpl.java
@Override public void insertBookmarks(String userName, List<Bookmark> bookmarkList) { String sql = "INSERT INTO " + tableName + " (user_name, url, " + "url_name, folder_name) VALUES (?, ?, ?, ?)"; Connection conn = null;/*w w w. ja v a 2s. c o m*/ final int batchSize = 1000; int count = 0; try { conn = dataSource.getConnection(); PreparedStatement prepareStatement = conn.prepareStatement(sql); for (Bookmark bookmark : bookmarkList) { prepareStatement.setString(BookmarkSqlTableConstants.userNameIndex, userName); prepareStatement.setString(BookmarkSqlTableConstants.urlIndex, bookmark.getUrl()); prepareStatement.setString(BookmarkSqlTableConstants.urlNameIndex, bookmark.getName()); prepareStatement.setString(BookmarkSqlTableConstants.folderNameIndex, bookmark.getFolder()); prepareStatement.addBatch(); if (++count % batchSize == 0) { prepareStatement.executeBatch(); } } prepareStatement.executeBatch(); // insert remaining records prepareStatement.close(); } catch (SQLException e) { throw new RuntimeException(e); } finally { if (conn != null) { try { conn.close(); } catch (SQLException e) { } } } }
From source file:org.schedoscope.metascope.tasks.repository.mysql.impl.ViewDependencyEntityMySQLRepository.java
@Override public void insertOrUpdate(Connection connection, List<ViewDependencyEntity> dependencies) { String insertDependencySql = "insert into view_dependency_entity (" + JDBCUtil.getDatabaseColumnsForClass(ViewDependencyEntity.class) + ") values (" + JDBCUtil.getValuesCountForClass(ViewDependencyEntity.class) + ") " + "on duplicate key update " + MySQLUtil.getOnDuplicateKeyString(ViewDependencyEntity.class); PreparedStatement stmt = null; try {//from w ww . j ava2 s. co m int batch = 0; connection.setAutoCommit(false); stmt = connection.prepareStatement(insertDependencySql); for (ViewDependencyEntity viewDependencyEntity : dependencies) { stmt.setString(1, viewDependencyEntity.getUrlPath()); stmt.setString(2, viewDependencyEntity.getDependencyUrlPath()); stmt.setString(3, viewDependencyEntity.getDependencyFqdn()); stmt.setString(4, viewDependencyEntity.getInternalViewId()); stmt.addBatch(); batch++; if (batch % 1024 == 0) { stmt.executeBatch(); } } stmt.executeBatch(); connection.commit(); connection.setAutoCommit(true); } catch (SQLException e) { LOG.error("Could not save view dependency", e); } finally { DbUtils.closeQuietly(stmt); } }