List of usage examples for java.sql.Statement.execute
boolean execute(String sql) throws SQLException;
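The boolean return value reports the shape of the first result: true means a ResultSet is available through getResultSet(), false means an update count (or no result) is available through getUpdateCount(). Before the examples below, here is a minimal sketch of that dispatch, assuming an already-open java.sql.Connection named con; the table name demo_table is illustrative only:

    // Minimal sketch of the execute() contract ("demo_table" is a made-up name).
    try (Statement stmt = con.createStatement()) {
        boolean hasResultSet = stmt.execute("SELECT id FROM demo_table");
        if (hasResultSet) {
            // execute() returned true: the first result is a ResultSet
            try (ResultSet rs = stmt.getResultSet()) {
                while (rs.next()) {
                    System.out.println(rs.getLong("id"));
                }
            }
        } else {
            // execute() returned false: the first result is an update count (or -1)
            System.out.println("update count: " + stmt.getUpdateCount());
        }
    }

Unlike executeQuery() or executeUpdate(), execute() is the right call when the statement type is not known in advance, which is why most of the examples below use it for mixed DDL/DML scripts.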
From source file:migration.ProjektMigration.java
    /**
     * Creates the Nutzung (usage) records by copying rows from the legacy
     * Nutzungstabelle into the target Nutzung table.
     */
    public void createNutzung() {
        String load_sql;
        Statement load_stmt;
        ResultSet load_rs;
        String store_sql;
        PreparedStatement store_prepstmt;
        try {
            load_sql = "SELECT Zugriffe, Zeitraum, Nutzungsjahr, Rechnungsbetrag, Titelnummer FROM Nutzungstabelle";
            load_stmt = this.leg_con.createStatement();
            store_sql = "insert into Nutzung (journal_id, nutzungsjahr, rechnungsbetrag, zeitraum, zugriffe) values (?, ?, ?, ?, ?)";
            // possibly we need something along the lines of Statement.RETURN_GENERATED_KEYS here
            store_prepstmt = this.tgt_con.prepareStatement(store_sql);
            load_stmt.execute(load_sql);
            load_rs = load_stmt.getResultSet();
            while (load_rs.next()) {
                final int titelnummer = load_rs.getInt("Titelnummer");
                final int journalID = this.help.getIdFromIntArray(this.help.getJournals(), titelnummer);
                // only copy rows that resolve to a known journal
                if ((titelnummer > 0) && (journalID > 0)) {
                    store_prepstmt.setLong(1, journalID);
                    store_prepstmt.setLong(2, load_rs.getLong("Nutzungsjahr"));
                    store_prepstmt.setFloat(3, load_rs.getFloat("Rechnungsbetrag"));
                    store_prepstmt.setLong(4, load_rs.getLong("Zeitraum"));
                    store_prepstmt.setLong(5, load_rs.getLong("Zugriffe"));
                    store_prepstmt.executeUpdate();
                }
            }
        } catch (final SQLException e) {
            e.printStackTrace();
        }
    }
From source file:com.baifendian.swordfish.execserver.engine.hive.HiveSqlExec.java
    /**
     * Runs the given SQL statements against HiveServer2 and reports each result
     * through the callback. (The original Javadoc was mojibake; the descriptions
     * below are reconstructed from the parameter names and the code.)
     *
     * @param createFuncs    statements that register user-defined functions, run before the queries
     * @param sqls           the SQL statements to execute
     * @param isContinue     whether to continue with the remaining statements after a failure
     * @param resultCallback callback invoked with the result of each statement
     * @param queryLimit     maximum number of rows to fetch per query (null uses the default)
     * @param remainTime     remaining execution time budget
     */
    public boolean execute(List<String> createFuncs, List<String> sqls, boolean isContinue,
            ResultCallback resultCallback, Integer queryLimit, int remainTime) {
        // no time budget left, give up immediately
        if (remainTime <= 0) {
            return false;
        }
        // fall back to the default row limit if none was given
        queryLimit = (queryLimit != null) ? queryLimit : defaultQueryLimit;
        HiveConnection hiveConnection = null;
        Statement sta = null;
        Thread logThread = null;
        // look up the HiveServer2 connection information for the current user
        HiveService2ConnectionInfo hiveService2ConnectionInfo = hiveUtil.getHiveService2ConnectionInfo(userName);
        logger.info("execution connection information:{}", hiveService2ConnectionInfo);
        HiveService2Client hiveService2Client = hiveUtil.getHiveService2Client();
        try {
            try {
                hiveConnection = hiveService2Client.borrowClient(hiveService2ConnectionInfo);
                sta = hiveConnection.createStatement();
                // sta.setQueryTimeout(remainTime);
                // start a daemon thread that streams the JDBC logs of the statement
                logThread = new Thread(new JdbcLogRunnable(sta));
                logThread.setDaemon(true);
                logThread.start();
                // set queue
                if (queueSQL != null) {
                    logger.info("hive queue : {}", queueSQL);
                    sta.execute(queueSQL);
                }
                // register user-defined functions before running the queries
                if (createFuncs != null) {
                    for (String createFunc : createFuncs) {
                        logger.info("hive create function sql: {}", createFunc);
                        sta.execute(createFunc);
                    }
                }
            } catch (Exception e) {
                logger.error("execute query exception", e);
                // setup failed: mark all statements as failed and stop
                handlerResults(0, sqls, FlowStatus.FAILED, resultCallback);
                return false;
            }
            // execute the SQL statements one by one
            for (int index = 0; index < sqls.size(); ++index) {
                String sql = sqls.get(index);
                Date startTime = new Date();
                logger.info("hive execute sql: {}", sql);
                ExecResult execResult = new ExecResult();
                execResult.setIndex(index);
                execResult.setStm(sql);
                try {
                    // queries and SHOW-like statements return a result set
                    if (HiveUtil.isTokQuery(sql) || HiveUtil.isLikeShowStm(sql)) {
                        sta.setMaxRows(queryLimit);
                        ResultSet res = sta.executeQuery(sql);
                        ResultSetMetaData resultSetMetaData = res.getMetaData();
                        int count = resultSetMetaData.getColumnCount();
                        List<String> colums = new ArrayList<>();
                        for (int i = 1; i <= count; i++) {
                            colums.add(resultSetMetaData.getColumnLabel(i));
                        }
                        execResult.setTitles(colums);
                        List<List<String>> datas = new ArrayList<>();
                        // real queries (or multi-column results) are read row by row
                        if (count > 1 || HiveUtil.isTokQuery(sql)) {
                            while (res.next()) {
                                List<String> values = new ArrayList<>();
                                for (int i = 1; i <= count; ++i) {
                                    values.add(res.getString(i));
                                }
                                datas.add(values);
                            }
                        } else {
                            // single-column SHOW-style output is collapsed into one value
                            StringBuffer buffer = new StringBuffer();
                            while (res.next()) {
                                buffer.append(res.getString(1));
                                buffer.append("\n");
                            }
                            List<String> values = new ArrayList<>();
                            values.add(buffer.toString().trim());
                            datas.add(values);
                        }
                        execResult.setValues(datas);
                    } else {
                        // everything else (DDL, DML) goes through execute()
                        sta.execute(sql);
                    }
                    execResult.setStatus(FlowStatus.SUCCESS);
                    if (resultCallback != null) {
                        Date endTime = new Date();
                        resultCallback.handleResult(execResult, startTime, endTime);
                    }
                } catch (SQLTimeoutException e) {
                    // the statement ran out of time; fail everything that is left
                    logger.error("executeQuery timeout exception", e);
                    handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                    return false;
                } catch (DaoSemanticException | HiveSQLException e) {
                    logger.error("executeQuery exception", e);
                    if (isContinue) {
                        handlerResult(index, sql, FlowStatus.FAILED, resultCallback);
                    } else {
                        handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                        return false;
                    }
                } catch (Exception e) {
                    // a broken thrift transport means the connection is unusable
                    if (e.toString().contains("TTransportException")) {
                        logger.error("Get TTransportException return a client", e);
                        // hiveService2Client.invalidateObject(hiveService2ConnectionInfo, hiveConnection);
                        handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                        return false;
                    }
                    // a socket error invalidates the whole connection pool
                    if (e.toString().contains("SocketException")) {
                        logger.error("SocketException clear pool", e);
                        hiveService2Client.clear();
                        handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                        return false;
                    }
                    logger.error("executeQuery exception", e);
                    if (isContinue) {
                        handlerResult(index, sql, FlowStatus.FAILED, resultCallback);
                    } else {
                        handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                        return false;
                    }
                }
            }
        } finally {
            try {
                if (sta != null) {
                    sta.close();
                }
            } catch (Exception e) {
                logger.error("Catch an exception", e);
            }
            try {
                if (hiveConnection != null) {
                    // return the connection to the pool rather than closing it
                    hiveService2Client.returnClient(hiveService2ConnectionInfo, hiveConnection);
                }
            } catch (Exception e) {
                logger.error("Catch an exception", e);
            }
            try {
                if (logThread != null) {
                    logThread.interrupt();
                    logThread.join(HiveUtil.DEFAULT_QUERY_PROGRESS_THREAD_TIMEOUT);
                }
            } catch (Exception e) {
                logger.error("Catch an exception", e);
            }
        }
        return true;
    }
From source file:com.edgenius.wiki.installation.DBLoader.java
    public void resetTable(String dbType, ConnectionProxy con) throws SQLException, IOException {
        Statement dropStat = null;
        Statement stat = null;
        try {
            log.info("Creating tables...");
            dropStat = con.createStatement();
            stat = con.createStatement();
            List<String> lines = loadSQLFile(dbType, dbType + ".ddl");
            for (String sql : lines) {
                sql = sql.replaceAll("\n", " ").trim();
                // Special patch for MySQL 4: key sizes over 1024 bytes fail
                // (http://bugs.mysql.com/bug.php?id=4541), so run this index
                // creation separately and ignore any error.
                if (sql.toLowerCase().startsWith("create index page_link_index on "
                        + Constants.TABLE_PREFIX.toLowerCase() + "page_links")) {
                    try {
                        // only MySQL 4 may throw an exception here; ignore it
                        dropStat.execute(sql);
                    } catch (Exception e) {
                        log.error("Create page link index operation failed....");
                    }
                    continue;
                }
                // 1. Don't try to detect whether the table exists: isTableExist() only
                //    checks whether the role table has data, so it won't work after a
                //    partial create.
                // 2. Run drops independently inside try..catch because some databases
                //    report an error when dropping a table that does not exist.
                if (sql.toLowerCase().startsWith("alter table ") && sql.toLowerCase().indexOf(" drop ") != -1
                        || sql.toLowerCase().startsWith("drop ")) {
                    try {
                        dropStat.execute(sql);
                    } catch (Exception e) {
                        log.error("Drop operation failed. It is OK for initial installation.");
                    }
                    continue;
                }
                stat.addBatch(sql);
            }
            stat.executeBatch();
            log.info("Initialize data for system...");
            lines = loadSQLFile(dbType, dbType + "-init-tables.sql");
            for (String sql : lines) {
                sql = sql.replaceAll("\n", " ").trim();
                stat.addBatch(sql);
            }
            stat.executeBatch();
            log.info("Initialize quartz tables for system...");
            lines = loadSQLFile(dbType, dbType + "-quartz.sql");
            for (String sql : lines) {
                sql = sql.replaceAll("\n", " ").trim();
                if (sql.toLowerCase().startsWith("drop ")) {
                    try {
                        dropStat.execute(sql);
                    } catch (Exception e) {
                        log.error("Drop operation failed...." + sql);
                    }
                    continue;
                }
                stat.addBatch(sql);
            }
            stat.executeBatch();
            log.info("System all tables and initial data are ready");
        } finally {
            if (stat != null)
                stat.close();
            if (dropStat != null)
                dropStat.close();
        }
    }
From source file:migration.ProjektMigration.java
    /**
     * Creates the Rechnung (invoice) records by copying rows from the legacy
     * ExRechnungstabelle into the target Rechnung table.
     */
    public void createRechnung() {
        String load_sql;
        Statement load_stmt;
        ResultSet load_rs;
        String store_sql;
        PreparedStatement store_prepstmt;
        try {
            load_sql = "SELECT Rechnungsbetrag, Bezugsform, Bezugsjahr, Sigel, Exemplar, ExImportID FROM ExRechnungstabelle";
            load_stmt = this.leg_con.createStatement();
            // the sigel_sigelId column is intentionally omitted from the insert
            store_sql = "insert into Rechnung (betrag, bezugsform, bezugsjahr, exemplar_exemplarId) values (?, ?, ?, ?)";
            // possibly we need something along the lines of Statement.RETURN_GENERATED_KEYS here
            store_prepstmt = this.tgt_con.prepareStatement(store_sql);
            load_stmt.execute(load_sql);
            load_rs = load_stmt.getResultSet();
            while (load_rs.next()) {
                store_prepstmt.setInt(1, load_rs.getInt("Rechnungsbetrag"));
                store_prepstmt.setString(2, load_rs.getString("Bezugsform"));
                store_prepstmt.setString(3, load_rs.getString("Bezugsjahr"));
                int tmp = this.help.getIdFromIntArray(this.getExemplare(), load_rs.getInt("Exemplar"));
                if (tmp > 0) {
                    store_prepstmt.setLong(4, tmp);
                } else {
                    store_prepstmt.setNull(4, java.sql.Types.BIGINT);
                }
                tmp = this.help.getIdFromStringArray(this.help.getSigel(), load_rs.getString("Sigel"));
                // store_prepstmt.setLong(5, (long) tmp);
                store_prepstmt.executeUpdate();
            }
        } catch (final SQLException e) {
            e.printStackTrace();
        }
    }
From source file:org.zenoss.zep.dao.impl.ElapsedTime.java
    @Override
    public void optimizeTables() throws ZepException {
        final DatabaseType dbType = databaseCompatibility.getDatabaseType();
        final String externalToolName = this.useExternalToolPath + "/pt-online-schema-change";
        final String tableToOptimize = "event_summary";
        // Use Percona's pt-online-schema-change, if available, to avoid the table
        // locks a plain MySQL OPTIMIZE would take; first check the tool exists.
        if (this.useExternalTool && dbType == DatabaseType.MYSQL
                && DaoUtils.executeCommand("ls " + externalToolName) == 0) {
            logger.info("Validating state of event_summary");
            this.validateEventSummaryState();
            logger.debug("Optimizing table: " + tableToOptimize + " via percona " + externalToolName);
            eventSummaryOptimizationTime.setStartTime();
            String externalToolCommandPrefix = externalToolName + " --alter \"ENGINE=Innodb\" D=" + this.dbname
                    + ",t=";
            String externalToolCommandSuffix = "";
            if (System.getenv("USE_ZENDS") != null && Integer.parseInt(System.getenv("USE_ZENDS").trim()) == 1) {
                externalToolCommandSuffix = " --defaults-file=/opt/zends/etc/zends.cnf";
            }
            externalToolCommandSuffix += " " + this.externalToolOptions
                    + " --alter-foreign-keys-method=drop_swap --host=" + this.hostname + " --port=" + this.port
                    + " --user=" + this.username + " --password=" + this.password + " --execute";
            int return_code = DaoUtils
                    .executeCommand(externalToolCommandPrefix + tableToOptimize + externalToolCommandSuffix);
            if (return_code != 0) {
                logger.error("External tool failed on: " + tableToOptimize + ". Therefore, table: "
                        + tableToOptimize + " will not be optimized.");
            } else {
                logger.debug("Successfully optimized table: " + tableToOptimize + " using percona "
                        + externalToolName);
            }
            eventSummaryOptimizationTime.setEndTime();
            SendOptimizationTimeEvent(eventSummaryOptimizationTime, tableToOptimize, "percona");
            if (this.tablesToOptimize.contains(tableToOptimize)) {
                this.tablesToOptimize.remove(tableToOptimize);
            }
        } else {
            if (this.useExternalTool) {
                logger.warn("External tool not available. Table: " + tableToOptimize
                        + " optimization may be slow.");
            }
            if (!this.tablesToOptimize.contains(tableToOptimize)) {
                this.tablesToOptimize.add(tableToOptimize);
            }
        }
        eventSummaryOptimizationTime.setStartTime(); // init so elapsedTime() == 0
        try {
            logger.debug("Optimizing tables: {}", this.tablesToOptimize);
            this.template.execute(new ConnectionCallback<Object>() {
                @Override
                public Object doInConnection(Connection con) throws SQLException, DataAccessException {
                    Boolean currentAutoCommit = null;
                    Statement statement = null;
                    try {
                        currentAutoCommit = con.getAutoCommit();
                        con.setAutoCommit(true);
                        statement = con.createStatement();
                        for (String tableToOptimize : tablesToOptimize) {
                            logger.debug("Optimizing table: {}", tableToOptimize);
                            final String sql;
                            switch (dbType) {
                            case MYSQL:
                                sql = "OPTIMIZE TABLE " + tableToOptimize;
                                break;
                            case POSTGRESQL:
                                sql = "VACUUM ANALYZE " + tableToOptimize;
                                break;
                            default:
                                throw new IllegalStateException("Unsupported database type: " + dbType);
                            }
                            if ("event_summary".equals(tableToOptimize)) {
                                eventSummaryOptimizationTime.setStartTime();
                            }
                            statement.execute(sql);
                            if ("event_summary".equals(tableToOptimize)) {
                                eventSummaryOptimizationTime.setEndTime();
                            }
                            logger.debug("Completed optimizing table: {}", tableToOptimize);
                        }
                    } finally {
                        JdbcUtils.closeStatement(statement);
                        if (currentAutoCommit != null) {
                            con.setAutoCommit(currentAutoCommit);
                        }
                    }
                    return null;
                }
            });
        } finally {
            logger.info("Validating state of event_summary");
            this.validateEventSummaryState();
        }
        if (eventSummaryOptimizationTime.getElapsedTime() > 0) {
            SendOptimizationTimeEvent(eventSummaryOptimizationTime, "event_summary", "");
        }
        logger.debug("Completed Optimizing tables: {}", tablesToOptimize);
    }
From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java
    /**
     * {@inheritDoc}
     */
    @Override
    public void activateNode(Connection con, SequencerEngine seq, ContentEngine ce, FxTreeMode mode,
            final long nodeId, boolean activateContents) throws FxApplicationException {
        if (mode == FxTreeMode.Live)
            return; // the live tree itself cannot be activated
        long ids[] = getIdChain(con, mode, nodeId); // all ids up to the root node
        acquireLocksForUpdate(con, mode, Arrays.asList(ArrayUtils.toObject(ids)));
        try {
            // lock the node in the live tree including all children (which *can* be
            // removed if they were removed in the edit tree)
            acquireLocksForUpdate(con, FxTreeMode.Live,
                    selectDirectChildNodeIds(con, FxTreeMode.Live, nodeId, true));
        } catch (SQLException e) {
            throw new FxDbException(e);
        }
        for (long id : ids) {
            if (id == ROOT_NODE)
                continue;
            FxTreeNode srcNode = getNode(con, mode, id);
            // check if the node already exists in the live tree
            if (exists(con, FxTreeMode.Live, id)) {
                // move and setData will not do anything if the node is already in
                // its correct place and carries the correct data
                move(con, seq, FxTreeMode.Live, id, srcNode.getParentNodeId(), srcNode.getPosition());
                setData(con, FxTreeMode.Live, id, srcNode.getData());
            } else {
                createNode(con, seq, ce, FxTreeMode.Live, srcNode.getId(), srcNode.getParentNodeId(),
                        srcNode.getName(), srcNode.getLabel(), srcNode.getPosition(), srcNode.getReference(),
                        srcNode.getData(), activateContents);
            }
            // remove all deleted direct child nodes
            Statement stmt = null;
            Statement stmt2 = null;
            try {
                stmt = con.createStatement();
                stmt2 = con.createStatement();
                if (StorageManager.isDisableIntegrityTransactional()) {
                    stmt2.execute(StorageManager.getReferentialIntegrityChecksStatement(false));
                }
                try {
                    ResultSet rs = stmt.executeQuery("SELECT DISTINCT tl.ID FROM " + getTable(FxTreeMode.Live)
                            + " tl LEFT JOIN " + getTable(FxTreeMode.Edit)
                            + " te ON tl.ID=te.ID WHERE te.ID IS NULL AND te.PARENT=" + nodeId
                            + " AND tl.PARENT=" + nodeId);
                    while (rs != null && rs.next()) {
                        long deleteId = rs.getLong(1);
                        acquireLocksForUpdate(con, FxTreeMode.Live, Arrays.asList(deleteId));
                        stmt2.addBatch("DELETE FROM " + getTable(FxTreeMode.Live) + " WHERE ID=" + deleteId);
                    }
                    stmt2.addBatch("UPDATE " + getTable(FxTreeMode.Live) + " SET MODIFIED_AT="
                            + System.currentTimeMillis());
                    stmt2.executeBatch();
                } finally {
                    if (StorageManager.isDisableIntegrityTransactional()) {
                        stmt2.execute(StorageManager.getReferentialIntegrityChecksStatement(true));
                    }
                }
            } catch (SQLException e) {
                throw new FxTreeException("ex.tree.activate.failed", nodeId, false, e.getMessage());
            } finally {
                try {
                    if (stmt != null)
                        stmt.close();
                } catch (Exception exc) {
                    // ignore
                }
                try {
                    if (stmt2 != null)
                        stmt2.close();
                } catch (Exception exc) {
                    // ignore
                }
            }
            clearDirtyFlag(con, mode, nodeId);
        }
    }
From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java
    /**
     * Do-what-I-mean function :-D
     *
     * @param con                      an open and valid Connection
     * @param seq                      a valid Sequencer reference
     * @param sourceMode               the source table (matters in createMode only)
     * @param destMode                 the destination table (matters in createMode only)
     * @param nodeId                   the node to work on
     * @param includeNodeId            if true the operation's root node (nodeId) is included in the updates
     * @param overrideSpacing          if set, this spacing is used instead of the computed one
     * @param overrideLeft             if set, this will be the first left position
     * @param insertParent             create mode only: the parent node in which we will generate the free space
     *                                 specified by the parameters [insertPosition] and [insertSpace]
     * @param insertPosition           create mode only: the position within the destination node's children
     * @param insertSpace              create mode only: the space to keep free at the specified position
     * @param insertBoundaries         create mode only: the insert boundaries
     * @param depthDelta               create mode only: the delta to apply to the depth
     * @param destinationNode          create mode only: the destination node
     * @param createMode               if true the function will insert copies of nodes instead of updating them
     * @param createKeepIds            keep the ids in create mode
     * @param disableSpaceOptimization if the space inside the node must not be compacted (for moving node trees)
     * @return first created node id, or -1 if no node was created using this method
     * @throws FxTreeException if the function fails
     */
    public long reorganizeSpace(Connection con, SequencerEngine seq, FxTreeMode sourceMode, FxTreeMode destMode,
            long nodeId, boolean includeNodeId, BigInteger overrideSpacing, BigInteger overrideLeft,
            FxTreeNodeInfo insertParent, int insertPosition, BigInteger insertSpace,
            BigInteger insertBoundaries[], int depthDelta, Long destinationNode, boolean createMode,
            boolean createKeepIds, boolean disableSpaceOptimization) throws FxTreeException {
        Statement stmt = null;
        try {
            synchronized (LOCK_REORG) {
                acquireLocksForUpdate(con, sourceMode);
                stmt = con.createStatement();
                // temporarily disable referential integrity checks where supported
                if (StorageManager.isDisableIntegrityTransactional()) {
                    stmt.execute(StorageManager.getReferentialIntegrityChecksStatement(false));
                }
                return _reorganizeSpace(con, seq, sourceMode, destMode, nodeId, includeNodeId, overrideSpacing,
                        overrideLeft, insertParent, insertPosition, insertSpace, insertBoundaries, depthDelta,
                        destinationNode, createMode, createKeepIds, disableSpaceOptimization);
            }
        } catch (FxDbException e) {
            throw new FxTreeException(e);
        } catch (SQLException e) {
            throw new FxTreeException(LOG, e, "ex.db.sqlError", e.getMessage());
        } finally {
            if (stmt != null) {
                try {
                    // re-enable referential integrity checks before closing
                    if (StorageManager.isDisableIntegrityTransactional()) {
                        try {
                            stmt.execute(StorageManager.getReferentialIntegrityChecksStatement(true));
                        } catch (SQLException e) {
                            LOG.error(e);
                        }
                    }
                    stmt.close();
                } catch (SQLException e) {
                    LOG.error(e);
                }
            }
        }
    }
From source file:lib.JdbcTemplate.java
    @Override
    public void execute(final String sql) throws DataAccessException {
        if (logger.isDebugEnabled()) {
            logger.debug("Executing SQL statement [" + sql + "]");
        }

        class ExecuteStatementCallback implements StatementCallback<Object>, SqlProvider {
            @Override
            public Object doInStatement(Statement stmt) throws SQLException {
                stmt.execute(sql);
                return null;
            }

            @Override
            public String getSql() {
                return sql;
            }
        }

        execute(new ExecuteStatementCallback());
    }
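This is the classic template/callback pattern: the outer execute(StatementCallback) acquires the Connection and Statement, runs the callback, and handles cleanup and exception translation, so the callback only issues the SQL. A minimal, hypothetical caller might look like this (the constructor argument and DDL are illustrative, not taken from the source above):

    // hedged usage sketch: assumes this JdbcTemplate is constructed from a
    // javax.sql.DataSource, as in Spring's JdbcTemplate; the DDL is made up
    JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
    jdbcTemplate.execute("CREATE TABLE demo_events (id BIGINT PRIMARY KEY, payload VARCHAR(255))");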
From source file:com.thinkbiganalytics.nifi.v2.thrift.RefreshableDataSource.java
    /**
     * Tests the connection to see if it can be used to communicate with the JDBC source.
     *
     * @param username a username to connect with, if needed
     * @param password a password to connect with, if needed
     * @return true if the connection is alive
     */
    public synchronized boolean testConnection(String username, String password) {
        // Get a connection to test
        final Connection connection;
        try {
            connection = (StringUtils.isNotBlank(username) || StringUtils.isNotBlank(password))
                    ? getConnectionForValidation(username, password)
                    : getConnectionForValidation();
            log.info("connection obtained by RefreshableDatasource");
        } catch (final SQLException e) {
            log.warn("A database access error occurred when getting a connection for JDBC URL: {}", url, e);
            return false;
        }

        // Test the connection using different methods
        boolean asyncCleanup = false;
        Statement statement = null;
        try {
            // Method 1: Test using the driver's isValid()
            try {
                // can throw "java.sql.SQLException: Method not supported"; ignore and try other methods if so
                final boolean isValid = connection.isValid(validationQueryTimeout.intValue());
                if (!isValid) {
                    log.info("Connection obtained for JDBC URL was not valid: {}", url);
                    return false;
                }
            } catch (final SQLException e) {
                log.debug("The isValid() method is not supported for the JDBC URL: {}", url);
            }

            // Method 2: Test with a statement and a query timeout
            try {
                statement = connection.createStatement();
            } catch (final SQLException e) {
                log.warn("A database access error occurred when getting a statement for JDBC URL: {}", url, e);
                return false;
            }
            try {
                // throws "method not supported" with the Hive driver
                statement.setQueryTimeout(validationQueryTimeout.intValue());
                try {
                    statement.execute(validationQuery); // executes if setQueryTimeout threw no exception
                    return true;
                } catch (final SQLException e) {
                    log.debug("Failed to execute validation query for JDBC URL: {}", url, e);
                    log.info("Connection obtained for JDBC URL was not valid: {}", url);
                    return false;
                }
            } catch (final SQLException e) {
                log.warn("The Statement.setQueryTimeout() method is not supported for the JDBC URL: {}", url);
            }

            // Method 3: Test with a statement and a timer
            asyncCleanup = true;
            boolean isValid;
            try {
                isValid = validateQueryWithTimeout(statement, validationQuery, validationQueryTimeout.intValue());
            } catch (final SQLException e) {
                log.debug("Failed to execute validation query for JDBC URL: {}", url, e);
                isValid = false;
            }
            if (!isValid) {
                log.info("Connection obtained for JDBC URL was not valid: {}", url);
            }
            return isValid;
        } finally {
            connectionCleanup(connection, statement, asyncCleanup);
        }
    }
From source file:com.mysql.stresstool.RunnableQueryInsertPartRange.java
    public void run() {
        Connection conn = null;
        try {
            if (stikyconnection && jdbcUrlMap.get("dbType") != null
                    && !((String) jdbcUrlMap.get("dbType")).equals("MySQL")) {
                conn = DriverManager.getConnection((String) jdbcUrlMap.get("dbType"), "test", "test");
            } else if (stikyconnection) {
                conn = DriverManager.getConnection((String) jdbcUrlMap.get("jdbcUrl"));
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }
        try {
            long execTime = 0;
            int intBlobInterval = 0;
            int intBlobIntervalLimit = StressTool.getNumberFromRandom(4).intValue();
            ThreadInfo thInfo;
            long threadTimeStart = System.currentTimeMillis();
            active = true;
            thInfo = new ThreadInfo();
            thInfo.setId(this.ID);
            thInfo.setType("insert");
            thInfo.setStatusActive(this.isActive());
            StressTool.setInfo(this.ID, thInfo);
            boolean lazy = false;
            int lazyInterval = 0;
            for (int repeat = 0; repeat <= repeatNumber; repeat++) {
                if (!stikyconnection) {
                    // open a fresh connection for every loop; retry a few times on failure
                    try {
                        if (conn != null && !conn.isClosed()) {
                            conn.close();
                        }
                        SoftReference sf = new SoftReference(
                                DriverManager.getConnection((String) jdbcUrlMap.get("jdbcUrl")));
                        conn = (Connection) sf.get();
                    } catch (SQLException ex) {
                        for (int icon = 0; icon <= 3; icon++) {
                            try {
                                Thread.sleep(10000);
                                SoftReference sf = new SoftReference(
                                        DriverManager.getConnection((String) jdbcUrlMap.get("jdbcUrl")));
                                conn = (Connection) sf.get();
                            } catch (SQLException ex2) {
                                ex2.printStackTrace();
                            }
                        }
                    }
                }
                if (conn != null) {
                    Statement stmt = null;
                    conn.setAutoCommit(false);
                    stmt = conn.createStatement();
                    stmt.execute("SET AUTOCOMMIT=0");
                    ArrayList insert1 = null;
                    ArrayList insert2 = null;
                    if (repeat > 0 && lazyInterval < 500) {
                        lazy = true;
                        ++lazyInterval;
                    } else {
                        lazy = false;
                        lazyInterval = 0;
                    }
                    intBlobInterval++;
                    // build the insert statements, optionally reusing values (lazy)
                    Vector v = this.getTablesValues(lazy);
                    insert1 = (ArrayList<String>) v.get(0);
                    insert2 = (ArrayList<String>) v.get(1);
                    int[] iLine = { 0, 0 };
                    try {
                        long timeStart = System.currentTimeMillis();
                        if (this.ignoreBinlog)
                            stmt.execute("SET sql_log_bin=0");
                        if (dbType.equals("MySQL") && !engine.toUpperCase().equals("BRIGHTHOUSE"))
                            stmt.execute("BEGIN");
                        else
                            stmt.execute("COMMIT");
                        {
                            Iterator<String> it = insert1.iterator();
                            while (it.hasNext()) {
                                stmt.addBatch(it.next());
                            }
                        }
                        if (!this.doSimplePk) {
                            // only add the blob inserts every few loops
                            if (intBlobInterval > intBlobIntervalLimit) {
                                Iterator<String> it = insert2.iterator();
                                while (it.hasNext()) {
                                    stmt.addBatch(it.next());
                                }
                                intBlobInterval = 0;
                            }
                        }
                        if (debug) {
                            System.out.println("Thread " + thInfo.getId() + " Executing loop "
                                    + thInfo.getExecutedLoops() + " QUERY1==" + insert1);
                            System.out.println("Thread " + thInfo.getId() + " Executing loop "
                                    + thInfo.getExecutedLoops() + " QUERY2==" + insert2);
                        }
                        iLine = executeSQL(stmt);
                        long timeEnds = System.currentTimeMillis();
                        execTime = (timeEnds - timeStart);
                    } catch (SQLException sqle) {
                        conn.rollback();
                        System.out.println("FAILED QUERY1==" + insert1);
                        System.out.println("FAILED QUERY2==" + insert2);
                        sqle.printStackTrace();
                        System.exit(1);
                    } finally {
                        if (conn != null && !conn.isClosed()) {
                            try {
                                stmt.addBatch("COMMIT");
                                executeSQL(stmt);
                            } catch (SQLException sqle) {
                                System.out.println(
                                        "#####################\n[Warning] Cannot explicitly commit given error\n#################");
                                conn.close();
                                continue;
                            }
                        } else {
                            System.out.println(
                                    "#####################\n[Warning] Cannot explicitly commit given connection was interrupted unexpectedly\n#################");
                        }
                        if (doLog) {
                            System.out.println("Query Insert TH = " + this.getID() + " Loop N = " + repeat + " "
                                    + iLine[0] + "|" + ((iLine.length > 1) ? iLine[1] : 0) + " Exec Time(ms) ="
                                    + execTime + " Running = " + repeat + " of " + repeatNumber + " to go ="
                                    + (repeatNumber - repeat) + " Using Lazy=" + lazy);
                        }
                    }
                    thInfo.setExecutedLoops(repeat);
                    if (sleepFor > 0 || this.getSleepWrite() > 0) {
                        if (this.getSleepWrite() > 0) {
                            Thread.sleep(getSleepWrite());
                        } else
                            Thread.sleep(sleepFor);
                    }
                }
            }
            long threadTimeEnd = System.currentTimeMillis();
            this.executionTime = (threadTimeEnd - threadTimeStart);
            active = false;
            thInfo.setExecutionTime(executionTime);
            thInfo.setStatusActive(false);
            StressTool.setInfo(this.ID, thInfo);
            return;
        } catch (Exception ex) {
            ex.printStackTrace();
            try {
                conn.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }