List of usage examples for java.sql Statement execute
boolean execute(String sql) throws SQLException;
From source file:com.googlecode.fascinator.portal.HouseKeeper.java
/** * Create the given table in the database. * /*from w w w. ja v a2 s. com*/ * @param table The table to create * @throws SQLException if there was an error during creation, or an unknown * table was specified. */ private void createTable(String table) throws SQLException { if (table.equals(NOTIFICATIONS_TABLE)) { Statement sql = dbConnection().createStatement(); sql.execute("CREATE TABLE " + NOTIFICATIONS_TABLE + "(id INTEGER NOT NULL GENERATED ALWAYS AS " + "IDENTITY (START WITH 1, INCREMENT BY 1), " + "block CHAR(1) NOT NULL, " + "message VARCHAR(4000) NOT NULL, " + "datetime TIMESTAMP NOT NULL, " + "PRIMARY KEY (id))"); close(sql); return; } throw new SQLException("Unknown table '" + table + "' requested!"); }
From source file:com.nextep.designer.sqlclient.ui.editors.SQLFullClientGUI.java
/**
 * Runs the editor's current SQL script and renders the result in a new tab.
 *
 * Builds an SWT result table plus a stats cool-bar, then executes the query on
 * a background {@link Job}. Row data is marshalled back to the UI thread in
 * batches of {@code ICommand}s via {@code syncProcessCommands}.
 *
 * @see com.nextep.datadesigner.gui.model.IConnector#refreshConnector()
 */
@Override
public void refreshConnector() {
    // Clearing current table columns
    // clearSQLView();
    final ISQLScript script = (ISQLScript) getModel();
    // Nothing to do when the script is empty.
    if (script.getSql() == null || "".equals(script.getSql())) {
        return;
    }
    try {
        // sqlText.add("select * from " + o.getName());
        // sqlText.select(sqlText.getItemCount()-1);
        // Creating result table: one tab per executed query.
        final CTabItem sqlItem = new CTabItem(sqlFolder, SWT.NONE);
        Composite resultPane = new Composite(sqlFolder, SWT.NONE);
        final Table sqlView = new Table(resultPane, SWT.FULL_SELECTION | SWT.BORDER);
        final NextepTableEditor editor = NextepTableEditor.handle(sqlView);
        // Stats bar: row count label + execution time label.
        CoolBar statsBar = new CoolBar(resultPane, SWT.NONE);
        statsBar.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
        final CoolItem rowsItem = new CoolItem(statsBar, SWT.NONE);
        rowsItem.setSize(rowsItem.computeSize(100, 20));
        final Label rowsCount = new Label(statsBar, SWT.NONE);
        rowsItem.setControl(rowsCount);
        final CoolItem timeItem = new CoolItem(statsBar, SWT.NONE);
        final Label timeLabel = new Label(statsBar, SWT.NONE);
        timeItem.setControl(timeLabel);
        timeItem.setSize(timeItem.computeSize(200, 20));
        sqlView.setHeaderVisible(true);
        sqlView.setLinesVisible(true);
        sqlView.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
        resultPane.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
        GridLayout grid = new GridLayout();
        // Collapse all margins so the table fills the pane.
        grid.marginBottom = grid.marginHeight = grid.marginLeft = grid.marginRight = grid.marginTop = grid.marginWidth = 0;
        resultPane.setLayout(grid);
        sqlItem.setControl(resultPane);
        final Listener sortListener = new SortListener(sqlView);
        final String query = formatQuery(script.getSql());
        final int queryLen = query.length();
        // Tab label shows the (truncated) query text; full text in the tooltip.
        sqlItem.setText(queryLen < 30 ? query : query.substring(0, 30) + "...");
        sqlItem.setToolTipText(query);
        sqlFolder.setSelection(sqlItem);
        final List<ICommand> bufferedCommands = new ArrayList<ICommand>();
        // Initializing lines
        Job refreshJob = new Job("Fetching SQL data...") {
            @Override
            protected IStatus run(IProgressMonitor monitor) {
                Statement s = null;
                ResultSet r = null;
                try {
                    s = connection.createStatement();
                    final Date startDate = new Date();
                    final boolean isResultSet = s.execute(query);
                    final Date afterExecDate = new Date();
                    if (!isResultSet) {
                        // DML/DDL path: show update count (or "OK") in a single column.
                        final int updates = s.getUpdateCount();
                        bufferedCommands.add(new ICommand() {
                            @Override
                            public String getName() {
                                return null;
                            }

                            @Override
                            public Object execute(Object... parameters) {
                                if (sqlView != null && !sqlView.isDisposed()) {
                                    TableColumn c = new TableColumn(sqlView, SWT.NONE);
                                    c.setText(SQLClientMessages.getString("sql.result"));
                                    c.setWidth(300);
                                    c.addListener(SWT.Selection, sortListener);
                                    if (updates > 0) {
                                        final TableItem i = new TableItem(sqlView, SWT.NONE);
                                        i.setText(MessageFormat.format(
                                                SQLClientMessages.getString("sql.updatedRows"), updates));
                                    } else {
                                        final TableItem i = new TableItem(sqlView, SWT.NONE);
                                        i.setText(SQLClientMessages.getString("sql.queryOk"));
                                    }
                                }
                                return null;
                            }
                        });
                        syncProcessCommands(bufferedCommands);
                        return Status.OK_STATUS;
                    }
                    r = s.getResultSet();
                    // Initializing columns
                    final ResultSetMetaData md = r.getMetaData();
                    // Initializing sizing table (pixel widths, 8 px per character heuristic).
                    final int[] colMaxWidth = new int[md.getColumnCount() + 1];
                    for (int i = 1; i <= md.getColumnCount(); i++) {
                        final int index = i;
                        final String colName = md.getColumnName(index);
                        // final int colPrecision = md.getPrecision(index);
                        final int colType = md.getColumnType(index);
                        final int colIndex = i - 1;
                        bufferedCommands.add(new ICommand() {
                            @Override
                            public String getName() {
                                return null;
                            }

                            @Override
                            public Object execute(Object... parameters) {
                                if (!sqlView.isDisposed()) {
                                    TableColumn c = new TableColumn(sqlView, SWT.NONE);
                                    c.addListener(SWT.Selection, sortListener);
                                    c.setText(colName);
                                    c.setWidth(colName.length() * 8);
                                    colMaxWidth[colIndex] = c.getWidth();
                                    c.setData(COL_TYPE, colType);
                                    // Cell editor wired but with a no-op listener.
                                    TextColumnEditor.handle(editor, colIndex, ChangeEvent.CUSTOM_1,
                                            new IEventListener() {
                                                @Override
                                                public void handleEvent(ChangeEvent event, IObservable source,
                                                        Object data) {
                                                }
                                            });
                                }
                                return null;
                            }
                        });
                    }
                    final ResultSet rset = r;
                    int rows = 0;
                    final long execTime = afterExecDate.getTime() - startDate.getTime();
                    bufferedCommands.add(new ICommand() {
                        @Override
                        public String getName() {
                            return null;
                        }

                        @Override
                        public Object execute(Object... parameters) {
                            timeLabel.setText(MessageFormat
                                    .format(SQLClientMessages.getString("sql.executionTime"), execTime));
                            return null;
                        }
                    });
                    syncProcessCommands(bufferedCommands);
                    while (r.next()) {
                        rows++;
                        // Handling cancellation while fetching SQL lines
                        if (monitor.isCanceled()) {
                            return Status.CANCEL_STATUS;
                        }
                        final String[] colValues = new String[md.getColumnCount()];
                        final Collection<Integer> nullCols = new ArrayList<Integer>();
                        for (int i = 1; i <= md.getColumnCount(); i++) {
                            Object val = null;
                            try {
                                val = rset.getObject(i);
                            } catch (SQLException e) {
                                // A failed column fetch shows the error text in the cell
                                // instead of aborting the whole fetch.
                                LOGGER.error("Error while fetching column value : " + e.getMessage(), e);
                                val = e.getMessage();
                            }
                            final String strVal = strVal(val);
                            colValues[i - 1] = strVal;
                            // Building list of null columns
                            if (val == null) {
                                nullCols.add(i - 1);
                            }
                            // Updating max sizes
                            final int colWidth = colMaxWidth[i - 1];
                            if (strVal.length() * 8 > colWidth) {
                                colMaxWidth[i - 1] = strVal.length() * 8;
                            }
                        }
                        // Adding the row as a command
                        bufferedCommands.add(buildAddRowCommand(colValues, sqlView, nullCols));
                        // Flushing to display every N lines
                        if (bufferedCommands.size() > MAX_ROWS_BEFORE_REFRESH) {
                            bufferedCommands.add(buildAdjustWidthCommand(sqlView, colMaxWidth));
                            syncProcessCommands(bufferedCommands);
                        }
                    }
                    // Flushing any left row
                    bufferedCommands.add(buildAdjustWidthCommand(sqlView, colMaxWidth));
                    final Date afterFetchDate = new Date();
                    final int nbRows = rows;
                    bufferedCommands.add(new ICommand() {
                        @Override
                        public String getName() {
                            // TODO Auto-generated method stub
                            return null;
                        }

                        @Override
                        public Object execute(Object... parameters) {
                            long fetchTime = afterFetchDate.getTime() - afterExecDate.getTime();
                            timeLabel.setText(
                                    MessageFormat.format(SQLClientMessages.getString("sql.executionFetchTime"),
                                            execTime, fetchTime));
                            rowsCount.setText(MessageFormat
                                    .format(SQLClientMessages.getString("sql.fetchedRows"), nbRows));
                            return null;
                        }
                    });
                    syncProcessCommands(bufferedCommands);
                } catch (final SQLException e) {
                    // Replace the result table content with the SQL error.
                    PlatformUI.getWorkbench().getDisplay().syncExec(new Runnable() {
                        @Override
                        public void run() {
                            if (!sqlView.isDisposed()) {
                                sqlView.removeAll();
                                for (TableColumn c : sqlView.getColumns()) {
                                    c.dispose();
                                }
                                TableColumn c = new TableColumn(sqlView, SWT.NONE);
                                c.setText("SQL Exception " + e.getErrorCode());
                                c.setWidth(300);
                                TableItem i = new TableItem(sqlView, SWT.NONE);
                                i.setText(e.getMessage());
                            }
                        }
                    });
                    // throw new ErrorException(e);
                } finally {
                    try {
                        if (r != null) { // && !r.isClosed()
                            r.close();
                        }
                        if (s != null) { // && !s.isClosed()
                            s.close();
                        }
                    } catch (SQLException e) {
                        throw new ErrorException(e);
                    } finally {
                        // Re-enable the run button whatever happened above.
                        PlatformUI.getWorkbench().getDisplay().asyncExec(new Runnable() {
                            @Override
                            public void run() {
                                // If the user has closed his SQL Query editor, we will
                                // fall here (exception) with a disposed button
                                if (runSQLButton != null && !runSQLButton.isDisposed()) {
                                    runSQLButton.setEnabled(true);
                                }
                            }
                        });
                    }
                }
                return Status.OK_STATUS;
            }
        };
        // Disable the run button until the background job completes.
        runSQLButton.setEnabled(false);
        refreshJob.schedule();
        // } catch(SQLException e) {
        // throw new ErrorException(e);
    } finally {
        // try {
        // if(stmt != null && !stmt.isClosed()) {
        // stmt.close();
        // }
        // if(rset != null && !rset.isClosed()) {
        // rset.close();
        // }
        // } catch(SQLException e) {
        // throw new ErrorException(e);
        // }
    }
}
From source file:com.transcend.rds.worker.CreateDBInstanceReadReplicaActionWorker.java
/**
 * Wires a freshly restored read replica to its source DBInstance.
 *
 * High-level flow (order matters):
 *   1. poll until the replica row reaches "restoring", mark it MODIFYING;
 *   2. authorize the replica's IP on the master's EC2 security group, and the
 *      RDS server's IP on the replica's group;
 *   3. coordinate mysqldump/scp/restore with the Chef "Replication" databag;
 *   4. issue CHANGE MASTER / START SLAVE / read-only setup over JDBC;
 *   5. mark both instances AVAILABLE.
 *
 * NOTE(review): the polling loops on the Chef databag below spin without any
 * sleep and have no timeout — confirm an upper bound is enforced elsewhere.
 * NOTE(review): the JDBC Statements created near the end are never closed and
 * would leak if execute() throws; the scp Process result is never awaited.
 *
 * @param sess               Hibernate session (re-created internally while polling)
 * @param ac                 account owning both instances
 * @param dbInstanceId       the read replica's id
 * @param sourceDBInstanceId the master instance's id
 * @param avZone             availability zone (unused in the body — TODO confirm)
 * @param masterPort         master MySQL port
 * @param replicaPort        replica MySQL port
 * @param masterFile         master binlog file for CHANGE MASTER
 * @param position           master binlog position for CHANGE MASTER
 * @throws Exception on any failure (internal faults, SQL, Chef, IO)
 */
private void replicationHelper(Session sess, final AccountBean ac, final String dbInstanceId,
        final String sourceDBInstanceId, final String avZone, final int masterPort, final int replicaPort,
        final String masterFile, final int position) throws Exception {
    logger.debug("Locking before accessing the critical database table...");
    String address = null;
    boolean wait = true;
    int count = 0;
    // Poll every 30s until the replica row exists in "restoring" state.
    while (wait) {
        logger.debug("Waiting for Resource(s) to be created... " + count + "th trial!");
        Thread.sleep(30000);
        logger.debug("Renewing the session...");
        sess = HibernateUtil.newSession();
        sess.beginTransaction();
        final RdsDbinstance inst = InstanceEntity.selectDBInstance(sess, dbInstanceId, ac.getId());
        String stat = null;
        if (inst != null) {
            stat = inst.getDbinstanceStatus();
        }
        logger.debug("DBInstance existence: " + inst + "; DBInstanceStatus: " + stat);
        if (inst != null && stat.equals("restoring")) {
            inst.setDbinstanceStatus(RDSUtilities.STATUS_MODIFYING);
            inst.setRead_only(true);
            inst.setSourceDbinstanceId(sourceDBInstanceId);
            address = inst.getAddress();
            sess.save(inst);
            sess.getTransaction().commit();
            sess.close();
            wait = false;
            break;
        }
        sess.close();
        ++count;
    }
    logger.debug("Unlocking after accessing the critical database table...");
    sess = HibernateUtil.newSession();
    sess.beginTransaction();
    if (address == null) {
        throw RDSQueryFaults.InternalFailure();
    }
    // modify the ec2 security group of master DBInstance to grant access to
    // CIDRIP of slave DBInstance's IP/32
    logger.debug("Adding another authorization to the underlying ec2 security group");
    final String internal = "rds-" + ac.getId() + "-" + sourceDBInstanceId + "-" + masterPort;
    List<RdsDbsecurityGroup> secGrps = SecurityGroupEntity.selectAllSecurityGroups(sess, internal, ac.getId(),
            null, 0);
    if (secGrps.size() != 1) {
        throw RDSQueryFaults.InternalFailure();
    }
    final RdsDbsecurityGroup masterSecGrp = secGrps.get(0);
    List<RdsIPRangeBean> ips = masterSecGrp.getIPRange(sess);
    final String slaveCidrip = address + "/32";
    // NOTE(review): authorized is hard-coded false, so this branch always runs.
    final boolean authorized = false;
    if (!authorized) {
        logger.debug("Authorizing ingress for " + slaveCidrip + " to access the source DBInstance.");
        final CallStruct callEc2SecGrp = new CallStruct();
        callEc2SecGrp.setAc(AccountUtil.toAccount(ac));
        callEc2SecGrp.setCtx(new TemplateContext(null));
        callEc2SecGrp.setName(internal);
        callEc2SecGrp.setStackId("rds." + ac.getId() + "." + sourceDBInstanceId);
        final Map<String, Object> props = new HashMap<String, Object>();
        props.put(Constants.AVAILABILITYZONE, ac.getDefZone());
        props.put(Constants.GROUPNAME, internal);
        props.put(Constants.CIDRIP, slaveCidrip);
        props.put(Constants.SOURCESECURITYGROUPNAME, null);
        // SourceSecurityGroupOwnerId is not required
        props.put(Constants.SOURCESECURITYGROUPOWNERID, null);
        // hardcoded values below
        props.put(Constants.FROMPORT, masterPort);
        props.put(Constants.TOPORT, masterPort);
        props.put(Constants.IPPROTOCOL, Constants.DEFAULT_RDS_PROTOCOL);
        callEc2SecGrp.setProperties(props);
        callEc2SecGrp.setType(SecurityGroupIngress.TYPE);
        final SecurityGroupIngress provider0 = new SecurityGroupIngress();
        // Ingress creation failures are logged and ignored (may already exist).
        try {
            provider0.create(callEc2SecGrp);
        } catch (final AmazonServiceException e) {
            logger.debug(e.getMessage());
        } catch (final AmazonClientException e) {
            logger.debug(e.getMessage());
        }
        final RdsIPRangeBean newAuth = new RdsIPRangeBean(masterSecGrp.getId(), slaveCidrip);
        ips.add(newAuth);
        sess.save(newAuth);
    }
    // Grant the RDS server itself access to the replica's port.
    final String replicaInternal = "rds-" + ac.getId() + "-" + dbInstanceId + "-" + replicaPort;
    secGrps = SecurityGroupEntity.selectAllSecurityGroups(sess, replicaInternal, ac.getId(), null, 0);
    if (secGrps.size() != 1) {
        throw RDSQueryFaults.InternalFailure();
    }
    final RdsDbsecurityGroup slaveSecGrp = secGrps.get(0);
    final String rds_host = Appctx.getBean("internalServiceIp");
    final String RdsServerCidrip = rds_host + "/32";
    logger.debug("Authorizing ingress for " + RdsServerCidrip + " to access the read replica DBInstance.");
    final CallStruct callEc2SecGrp = new CallStruct();
    callEc2SecGrp.setAc(AccountUtil.toAccount(ac));
    callEc2SecGrp.setCtx(new TemplateContext(null));
    callEc2SecGrp.setName(replicaInternal);
    callEc2SecGrp.setStackId("rds." + ac.getId() + "." + dbInstanceId);
    final Map<String, Object> props = new HashMap<String, Object>();
    props.put(Constants.AVAILABILITYZONE, ac.getDefZone());
    props.put(Constants.GROUPNAME, replicaInternal);
    props.put(Constants.CIDRIP, RdsServerCidrip);
    props.put(Constants.SOURCESECURITYGROUPNAME, null);
    // SourceSecurityGroupOwnerId is not required
    props.put(Constants.SOURCESECURITYGROUPOWNERID, null);
    // hardcoded values below
    props.put(Constants.FROMPORT, replicaPort);
    props.put(Constants.TOPORT, replicaPort);
    props.put(Constants.IPPROTOCOL, Constants.DEFAULT_RDS_PROTOCOL);
    callEc2SecGrp.setProperties(props);
    callEc2SecGrp.setType(SecurityGroupIngress.TYPE);
    final SecurityGroupIngress provider0 = new SecurityGroupIngress();
    try {
        provider0.create(callEc2SecGrp);
    } catch (final AmazonServiceException e) {
        logger.debug(e.getMessage());
    } catch (final AmazonClientException e) {
        logger.debug(e.getMessage());
    }
    final RdsIPRangeBean newAuth = new RdsIPRangeBean(slaveSecGrp.getId(), RdsServerCidrip);
    ips = slaveSecGrp.getIPRange(sess);
    ips.add(newAuth);
    sess.save(newAuth);
    logger.debug("Copy the mysqldump file from the source DBInstance to the DBInstanceReadReplica");
    String key_dir = (String) ConfigurationUtil.getConfiguration(Arrays.asList(new String[] { "KEYS_DIR" }));
    final String key = ac.getDefKeyName() + ".pem";
    if (key_dir.charAt(key_dir.length() - 1) != '/') {
        key_dir += '/';
    }
    final String key_path = key_dir + key;
    logger.debug("Account's ec2 key is at: " + key_path);
    final RdsDbinstance source = InstanceEntity.selectDBInstance(sess, sourceDBInstanceId, ac.getId());
    final String masterHostname = source.getAddress();
    final String slaveHostname = address;
    logger.debug("check if mysqldump is completed or not before trying to scp the dump;"
            + " databagitem Replication should be modified to have Task = mysqldump_complete");
    String databagName = "rds-" + ac.getId() + "-" + source.getDbinstanceId();
    String databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
    JsonNode replicationItem = JsonUtil.load(databagItem);
    JsonNode task = replicationItem.get("Task");
    String taskValue = task.getTextValue();
    boolean dumpReady = false;
    // NOTE(review): tight poll on the databag — no sleep, no timeout.
    while (!dumpReady) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("mysqldump_complete")) {
            dumpReady = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);
    // scp the key file into source DBInstance
    final String scpCommand = "scp -o StrictHostKeyChecking=no -i " + key_path + " " + key_path + " root@"
            + masterHostname + ":/root";
    logger.debug("SCP command is: " + scpCommand);
    // NOTE(review): the spawned process is neither waited on nor checked.
    Runtime.getRuntime().exec(scpCommand);
    logger.debug("Scp'ed the client key to the source DBInstance.");
    // set the flag for scp and wait till this process is completed by chef
    String replication_item = "{\"Task\":\"" + "scp" + "\", " + "\"TargetHostname\":\"" + slaveHostname + "\", "
            + "\"Key\":\"" + key + "\"" + "}";
    ChefUtil.createDatabagItem(databagName, "Replication", replication_item);
    boolean scpDone = false;
    while (!scpDone) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("scp_complete")) {
            scpDone = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);
    ChefUtil.deleteDatabagItem(databagName, "Replication");
    // apply the mysqldump to the read replica DBInstance, then change the
    // master; restart mysql server on read replica
    databagName = "rds-" + ac.getId() + "-" + dbInstanceId;
    replication_item = "{\"Task\":\"slave\"}";
    ChefUtil.createDatabagItem(databagName, "Replication", replication_item);
    boolean restored = false;
    while (!restored) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("slave_complete")) {
            restored = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);
    // Point the replica at the master and start replication over JDBC.
    final Connection slave = getConnection("root", source.getMasterUserPassword(),
            source.getEngine().toLowerCase(), slaveHostname, replicaPort);
    final Statement changeMaster = slave.createStatement();
    // NOTE(review): replication credentials are embedded in the SQL text and
    // would appear in any statement logging.
    final String change = "CHANGE MASTER TO MASTER_HOST=\'" + masterHostname + "\', MASTER_USER=\'"
            + RDS_Constants.RDS_REPLICATION_USER + "\', MASTER_PASSWORD=\'"
            + RDS_Constants.RDS_REPLICATION_PASSWORD + "\', MASTER_LOG_FILE=\'" + masterFile
            + "\', MASTER_LOG_POS=" + position;
    changeMaster.execute(change);
    logger.debug("Modified the master of the read replica to the source DBInstance.");
    final Statement startSlave = slave.createStatement();
    startSlave.execute("START SLAVE");
    final Statement flushTableRL = slave.createStatement();
    flushTableRL.execute("FLUSH TABLES WITH READ LOCK");
    final Statement setReadOnly = slave.createStatement();
    setReadOnly.execute("SET GLOBAL read_only = ON");
    logger.debug("Slave service started and configured for read_only.");
    logger.debug("Closing the connection to the replica DBInstance.");
    slave.close();
    ChefUtil.deleteDatabagItem(databagName, "Replication");
    logger.debug("Modifying the DBInstanceStatus for both DBInstances");
    final RdsDbinstance inst = InstanceEntity.selectDBInstance(sess, sourceDBInstanceId, ac.getId());
    inst.setDbinstanceStatus(RDSUtilities.STATUS_AVAILABLE);
    final RdsDbinstance inst2 = InstanceEntity.selectDBInstance(sess, dbInstanceId, ac.getId());
    inst2.setDbinstanceStatus(RDSUtilities.STATUS_AVAILABLE);
    sess.save(inst);
    sess.save(inst2);
    sess.getTransaction().commit();
    sess.close();
    logger.debug("CreateDBInstanceReadReplica completed successfully.");
}
From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java
/***
 * Execute Hive queries using {@link HiveJdbcConnector} and validate results.
 *
 * @param queries Queries to execute.
 * @return one row count per query that returned a result set (first column of
 *         the first row); queries with no result set are logged and skipped.
 * @throws IOException if the Hive connector cannot be created.
 */
@SuppressWarnings("unused")
private List<Long> getValidationOutputFromHiveJdbc(List<String> queries) throws IOException {
    if (null == queries || queries.size() == 0) {
        log.warn("No queries specified to be executed");
        return Collections.emptyList();
    }
    Statement statement = null;
    List<Long> rowCounts = Lists.newArrayList();
    Closer closer = Closer.create();

    try {
        // Fix: the connector must be registered with the Closer, otherwise
        // closer.close() closed nothing and the Hive connection leaked.
        HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props));
        statement = hiveJdbcConnector.getConnection().createStatement();

        for (String query : queries) {
            log.info("Executing query: " + query);
            boolean result = statement.execute(query);
            if (result) {
                ResultSet resultSet = statement.getResultSet();
                if (resultSet.next()) {
                    // The validation queries are expected to return a count in
                    // the first column.
                    rowCounts.add(resultSet.getLong(1));
                }
            } else {
                log.warn("Query output for: " + query + " : " + result);
            }
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            closer.close();
        } catch (Exception e) {
            log.warn("Could not close HiveJdbcConnector", e);
        }
        if (null != statement) {
            try {
                statement.close();
            } catch (SQLException e) {
                log.warn("Could not close Hive statement", e);
            }
        }
    }
    return rowCounts;
}
From source file:com.udps.hive.jdbc.HiveConnection.java
public void setSchema(String schema) throws SQLException { // JDK 1.7/*from w w w . j a v a 2s .c om*/ if (isClosed) { throw new SQLException("Connection is closed"); } if (schema == null || schema.isEmpty()) { throw new SQLException("Schema name is null or empty"); } Statement stmt = createStatement(); stmt.execute("use " + schema); stmt.close(); }
From source file:it.doqui.index.ecmengine.business.personalization.multirepository.bootstrap.SchemaBootstrap.java
/** * Execute the given SQL statement, absorbing exceptions that we expect during * schema creation or upgrade.//from www . j a v a 2 s . co m */ private void executeStatement(Connection connection, String sql, boolean optional, int line, File file) throws Exception { Statement stmt = connection.createStatement(); try { if (logger.isDebugEnabled()) { LogUtil.debug(logger, "[SchemaBootstrap::executeStatement] Repository '" + RepositoryManager.getCurrentRepository() + "' -- " + MSG_EXECUTING_STATEMENT, sql); } stmt.execute(sql); // Write the statement to the file, if necessary StringBuilder executedStatements = executedStatementsThreadLocal.get(); if (executedStatements != null) { executedStatements.append(sql).append(";\n"); } } catch (SQLException e) { if (optional) { // it was marked as optional, so we just ignore it LogUtil.debug(logger, "[SchemaBootstrap::executeStatement] Repository '" + RepositoryManager.getCurrentRepository() + "' -- " + MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line); } else { LogUtil.error(logger, "[SchemaBootstrap::executeStatement] Repository '" + RepositoryManager.getCurrentRepository() + "' -- " + ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line); throw e; } } finally { try { stmt.close(); } catch (Throwable e) { } } }
From source file:com.mysql.stresstool.RunnableQueryInsertPartRange.java
public boolean executeSQL(String command, Connection conn) throws Exception { boolean done; try {//from ww w.j a v a 2 s .c om Statement stmt = conn.createStatement(); done = stmt.execute(command); } catch (SQLException sqle) { if ((sqle.getErrorCode() == 1205 || sqle.getErrorCode() == 1213) && lockRetry < 4) { lockRetry++; System.out.println("Lock Found for thread = " + Thread.currentThread().getId() + " repeat N: " + lockRetry + " OF 3"); done = executeSQL(command, conn); try { Thread.sleep(1000); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } else { throw new Exception(sqle); } } catch (Exception ex) { throw new Exception(ex); } return done; }
From source file:com.openddal.test.BaseTestCase.java
/**
 * Check that executing the specified query results in the specified error.
 *
 * @param expectedErrorCode the expected error code
 * @param stat the statement
 * @param sql the SQL statement to execute
 */
protected void assertThrows(int expectedErrorCode, Statement stat, String sql) {
    boolean thrown = false;
    try {
        stat.execute(sql);
    } catch (SQLException ex) {
        thrown = true;
        // Statement failed as expected — verify it failed for the right reason.
        assertEquals(expectedErrorCode, ex.getErrorCode());
    }
    if (!thrown) {
        fail("Expected error: " + expectedErrorCode);
    }
}
From source file:com.mysql.stresstool.RunnableClusterQueryInsert.java
/**
 * Stress-test worker loop: repeatedly builds insert batches (with optional
 * blob inserts) and executes them against the configured MySQL/cluster node,
 * publishing progress through {@link StressTool}/{@code ThreadInfo}.
 *
 * NOTE(review): the Statement is never closed, {@code BufferedReader d} is
 * unused, and a batch failure calls System.exit(1) — killing the whole JVM
 * from a worker thread; confirm these are intentional for this stress tool.
 */
public void run() {
    BufferedReader d = null;
    Connection conn = null;
    // Open the connection; a non-"MySQL" dbType value is used directly as the
    // JDBC URL with test/test credentials.
    try {
        if (jdbcUrlMap.get("dbType") != null && !((String) jdbcUrlMap.get("dbType")).equals("MySQL")) {
            conn = DriverManager.getConnection((String) jdbcUrlMap.get("dbType"), "test", "test");
        } else
            conn = DriverManager.getConnection((String) jdbcUrlMap.get("jdbcUrl"));
    } catch (SQLException ex) {
        ex.printStackTrace();
    }
    if (conn != null) {
        try {
            Statement stmt = null;
            // ResultSet rs = null;
            // ResultSet rs2 = null;
            // Manual transaction control: autocommit off both client- and server-side.
            conn.setAutoCommit(false);
            stmt = conn.createStatement();
            stmt.execute("SET AUTOCOMMIT=0");
            long execTime = 0;
            int pkStart = 0;
            int pkEnds = 0;
            int intDeleteInterval = 0;
            int intBlobInterval = 0;
            // Blob batches are flushed only every N loops, N random in [0,4).
            int intBlobIntervalLimit = StressTool.getNumberFromRandom(4).intValue();
            ThreadInfo thInfo;
            long threadTimeStart = System.currentTimeMillis();
            active = true;
            // Register this worker with the stress-tool monitor.
            thInfo = new ThreadInfo();
            thInfo.setId(this.ID);
            thInfo.setType("insert");
            thInfo.setStatusActive(this.isActive());
            StressTool.setInfo(this.ID, thInfo);
            boolean lazy = false;
            int lazyInterval = 0;
            for (int repeat = 0; repeat <= repeatNumber; repeat++) {
                String query = null;
                ArrayList insert1 = null;
                ArrayList insert2 = null;
                int pk = 0;
                // "lazy" value generation for up to 500 consecutive loops.
                if (repeat > 0 && lazyInterval < 500) {
                    lazy = true;
                    ++lazyInterval;
                } else {
                    lazy = false;
                    lazyInterval = 0;
                }
                intBlobInterval++;
                //IMPLEMENTING lazy
                Vector v = this.getTablesValues(lazy);
                insert1 = (ArrayList<String>) v.get(0);
                insert2 = (ArrayList<String>) v.get(1);
                // System.out.println(insert1);
                // System.out.println(insert2);
                // pk = ((Integer) v.get(2)).intValue();
                int[] iLine = { 0, 0 };
                // pkStart = StressTool.getNumberFromRandom(2147483647).intValue();
                // pkEnds = StressTool.getNumberFromRandom(2147483647).intValue();
                try {
                    long timeStart = System.currentTimeMillis();
                    if (this.ignoreBinlog)
                        stmt.execute("SET sql_log_bin=0");
                    // stmt.execute("SET GLOBAL max_allowed_packet=1073741824");
                    // NOTE(review): BEGIN for transactional MySQL engines,
                    // COMMIT otherwise — confirm the COMMIT branch is intended.
                    if (dbType.equals("MySQL") && !engine.toUpperCase().equals("BRIGHTHOUSE"))
                        stmt.execute("BEGIN");
                    else
                        stmt.execute("COMMIT");
                    // stmt.execute("SET TRANSACTION NAME 'TEST'");
                    {
                        Iterator<String> it = insert1.iterator();
                        while (it.hasNext()) {
                            stmt.addBatch(it.next());
                        }
                    }
                    // Blob inserts are batched only every intBlobIntervalLimit loops.
                    if (!this.doSimplePk) {
                        if (intBlobInterval > intBlobIntervalLimit) {
                            Iterator<String> it = insert2.iterator();
                            while (it.hasNext()) {
                                stmt.addBatch(it.next());
                            }
                            intBlobInterval = 0;
                        }
                    }
                    iLine = stmt.executeBatch();
                    stmt.clearBatch();
                    // System.out.println("Query1 = " + insert1);
                    // System.out.println("Query2 = " + insert2);
                    // stmt.execute("START TRANSACTION");
                    // stmt.execute(insert1);
                    // iLine = stmt.executeBatch();
                    // conn.commit();
                    long timeEnds = System.currentTimeMillis();
                    execTime = (timeEnds - timeStart);
                } catch (Exception sqle) {
                    // Any batch failure aborts the whole run.
                    conn.rollback();
                    System.out.println("FAILED QUERY1==" + insert1);
                    System.out.println("FAILED QUERY2==" + insert2);
                    sqle.printStackTrace();
                    System.exit(1);
                    //conn.close();
                    //this.setJdbcUrl(jdbcUrl);
                    //System.out.println("Query Insert TH RE-INIZIALIZING");
                } finally {
                    // conn.commit();
                    stmt.execute("COMMIT");
                    // intDeleteInterval++;
                    if (doLog) {
                        System.out.println("Query Insert TH = " + this.getID() + " Loop N = " + repeat + " "
                                + iLine[0] + "|" + ((iLine.length > 1) ? iLine[1] : 0) + " Exec Time(ms) ="
                                + execTime + " Running = " + repeat + " of " + repeatNumber + " to go ="
                                + (repeatNumber - repeat) + " Using Lazy=" + lazy);
                    }
                }
                thInfo.setExecutedLoops(repeat);
                // Optional pacing between loops.
                if (sleepFor > 0 || this.getSleepWrite() > 0) {
                    if (this.getSleepWrite() > 0) {
                        Thread.sleep(getSleepWrite());
                    } else
                        Thread.sleep(sleepFor);
                }
            }
            long threadTimeEnd = System.currentTimeMillis();
            this.executionTime = (threadTimeEnd - threadTimeStart);
            // this.setExecutionTime(executionTime);
            active = false;
            // System.out.println("Query Insert TH = " + this.getID() + " COMPLETED! TOTAL TIME = " + execTime + "(ms) Sec =" + (execTime/1000));
            // Publish final timing and mark the worker inactive.
            thInfo.setExecutionTime(executionTime);
            thInfo.setStatusActive(false);
            StressTool.setInfo(this.ID, thInfo);
            return;
        } catch (Exception ex) {
            ex.printStackTrace();
            try {
                conn.close();
            } catch (SQLException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }
}
From source file:com.alibaba.otter.node.etl.load.loader.db.DbLoadAction.java
/**
 * Applies DDL events against the target database for the pipeline.
 *
 * Each event is executed through the dialect's JdbcTemplate; on MySQL a
 * "use &lt;schema&gt;" is issued first when the event carries a schema name.
 * Successful events go to the context's processed list, others to the failed
 * list. Exceptions are either skipped (per pipeline parameter) or wrapped in
 * a {@link LoadException}.
 *
 * @param context    load context collecting processed/failed events
 * @param eventDatas the DDL events to apply
 */
private void doDdl(DbLoadContext context, List<EventData> eventDatas) {
    for (final EventData data : eventDatas) {
        DataMedia dataMedia = ConfigHelper.findDataMedia(context.getPipeline(), data.getTableId());
        final DbDialect dbDialect = dbDialectFactory.getDbDialect(context.getIdentity().getPipelineId(),
                (DbMediaSource) dataMedia.getSource());
        Boolean skipDdlException = context.getPipeline().getParameters().getSkipDdlException();
        try {
            Boolean result = dbDialect.getJdbcTemplate().execute(new StatementCallback<Boolean>() {
                public Boolean doInStatement(Statement stmt) throws SQLException, DataAccessException {
                    Boolean result = false;
                    if (dbDialect instanceof MysqlDialect && StringUtils.isNotEmpty(data.getDdlSchemaName())) {
                        // On MySQL, switch to the event's schema before running the DDL.
                        // NOTE(review): result starts false, so "result &= ..." is
                        // always false; Statement.execute also returns false for
                        // DDL, so this callback can never report success via
                        // result — confirm whether "|=" or plain assignment was
                        // intended before changing list placement below.
                        result &= stmt.execute("use " + data.getDdlSchemaName());
                    }
                    result &= stmt.execute(data.getSql());
                    return result;
                }
            });
            if (result) {
                context.getProcessedDatas().add(data); // DDL applied successfully
            } else {
                context.getFailedDatas().add(data);
            }
        } catch (Throwable e) {
            if (skipDdlException) {
                // do skip
                logger.warn("skip exception for ddl : {} , caused by {}", data,
                        ExceptionUtils.getFullStackTrace(e));
            } else {
                throw new LoadException(e);
            }
        }
    }
}