Example usage for java.sql Statement execute

List of usage examples for java.sql Statement execute

Introduction

On this page you can find an example of the usage for java.sql Statement.execute.

Prototype

boolean execute(String sql) throws SQLException;

Source Link

Document

Executes the given SQL statement, which may return multiple results.

Usage

From source file:it.geosolutions.geobatch.imagemosaic.GranuleRemoverOnlineTest.java

/**
 * Test teardown helper: removes the coverage store from GeoServer (if present)
 * and drops the backing PostgreSQL table (if it exists).
 *
 * @throws SQLException if closing the PG connection fails
 */
protected void removeStore() throws MalformedURLException, ClassNotFoundException, SQLException {
    // remove existing store from GeoServer
    GeoServerRESTReader gsReader = createGSReader();
    GeoServerRESTPublisher publisher = createGSPublisher();

    if (null != gsReader.getCoverageStore(WORKSPACE, STORENAME)) {
        LOGGER.info("Removing existing store");
        publisher.removeCoverageStore(WORKSPACE, STORENAME, true);
    }

    // remove table from PG
    Connection connection = createPGConnection();
    try {
        LOGGER.info("Checking if PG table '" + STORENAME + "' exists");
        // Probe query: throws if the table does not exist. try-with-resources
        // guarantees the Statement is closed even on that failure path (the
        // original leaked it when execute() threw).
        try (Statement st = connection.createStatement()) {
            st.execute("SELECT count(*) FROM " + getFixture().getProperty("pg_schema") + ".\"" + STORENAME + "\"");
        }

        // previous select did not throw, so the table does exist.
        try (Statement st = connection.createStatement()) {
            LOGGER.info("Removing PG table '" + STORENAME + "'");
            st.executeUpdate("DROP TABLE " + getFixture().getProperty("pg_schema") + ".\"" + STORENAME + "\"");
        } catch (SQLException ex) {
            LOGGER.warn("Error while dropping table");
        }
    } catch (Exception e) {
        // Expected when the table never existed; the probe SELECT throws.
        LOGGER.info("The store " + STORENAME + " probably does not exist: " + e.getMessage());
    } finally {
        connection.close();
    }
}

From source file:org.sakaiproject.sitestats.impl.DBHelper.java

/**
 * Renames a database index using vendor-specific DDL (MySQL or Oracle).
 * Failures to execute the DDL are logged and swallowed, matching the
 * best-effort semantics of the original.
 *
 * @param c        open connection to run the DDL on
 * @param oldIndex current index name
 * @param newIndex desired index name
 * @param field    indexed column (needed by the MySQL re-create form)
 * @param table    table owning the index (MySQL only)
 * @throws SQLException if closing the statement fails
 */
private void renameIndex(Connection c, String oldIndex, String newIndex, String field, String table)
        throws SQLException {
    String sql = null;
    notifyIndexesUpdate();
    if (dbVendor.equals("mysql"))
        // MySQL has no RENAME INDEX (pre-5.7): drop and re-add in one ALTER.
        sql = "ALTER TABLE " + table + " DROP INDEX " + oldIndex + ", ADD INDEX " + newIndex + " USING BTREE("
                + field + ")";
    else if (dbVendor.equals("oracle"))
        sql = "ALTER INDEX " + oldIndex + " RENAME TO " + newIndex;
    if (sql == null) {
        // BUGFIX: for any other vendor the original passed null to execute(),
        // producing a driver NPE/SQLException instead of a clear message.
        LOG.warn("renameIndex: unsupported db vendor '" + dbVendor + "', skipping rename of " + oldIndex);
        return;
    }
    // try-with-resources closes the statement even if execute() throws.
    try (Statement st = c.createStatement()) {
        st.execute(sql);
    } catch (SQLException e) {
        LOG.warn("Failed to execute sql: " + sql, e);
    }
}

From source file:com.splout.db.engine.MySQLOutputFormat.java

/**
 * Commits and closes every cached per-partition MySQL connection, runs any
 * configured post-SQL, stops each embedded MySQL instance, and zips each
 * partition's data files into a "&lt;partition&gt;.db" archive.
 *
 * NOTE(review): the finally block calls stop() on every EmbeddedMySQL, so
 * instances already stopped inside the loop get stop() twice — presumably
 * stop() is idempotent; confirm. Also, the Statement/Connection opened in an
 * iteration are not closed if an exception occurs mid-iteration.
 */
@Override
public void close() throws IOException, InterruptedException {
    try {
        for (Map.Entry<Integer, Connection> entry : connCache.entrySet()) {
            LOG.info("Closing SQL connection [" + entry.getKey() + "]");
            //
            Connection conn = entry.getValue();
            Statement st = conn.createStatement();
            // Flush pending work before the post-SQL hooks run.
            st.execute("COMMIT");
            if (getPostSQL() != null) {
                LOG.info("Executing end SQL statements.");
                for (String sql : getPostSQL()) {
                    LOG.info("Executing: " + sql);
                    st.execute(sql);
                }
            }
            st.close();
            conn.close();
            // close MySQL before copying files (so mysql.sock disappears!)
            EmbeddedMySQL msql = mySQLs.get(entry.getKey());

            msql.stop();
            File resident = msql.getConfig().getResidentFolder();
            File zipDest = new File(resident.getParentFile(), entry.getKey() + ".db");

            // Create a "partition.db" zip with the needed files.
            CompressorUtil.createZip(resident, zipDest,
                    new WildcardFileFilter(
                            new String[] { "ib*", "*.frm", "*.MYD", "*.MYI", "db.opt", "*.ibd" }),
                    FileFilterUtils.or(FileFilterUtils.nameFileFilter("data"),
                            FileFilterUtils.nameFileFilter("splout")));
            // Delete all files except the generated zip "partition.db"
            FileUtils.deleteDirectory(new File(resident, "bin"));
            FileUtils.deleteDirectory(new File(resident, "data"));
            FileUtils.deleteDirectory(new File(resident, "share"));
        }
    } catch (Exception e) {
        // Wrap everything (SQL, filesystem, MySQL shutdown) in the IOException
        // the OutputFormat contract allows.
        throw new IOException(e);
    } finally { // in any case, destroy the HeartBeater
        for (Map.Entry<Integer, EmbeddedMySQL> entry : mySQLs.entrySet()) {
            entry.getValue().stop();
        }
    }
}

From source file:com.cloudera.sqoop.manager.SQLServerManagerImportManualTest.java

@Before
public void setUp() {
    super.setUp();

    // Configure the manager pointing at the test SQL Server instance.
    SqoopOptions options = new SqoopOptions(CONNECT_STRING, TABLE_NAME);
    options.setUsername(DATABASE_USER);
    options.setPassword(DATABASE_PASSWORD);

    manager = new SQLServerManager(options);

    // Phase 1: drop any leftover table from a previous run (best effort).
    Connection connection = null;
    Statement statement = null;
    try {
        connection = manager.getConnection();
        statement = connection.createStatement();
        statement.execute("DROP TABLE " + TABLE_NAME);
    } catch (SQLException sqlE) {
        LOG.info("Table was not dropped: " + sqlE.getMessage());
    } finally {
        try {
            if (null != statement) {
                statement.close();
            }
        } catch (Exception ex) {
            LOG.warn("Exception while closing stmt", ex);
        }
    }

    // Phase 2: create the table and load the fixture rows in one transaction.
    try {
        connection = manager.getConnection();
        connection.setAutoCommit(false);
        statement = connection.createStatement();

        statement.executeUpdate(
                "CREATE TABLE " + TABLE_NAME + " (" + "id INT NOT NULL, " + "name VARCHAR(24) NOT NULL, "
                        + "salary FLOAT, " + "dept VARCHAR(32), " + "PRIMARY KEY (id))");

        statement.executeUpdate(
                "INSERT INTO " + TABLE_NAME + " VALUES(" + "1,'Aaron', " + "1000000.00,'engineering')");
        statement.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES(" + "2,'Bob', " + "400.00,'sales')");
        statement.executeUpdate("INSERT INTO " + TABLE_NAME + " VALUES(" + "3,'Fred', 15.00," + "'marketing')");
        connection.commit();
    } catch (SQLException sqlE) {
        LOG.error("Encountered SQL Exception: ", sqlE);
        sqlE.printStackTrace();
        fail("SQLException when running test setUp(): " + sqlE);
    } finally {
        try {
            if (null != statement) {
                statement.close();
            }
        } catch (Exception ex) {
            LOG.warn("Exception while closing connection/stmt", ex);
        }
    }
}

From source file:com.migratebird.database.impl.DefaultSQLHandler.java

/**
 * Executes the given SQL against a connection from the data source.
 * When update execution is disabled this is a no-op; any failure is wrapped
 * in a DatabaseException carrying the offending statement.
 */
@Override
public void execute(String sql, DataSource dataSource) {
    logger.debug(sql);

    if (!doExecuteUpdates) {
        // Updates are globally disabled; nothing to do.
        return;
    }
    Statement stmt = null;
    try {
        Connection conn = getConnection(dataSource);
        stmt = conn.createStatement();
        stmt.execute(sql);
    } catch (Exception e) {
        throw new DatabaseException("Could not perform database statement: " + sql, e);
    } finally {
        closeQuietly(stmt);
    }
}

From source file:com.cloudera.recordbreaker.analyzer.DataQuery.java

/**
 * Sanity-checks that the Hive server is up by creating and then dropping a
 * uniquely-named scratch table over the cached connection.
 *
 * @return true when both DDL statements succeed, false otherwise
 */
public boolean testQueryServer() {
    if (hiveCon == null) {
        return false;
    }
    // Randomized name avoids collisions between concurrent test runs.
    String tablename = "test_datatable" + Math.abs(r.nextInt());
    try {
        // Step 1: create the scratch table.
        Statement createStmt = hiveCon.createStatement();
        try {
            createStmt.execute("CREATE TABLE " + tablename + "(a int, b int, c int)");
        } finally {
            createStmt.close();
        }

        // Step 2: drop it again.
        Statement dropStmt = hiveCon.createStatement();
        try {
            dropStmt.execute("DROP TABLE " + tablename);
        } finally {
            dropStmt.close();
        }
        return true;
    } catch (Exception ex) {
        ex.printStackTrace();
        return false;
    }
}

From source file:com.fileanalyzer.dao.impl.FileStatisticDAOImpl.java

/**
 * Deletes the row matching fStat's id from the FileStatistic table inside an
 * explicit transaction, rolling back and logging on failure.
 *
 * @param fStat entity whose id identifies the row to delete
 */
@Override
public void delete(FileStatistic fStat) {
    Connection con = null;
    Statement statement = null;
    try {
        con = DBConnector.getConnection();
        con.setAutoCommit(false);
        statement = con.createStatement();
        // id comes from our own entity (numeric), not user input; a
        // PreparedStatement would still be the more defensive choice.
        statement.execute("delete from " + FileStatistic.FileStatisticKey.TABLE + " where id=" + fStat.getId());
        con.commit();
    } catch (SQLException e) {
        if (con != null) {
            try {
                log.error("Transaction is being rolled back", e);
                con.rollback();
            } catch (SQLException excep) {
                log.error(excep);
            }
        }
    } finally {
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException ex) {
                log.error(ex);
            }
        }
        // BUGFIX: the original called con.setAutoCommit(true) without a null
        // check, so a failed getConnection() caused an uncaught NPE from the
        // finally block instead of a clean exit.
        if (con != null) {
            try {
                con.setAutoCommit(true);
            } catch (SQLException ex) {
                log.error("setAutoCommit(true)", ex);
            }
            try {
                con.close();
            } catch (SQLException ex) {
                log.error(ex);
            }
        }
    }
}

From source file:com.adito.jdbc.hsqldb.EmbeddedHSQLDBServer.java

/**
 * Stop the Database engine.
 *
 * Shutdown order matters here: pooled client connections are invalidated
 * first, then each database is compacted via "SHUTDOWN COMPACT", and only
 * then is the TCP server itself stopped.
 */
public void stop() {

    if (server != null) {

        /*
         * TODO A nasty hack. HSQLDB cannot have new databases added to it
         * while its running in TCP/IP server mode. So we have to restart
         * the server. Unfortunately, the client side of the connection does
         * not register that this has happened so is considered re-useable
         * by the pool. This results in a 'Connection is closed' error when
         * then next statement executes.
         */
        JDBCConnectionImpl.JDBCPool.getInstance().closeAll();

        // Get a JDBC connection
        for (Iterator i = databases.iterator(); i.hasNext();) {
            String n = (String) i.next();
            Connection con = null;
            try {
                if (log.isInfoEnabled())
                    log.info("Compacting database " + n);
                // Connect directly (not via the pool, which was just closed);
                // URL form depends on whether we run in TCP server or file mode.
                con = DriverManager.getConnection(
                        EmbeddedHSQLDBServer.this.serverMode ? "jdbc:hsqldb:hsql://localhost/" + n
                                : "jdbc:hsqldb:file:" + ContextHolder.getContext().getDBDirectory().getPath()
                                        + "/" + n);
                Statement s = con.createStatement();
                // HSQLDB-specific: closes the database and defragments its files.
                s.execute("SHUTDOWN COMPACT");
                if (log.isInfoEnabled())
                    log.info("Database " + n + " compacted.");
            } catch (Exception e) {
                log.error("Failed to compact database.");
            } finally {
                if (con != null) {
                    try {
                        con.close();
                    } catch (Exception e) {
                        // Ignored: the database may already be shut down by
                        // SHUTDOWN COMPACT, making close() fail harmlessly.
                    }
                }
            }

        }
        server.signalCloseAllServerConnections();
        server.stop();
        waitForServerToStop();
        server = null;
        testedConnection = false;
    }
    started = false;
}

From source file:com.fileanalyzer.dao.impl.FilesDAOImpl.java

@Override
public void delete(Files fileInDb) {
    Connection con = null;//  w ww  .  j  a  v  a  2  s  .  c  o  m
    Statement statement = null;
    try {
        con = DBConnector.getConnection();
        con.setAutoCommit(false);
        statement = con.createStatement();
        statement.execute("delete from " + Files.FilesFieldsKey.TABLE + " where id=" + fileInDb.getId());
        con.commit();
    } catch (SQLException e) {
        handleException(e, con);
    } finally {
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException ex) {
                log.error(ex);
            }
        }
        try {
            con.setAutoCommit(true);
        } catch (SQLException ex) {
            log.error("setAutoCommit(true)", ex);
        }
        if (con != null) {
            try {
                con.close();
            } catch (SQLException ex) {
                log.error(ex);
            }
        }
    }
}

From source file:org.openmrs.module.mysqletl.web.controller.ETLModuleManageController.java

/**
 * Extracts the requested columns into a temporary MySQL database/table and,
 * when the target server type is HIVE, sqoop-imports it to the remote host
 * and drops the temporary database afterwards.
 *
 * SECURITY NOTE(review): db_name, table_name, column_list and join_cndtn come
 * straight from request parameters and are concatenated into SQL — this is an
 * injection risk; these values must be validated/whitelisted upstream.
 *
 * @return "Success" when the transform (and optional sqoop import) completes
 */
@RequestMapping(value = "/module/mysqletl/sqoop_transform", method = RequestMethod.POST)
public @ResponseBody String sqoopTransform(@RequestParam(value = "user", required = false) String UserName,
        @RequestParam(value = "pass", required = false) String Password,
        @RequestParam(value = "host", required = false) String Host,
        @RequestParam(value = "port", required = false) String Port,
        @RequestParam(value = "servertype", required = false) String serverType,
        @RequestParam(value = "dbname", required = false) String db_name,
        @RequestParam(value = "tablename", required = false) String table_name,
        @RequestParam(value = "columnlist[]", required = false) List<String> column_list,
        @RequestParam(value = "joincndtn", required = false) String join_cndtn, ModelMap model)
        throws Exception {
    // Setting Connection to MySQL
    Class.forName("com.mysql.jdbc.Driver");
    String connectionURL = "jdbc:mysql://" + MySQLClient.toLoginParams().gethost() + ":"
            + MySQLClient.toLoginParams().getport() + "/";
    Connection con = DriverManager.getConnection(connectionURL, MySQLClient.toLoginParams().getuser(),
            MySQLClient.toLoginParams().getpass());
    Statement stmt = null;
    try {
        // Derive the distinct set of source tables ("db.table") from the
        // "db.table.column" entries in column_list.
        List<String> tableListWithDuplicates = new ArrayList<String>();
        for (String column : column_list) {
            tableListWithDuplicates.add(column.substring(0, column.indexOf('.', column.indexOf('.') + 1)));
        }
        List<String> tableList = new ArrayList<String>(new HashSet<String>(tableListWithDuplicates));
        stmt = con.createStatement();
        // Create Fresh Temporary database
        stmt.execute("drop database if exists " + db_name);
        stmt.execute("create database if not exists " + db_name);
        // BUGFIX: the original only called replace() when NO newline was
        // present (indexOf('\n') < 0), which is a no-op — embedded newlines
        // were never flattened. Also guard against a null optional parameter,
        // which previously caused an NPE.
        join_cndtn = (join_cndtn == null) ? "" : join_cndtn.replace('\n', ' ');
        // Create extracted data in form of table
        String query = "CREATE TABLE " + db_name + "." + table_name + " AS SELECT "
                + column_list.toString().substring(1, column_list.toString().length() - 1) + " FROM "
                + tableList.toString().substring(1, tableList.toString().length() - 1) + " " + join_cndtn;
        stmt.execute(query);
        // if MYSQL Selected it will not drop the temporary table
        if (serverType.equalsIgnoreCase(ServerType.HIVE.name().toString().trim())) {
            // Set SSH Connection Parameters
            SSHClient.SetSSHParameters(Host, UserName, Password, Port);
            // Get Own IP Address which where we are client to machine running Hive and SSH
            String grantHost = SSHClient.getIpAddress();
            // grant Privileges to client IP to connect to MYSQL DB on remote machine
            stmt.execute(MySQLClient.grantPrivileges(grantHost));
            // Sqoop Import Data
            SSHClient.sqoopImport(grantHost, MySQLClient.getport(), MySQLClient.getuser(),
                    "\"" + MySQLClient.getpass() + "\"", db_name, table_name, db_name);
            // Drop Temporary created database
            stmt.execute("drop database " + db_name);
        }
        return "Success";
    } finally {
        // BUGFIX: the original leaked both the Statement and the Connection.
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ignored) {
                // best-effort cleanup
            }
        }
        try {
            con.close();
        } catch (SQLException ignored) {
            // best-effort cleanup
        }
    }
}