Example usage for java.sql Statement execute

List of usage examples for java.sql Statement execute

Introduction

On this page you can find usage examples of java.sql Statement execute.

Prototype

boolean execute(String sql) throws SQLException;

Document

Executes the given SQL statement, which may return multiple results.
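
The boolean return value indicates what the first result is: true when it is a ResultSet, false when it is an update count or there are no results. Below is a minimal sketch, not taken from any of the files listed on this page, of running an arbitrary SQL string and walking through every result via getResultSet, getUpdateCount and getMoreResults; the Connection conn and the sql string passed in are assumed to exist, and the method name is only illustrative.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

static void executeAndProcessAll(Connection conn, String sql) throws SQLException {
    try (Statement stmt = conn.createStatement()) {
        boolean isResultSet = stmt.execute(sql);
        while (true) {
            if (isResultSet) {
                // The current result is a ResultSet: iterate over its rows.
                try (ResultSet rs = stmt.getResultSet()) {
                    while (rs.next()) {
                        // process the current row, e.g. rs.getString(1)
                    }
                }
            } else {
                // The current result is an update count, or there are no results left.
                int updateCount = stmt.getUpdateCount();
                if (updateCount == -1) {
                    break; // no more results of any kind
                }
                // updateCount rows were affected by the current statement
            }
            // Advance to the next result; returns true if it is a ResultSet.
            isResultSet = stmt.getMoreResults();
        }
    }
}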

Usage

From source file:de.erdesignerng.test.sql.AbstractReverseEngineeringTestImpl.java

protected void loadSQL(Connection aConnection, String aResource) throws IOException, SQLException {
    BufferedReader theReader = new BufferedReader(
            new InputStreamReader(getClass().getResourceAsStream(aResource)));
    Statement theStatement = aConnection.createStatement();
    while (theReader.ready()) {
        String theLine = theReader.readLine();
        if (StringUtils.isNotEmpty(theLine)) {
            theLine = theLine.trim();
        }
        if (StringUtils.isNotEmpty(theLine)) {
            System.out.println(theLine);
            theStatement.execute(theLine);
        }
    }
    theStatement.close();

    theReader.close();
}

From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class
            .getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);

    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList,
                NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {

                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;

                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }

                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;

    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();

    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}

From source file:com.nextep.datadesigner.sqlgen.impl.DBErrorsMarkerProvider.java

private synchronized void recompileAndLoadMarkers() {
    if (!isValidated()) {
        final ICoreFactory coreFactory = CorePlugin.getService(ICoreFactory.class);

        // Resetting markers
        markersMap = new MultiValueMap();

        // Fetching for all connections
        final ITargetSet targetSet = VCSPlugin.getViewService().getCurrentViewTargets();
        if (targetSet != null) {
            final ICaptureService captureService = SQLGenPlugin.getService(ICaptureService.class);
            final IConnectionService connectionService = SQLGenPlugin.getService(IConnectionService.class);

            final Collection<IConnection> connections = targetSet.getTarget(SQLGenUtil.getDefaultTargetType());

            for (IConnection dbConn : connections) {
                Connection jdbcConn = null;
                Statement stmt = null;
                try {
                    jdbcConn = connectionService.connect(dbConn);
                    stmt = jdbcConn.createStatement();

                    // Recompiling first
                    stmt.execute("BEGIN DBMS_UTILITY.COMPILE_SCHEMA(USER, FALSE); END;"); //$NON-NLS-1$
                    // Fetching errors

                } catch (SQLException e) {
                    markersMap.put(dbConn, coreFactory.createMarker(dbConn, MarkerType.ERROR,
                            "Connection failed: " + e.getMessage()));
                    // Setting up a timeout before retrying
                    validationTimeout = System.currentTimeMillis() + 120000;
                } finally {
                    CaptureHelper.safeClose(null, stmt);
                    if (jdbcConn != null) {
                        try {
                            jdbcConn.close();
                        } catch (SQLException e) {
                            LOGGER.error("Unable to close connection", e);
                        }
                    }
                }

                final Collection<ErrorInfo> errors = captureService.getErrorsFromDatabase(dbConn,
                        new NullProgressMonitor());

                // Hashing current view contents by name
                Map<String, ITypedObject> objMap = hashCurrentViewByName();
                for (ErrorInfo i : errors) {
                    final ITypedObject o = objMap.get(i.getObjectName());
                    IMarker m = coreFactory.createMarker(o, "ERROR".equals(i.getAttribute()) ? MarkerType.ERROR //$NON-NLS-1$
                            : MarkerType.WARNING, i.getErrorMessage());
                    m.setAttribute(IMarker.ATTR_LINE, i.getLine());
                    m.setAttribute(IMarker.ATTR_COL, i.getCol());

                    // Specific quick'n dirty fix for Triggers
                    if (o != null && o.getType() == IElementType.getInstance(ITrigger.TYPE_ID)) {
                        final ITrigger trg = (ITrigger) o;
                        if (trg.isCustom()) {
                            Pattern pat = Pattern.compile("(DECLARE|BEGIN)"); //$NON-NLS-1$
                            Matcher mat = pat.matcher(trg.getSql().toUpperCase());
                            if (mat.find()) {
                                int index = mat.start();
                                pat = Pattern.compile("\n"); //$NON-NLS-1$
                                mat = pat.matcher(trg.getSql().substring(0, index));
                                int lineShift = 0;
                                while (mat.find()) {
                                    lineShift++;
                                }
                                m.setAttribute(IMarker.ATTR_LINE, i.getLine() + lineShift);
                            }
                        }
                    }
                    m.setAttribute(IMarker.ATTR_EXTERNAL_TYPE, i.getObjectTypeName());
                    m.setAttribute(IMarker.ATTR_CONTEXT, dbConn);
                    if (o instanceof IReferenceable) {
                        markersMap.put(((IReferenceable) o).getReference(), m);
                    } else {
                        markersMap.put(o, m);
                    }
                }
            }
            validated = true;
        }
    }
}

From source file:com.clican.pluto.orm.dynamic.impl.DataBaseOperationImpl.java

private void executeSql(Connection conn, String sql) {
    Statement stat = null;
    try {
        stat = conn.createStatement();
        stat.execute(sql);
    } catch (Exception e) {
        throw new DDLException(sql, e);
    } finally {
        if (stat != null) {
            try {
                stat.close();
            } catch (Exception e) {
                log.error("", e);
            }
        }

    }
}

From source file:com.jspxcms.core.setup.SetupServlet.java

private void createDatabase(String host, String port, String name, String user, String password)
        throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException {
    Class.forName("com.mysql.jdbc.Driver").newInstance();
    String connectionUrl = "jdbc:mysql://" + host;
    if (StringUtils.isNotBlank(port)) {
        connectionUrl += ":" + port;
    }
    connectionUrl += "?user=" + user + "&password=" + password + "&characterEncoding=utf8";
    System.out.println(connectionUrl);
    Connection connection = DriverManager.getConnection(connectionUrl);
    Statement statement = connection.createStatement();
    String dropDatabaseSql = "drop database if exists " + name;
    String createDatabaseSql = "create database " + name + " character set utf8";
    System.out.println(dropDatabaseSql);
    statement.execute(dropDatabaseSql);
    System.out.println(createDatabaseSql);
    statement.execute(createDatabaseSql);
    statement.close();
    connection.close();
}

From source file:com.cyclopsgroup.tornado.hibernate.taglib.ExecuteSqlsTagBase.java

/**
 * Override method processTag in class CreateTablesTag
 *
 * @see com.cyclopsgroup.waterview.utils.TagSupportBase#processTag(org.apache.commons.jelly.XMLOutput)
 */
protected void processTag(XMLOutput output) throws Exception {
    HibernateTag hibernate = (HibernateTag) requireInside(HibernateTag.class);
    String[] sqls = getSqls(hibernate);
    Connection dbcon = hibernate.getConnection();
    Statement s = dbcon.createStatement();
    if (isBatched()) {
        for (int i = 0; i < sqls.length; i++) {
            s.addBatch(sqls[i]);
        }
        try {
            s.executeBatch();
        } catch (Exception e) {
            logger.debug("Dropping table error", e);
        }
    } else {
        for (int i = 0; i < sqls.length; i++) {
            try {
                s.execute(sqls[i]);
            } catch (Exception e) {
                logger.debug("Dropping table error", e);
            }
        }
    }
    s.close();
}

From source file:com.uber.hoodie.hive.client.HoodieHiveClient.java

/**
 * Executes an update in the Hive metastore with this SQL.
 *
 * @param s SQL to execute
 * @return the value returned by Statement#execute, i.e. true if the first result is a ResultSet
 */
public boolean updateHiveSQL(String s) {
    Statement stmt = null;
    try {
        Connection conn = getConnection();
        stmt = conn.createStatement();
        LOG.info("Executing SQL " + s);
        return stmt.execute(s);
    } catch (SQLException e) {
        throw new HoodieHiveDatasetException("Failed in executing SQL " + s, e);
    } finally {
        closeQuietly(null, stmt);
    }
}

From source file:com.yahoo.ycsb.db.JdbcDBClient.java

@Override
public void init() throws DBException {
    if (initialized) {
        System.err.println("Client connection already initialized.");
        return;
    }
    props = getProperties();
    String urls = props.getProperty(CONNECTION_URL, DEFAULT_PROP);
    String user = props.getProperty(CONNECTION_USER, DEFAULT_PROP);
    String passwd = props.getProperty(CONNECTION_PASSWD, DEFAULT_PROP);
    String driver = props.getProperty(DRIVER_CLASS);

    String jdbcFetchSizeStr = props.getProperty(JDBC_FETCH_SIZE);
    if (jdbcFetchSizeStr != null) {
        try {
            this.jdbcFetchSize = Integer.parseInt(jdbcFetchSizeStr);
        } catch (NumberFormatException nfe) {
            System.err.println("Invalid JDBC fetch size specified: " + jdbcFetchSizeStr);
            throw new DBException(nfe);
        }
    }

    String autoCommitStr = props.getProperty(JDBC_AUTO_COMMIT, Boolean.TRUE.toString());
    Boolean autoCommit = Boolean.parseBoolean(autoCommitStr);

    String isCitusStr = props.getProperty(CITUS_ENABLED, Boolean.FALSE.toString());
    isCitus = Boolean.parseBoolean(isCitusStr);

    try {
        if (driver != null) {
            Class.forName(driver);
        }
        int shardCount = 0;
        conns = new ArrayList<Connection>(3);
        for (String url : urls.split(",")) {
            System.out.println("Adding shard node URL: " + url);
            Connection conn = DriverManager.getConnection(url, user, passwd);

            // Since there is no explicit commit method in the DB interface, all
            // operations should auto commit, except when explicitly told not to
            // (this is necessary in cases such as for PostgreSQL when running a
            // scan workload with fetchSize)
            conn.setAutoCommit(autoCommit);

            if (isCitus) {
                Statement stmt = conn.createStatement();
                stmt.execute("SET citusdb.task_executor_type TO 'router'");
            }

            shardCount++;
            conns.add(conn);
        }

        System.out.println("Using " + shardCount + " shards");

    } catch (ClassNotFoundException e) {
        System.err.println("Error in initializing the JDBS driver: " + e);
        throw new DBException(e);
    } catch (SQLException e) {
        System.err.println("Error in database operation: " + e);
        throw new DBException(e);
    } catch (NumberFormatException e) {
        System.err.println("Invalid value for fieldcount property. " + e);
        throw new DBException(e);
    }
    initialized = true;
}

From source file:tools.datasync.db2db.dao.GenericJDBCDao.java

public void saveOrUpdate(String entityName, JSON json, String keyColumn) {

    logger.finest("saveOrUpdate() - entityName=" + entityName + ", json=" + json + ", keyColumn=" + keyColumn);
    Connection connection = null;
    Statement statement = null;
    try {
        // Try insert statement first...
        String insert = SQLGenUtil.getInsertStatement(entityName, json);
        connection = dataSource.getConnection();
        statement = connection.createStatement();
        logger.finest("saveOrUpdate() - " + insert);
        statement.execute(insert);
    } catch (SQLException ex) {
        // May be primary key violation, try update statement...
        if (SQLGenUtil.isConstraintViolation(ex)) {
            try {
                String update = SQLGenUtil.getUpdateStatement(entityName, json, keyColumn);
                logger.finest("saveOrUpdate() - " + update);
                statement.execute(update);
            } catch (SQLException e) {
                exceptionHandler.handle(e, Level.WARNING, "Failed to update record", json);
            }
        } else {
            exceptionHandler.handle(ex, Level.SEVERE, "Failed to insert record", json);
        }
    } finally {
        if (statement != null) {
            try {
                logger.finest("saveOrUpdate() - commiting changes.");
                connection.commit();
                statement.close();
                connection.close();
            } catch (SQLException e) {
                exceptionHandler.handle(e, Level.WARNING, "Failed to close connection.");
            }
        }
    }
}

From source file:de.xwic.sandbox.server.installer.impl.SQLServerDatabaseHandler.java

public void executeSqlScript(File file) throws IOException, SQLException {
    BufferedReader reader = null;
    try {
        try {
            FileReader fileReader = new FileReader(file);
            reader = new BufferedReader(fileReader);
        } catch (FileNotFoundException e) {
            log.debug("File not found: " + file.getName() + ". Continuing with next script file.");
            return;
        }

        String line = "";

        log.info("Found SQL script file: " + file.getName());
        log.info("Begin script execution.");
        while (null != (line = reader.readLine())) {
            StringBuffer buffer = new StringBuffer();

            // get the SQL query from the file, until "GO" is reached
            while ((null != line) && (!"go".equalsIgnoreCase(line))) {
                buffer.append(line).append('\n');
                line = reader.readLine();
            }

            // execute the SQL query
            Statement stmt = connection.createStatement();
            try {
                stmt.execute(buffer.toString());
            } catch (SQLException e) {
                log.error("Error executing the script snippet: \n" + buffer, e);
                throw e;
            } finally {
                stmt.close();
            }
        }
    } finally {
        StreamUtil.close(log, reader);
    }

    log.info("The script file '" + file.getName() + "' completed succesfully.");
}