Example usage for java.sql ResultSet TYPE_SCROLL_INSENSITIVE

List of usage examples for java.sql ResultSet TYPE_SCROLL_INSENSITIVE

Introduction

In this page you can find the example usage for java.sql ResultSet TYPE_SCROLL_INSENSITIVE.

Prototype

int TYPE_SCROLL_INSENSITIVE

To view the source code for java.sql ResultSet TYPE_SCROLL_INSENSITIVE, click the Source Link below.

Click Source Link

Document

The constant indicating the type for a ResultSet object that is scrollable but generally not sensitive to changes to the data that underlies the ResultSet.

Usage

From source file:org.apache.bigtop.itest.hive.TestJdbc.java

/**
 * Exercises the core PreparedStatement / ResultSet JDBC surface against Hive:
 * recreates a table covering the primitive column types, inserts rows via the
 * typed setters and via setNull/setObject, then walks ResultSetMetaData and
 * every typed getter, logging the results. This is API-coverage smoke testing,
 * not a data-correctness test.
 *
 * @throws SQLException if any JDBC call against the Hive connection fails
 */
@Test
public void preparedStmtAndResultSet() throws SQLException {
    final String tableName = "bigtop_jdbc_psars_test_table";
    // Recreate the table from scratch so the test is repeatable.
    try (Statement stmt = conn.createStatement()) {
        stmt.execute("drop table if exists " + tableName);
        stmt.execute("create table " + tableName + " (bo boolean, ti tinyint, db double, fl float, "
                + "i int, lo bigint, sh smallint, st varchar(32))");
    }

    // NOTE Hive 1.2 theoretically support binary, Date & Timestamp in JDBC, but I get errors when I
    // try to put them in the query.
    try (PreparedStatement ps = conn
            .prepareStatement("insert into " + tableName + " values (?, ?, ?, ?, ?, ?, ?, ?)")) {
        // One setter call per supported primitive type.
        ps.setBoolean(1, true);
        ps.setByte(2, (byte) 1);
        ps.setDouble(3, 3.141592654);
        ps.setFloat(4, 3.14f);
        ps.setInt(5, 3);
        ps.setLong(6, 10L);
        ps.setShort(7, (short) 20);
        ps.setString(8, "abc");
        ps.executeUpdate();
    }

    // Second insert exercises setNull (both overloads), setObject,
    // clearParameters and execute() on a scroll-insensitive statement.
    try (PreparedStatement ps = conn.prepareStatement("insert into " + tableName + " (i, st) " + "values(?, ?)",
            ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
        ps.setNull(1, Types.INTEGER);
        ps.setObject(2, "mary had a little lamb");
        ps.executeUpdate();
        // Parameters are deliberately set, cleared, then set again to cover
        // the setNull(int, int, String) overload and clearParameters().
        ps.setNull(1, Types.INTEGER, null);
        ps.setString(2, "its fleece was white as snow");
        ps.clearParameters();
        ps.setNull(1, Types.INTEGER, null);
        ps.setString(2, "its fleece was white as snow");
        ps.execute();

    }

    try (Statement stmt = conn.createStatement()) {

        ResultSet rs = stmt.executeQuery("select * from " + tableName);

        // Walk every ResultSetMetaData accessor for every column.
        ResultSetMetaData md = rs.getMetaData();

        int colCnt = md.getColumnCount();
        LOG.debug("Column count is " + colCnt);

        for (int i = 1; i <= colCnt; i++) {
            LOG.debug("Looking at column " + i);
            String strrc = md.getColumnClassName(i);
            LOG.debug("Column class name is " + strrc);

            int intrc = md.getColumnDisplaySize(i);
            LOG.debug("Column display size is " + intrc);

            strrc = md.getColumnLabel(i);
            LOG.debug("Column label is " + strrc);

            strrc = md.getColumnName(i);
            LOG.debug("Column name is " + strrc);

            intrc = md.getColumnType(i);
            LOG.debug("Column type is " + intrc);

            strrc = md.getColumnTypeName(i);
            LOG.debug("Column type name is " + strrc);

            intrc = md.getPrecision(i);
            LOG.debug("Precision is " + intrc);

            intrc = md.getScale(i);
            LOG.debug("Scale is " + intrc);

            boolean boolrc = md.isAutoIncrement(i);
            LOG.debug("Is auto increment? " + boolrc);

            boolrc = md.isCaseSensitive(i);
            LOG.debug("Is case sensitive? " + boolrc);

            boolrc = md.isCurrency(i);
            LOG.debug("Is currency? " + boolrc);

            // NOTE(review): getScale(i) is called a second time here — presumably
            // accidental duplication, harmless for API coverage.
            intrc = md.getScale(i);
            LOG.debug("Scale is " + intrc);

            intrc = md.isNullable(i);
            LOG.debug("Is nullable? " + intrc);

            boolrc = md.isReadOnly(i);
            LOG.debug("Is read only? " + boolrc);

        }

        // Read every row through both the index-based and label-based getters.
        while (rs.next()) {
            LOG.debug("bo = " + rs.getBoolean(1));
            LOG.debug("bo = " + rs.getBoolean("bo"));
            LOG.debug("ti = " + rs.getByte(2));
            LOG.debug("ti = " + rs.getByte("ti"));
            LOG.debug("db = " + rs.getDouble(3));
            LOG.debug("db = " + rs.getDouble("db"));
            LOG.debug("fl = " + rs.getFloat(4));
            LOG.debug("fl = " + rs.getFloat("fl"));
            LOG.debug("i = " + rs.getInt(5));
            LOG.debug("i = " + rs.getInt("i"));
            LOG.debug("lo = " + rs.getLong(6));
            LOG.debug("lo = " + rs.getLong("lo"));
            LOG.debug("sh = " + rs.getShort(7));
            LOG.debug("sh = " + rs.getShort("sh"));
            LOG.debug("st = " + rs.getString(8));
            LOG.debug("st = " + rs.getString("st"));
            LOG.debug("tm = " + rs.getObject(8));
            LOG.debug("tm = " + rs.getObject("st"));
            LOG.debug("tm was null " + rs.wasNull());
        }
        LOG.debug("bo is column " + rs.findColumn("bo"));

        // Exercise the ResultSet introspection accessors.
        int intrc = rs.getConcurrency();
        LOG.debug("concurrency " + intrc);

        intrc = rs.getFetchDirection();
        LOG.debug("fetch direction " + intrc);

        intrc = rs.getType();
        LOG.debug("type " + intrc);

        Statement copy = rs.getStatement();

        // Drain and clear any warnings attached to the result set.
        SQLWarning warning = rs.getWarnings();
        while (warning != null) {
            LOG.debug("Found a warning: " + warning.getMessage());
            warning = warning.getNextWarning();
        }
        rs.clearWarnings();
    }
}

From source file:net.starschema.clouddb.jdbc.BQStatementRoot.java

/**
 * <p>
 * <h1>Implementation Details:</h1><br>
 * Updates and deletes not supported.
 * </p>
 *
 * @return TYPE_SCROLL_INSENSITIVE
 */
public int getResultSetType() throws SQLException {
    // Result sets from this statement are always scrollable and
    // insensitive to underlying data changes.
    final int type = ResultSet.TYPE_SCROLL_INSENSITIVE;
    return type;
}

From source file:fr.bird.bloom.model.GeographicTreatment.java

/**
 * Convert the iso2 code (2 letters) to iso3 code (3 letters)
 * /*from ww w  .j a v  a  2 s .  co  m*/
 * @param String iso2
 * @return String iso3
 */
public String convertIso2ToIso3(String iso2) {
    String iso3 = "";

    Statement statement = null;
    try {
        statement = ConnectionDatabase.getConnection().createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_READ_ONLY);
    } catch (SQLException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    DatabaseTreatment newConnection = new DatabaseTreatment(statement);

    List<String> messages = new ArrayList<>();
    messages.add("\n--- Convert iso2 code to iso3 code ---");
    String sqlConvertIso2Iso3 = "SELECT iso3_ FROM Workflow.IsoCode WHERE iso2_ = \""
            + iso2.replaceAll("\"", "") + "\";";
    messages.addAll(newConnection.executeSQLcommand("executeQuery", sqlConvertIso2Iso3));

    List<String> resultatConvert = newConnection.getResultatSelect();

    if (resultatConvert.size() != 2) {
        //System.err.println(iso2 + "\t" + resultatConvert + "\t" + listInfos);
        System.err.println("error to convert iso2 to iso3.\n Iso2 : " + iso2);
        iso3 = "error";
    } else {
        iso3 = resultatConvert.get(1).replaceAll("\"", "");
    }
    return iso3;
}

From source file:org.openbravo.database.ConnectionProviderImpl.java

/**
 * Creates a scroll-insensitive, read-only Statement on the given connection.
 * On failure the connection is released back to the pool and the exception
 * is rethrown to the caller.
 *
 * @param conn the connection to create the statement on; may be null
 * @return a new Statement, or null when conn is null
 * @throws SQLException if statement creation fails
 */
public Statement getStatement(Connection conn) throws SQLException {
    if (conn == null) {
        return null;
    }
    try {
        // Scrollable read-only cursors let callers navigate results freely.
        return conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    } catch (SQLException e) {
        log4j.error("getStatement: " + e);
        // Don't leak the pooled connection when the statement cannot be created.
        releaseConnection(conn);
        throw e;
    }
}

From source file:org.integratedmodelling.sql.SQLServer.java

/**
 * Submits the given query to the server and saves the result in a new
 * QueryResult object. To be used knowingly.
 *
 * @param sql the SQL text to execute
 * @return the populated QueryResult
 * @throws ThinklabStorageException wrapping any SQLException raised by the driver
 */
public QueryResult query(String sql) throws ThinklabStorageException {

    QueryResult ret = null;

    Connection conn = null;
    Statement stmt = null;
    ResultSet rset = null;

    try {
        conn = getConnection();
        // Scrollable, updatable cursor.
        // NOTE(review): confirm CONCUR_UPDATABLE is actually required — many
        // drivers pay a cost for updatable result sets.
        stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
        rset = stmt.executeQuery(sql);

        ret = new QueryResult(rset);

    } catch (SQLException e) {
        throw new ThinklabStorageException(e);
    } finally {
        // The ResultSet handed to QueryResult is closed here before returning —
        // this assumes QueryResult copies the data eagerly in its constructor.
        // NOTE(review): verify; otherwise callers receive a closed cursor.
        try {
            rset.close();
        } catch (Exception e) {
            // best-effort close; rset is null when executeQuery never ran
        }
        try {
            stmt.close();
        } catch (Exception e) {
            // best-effort close
        }
        try {
            conn.close();
        } catch (Exception e) {
            // best-effort close
        }
    }
    return ret;

}

From source file:com.netspective.axiom.sql.Query.java

/**
 * Prepares a statement for this query's SQL text, optionally scrollable,
 * and binds its parameters.
 *
 * @param cc             the connection context supplying the JDBC connection
 * @param overrideParams explicit positional parameters; when non-null they are
 *                       bound in order and the query's own parameters are ignored
 * @param scrollable     when true the statement is created scroll-insensitive
 *                       and read-only
 * @return the prepared, parameter-bound statement
 * @throws NamingException if the connection lookup fails
 * @throws SQLException    if preparing or binding fails
 */
protected PreparedStatement createStatement(ConnectionContext cc, Object[] overrideParams, boolean scrollable)
        throws NamingException, SQLException {
    final Connection conn = cc.getConnection();
    final DbmsSqlText sqlText = getSqlText(cc);
    final String sql = sqlText.getSql(cc);

    final PreparedStatement stmt = scrollable
            ? conn.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)
            : conn.prepareStatement(sql);

    if (overrideParams != null) {
        // Explicit parameters take precedence; JDBC parameters are 1-based.
        int index = 1;
        for (Object param : overrideParams) {
            stmt.setObject(index++, param);
        }
    } else {
        final QueryParameters parameters = sqlText.getParams();
        if (parameters != null) {
            parameters.apply(cc, stmt);
        }
    }
    return stmt;
}

From source file:edu.ku.brc.specify.conversion.IdHashMapper.java

/**
 * Looks the NewID and returns the OldID
 * @param newId/* w w w.j  a va  2 s  .c  o m*/
 * @return the OldID
 */
@Override
public Integer reverseGet(final Integer newId) {
    if (newId == null) {
        return null;
    }

    try {
        if (stmtNew == null) {
            stmtNew = oldConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        }

        Integer oldId = null;

        ResultSet rs = stmtNew.executeQuery("SELECT OldID FROM " + mapTableName + " WHERE NewID = " + newId);
        if (rs.next()) {
            oldId = rs.getInt(1);

        } else {
            rs.close();
            return null;
        }
        rs.close();

        return oldId;

    } catch (SQLException ex) {
        ex.printStackTrace();
        log.error(ex);
    }
    return null;
}

From source file:jp.mathes.databaseWiki.db.postgres.PostgresBackend.java

/**
 * Loads the named document from the given table, filling its fields from the
 * matching database row. When no row matches and allowEmpty is true, an empty
 * document (optionally pre-filled from defaultFieldValues) is returned;
 * otherwise a DocumentNotFoundException is thrown.
 *
 * @param user               database user for the connection
 * @param password           database password
 * @param db                 database name
 * @param table              table (possibly schema-qualified) to read from
 * @param name               value of the table's name field identifying the row
 * @param allowEmpty         when true, a missing row yields an empty document
 * @param defaultFieldValues optional field defaults applied to an empty document
 * @return the populated (or empty) document
 * @throws BackendException wrapping SQL, driver-loading, or date-parse failures
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public Document getDocument(final String user, final String password, final String db, final String table,
        final String name, final boolean allowEmpty, final Map<String, String[]> defaultFieldValues)
        throws BackendException {
    Connection conn = null;
    Statement st = null;
    ResultSet rs = null;
    try {
        conn = this.connectToDB(user, password, db);
        PostgresDocument doc = this.createEmptyDocument(conn, table, name, db);
        String nameField = this.getNameField(conn, table, db);
        // Scroll-insensitive cursor — presumably so getNumRows() can count rows
        // and rewind; NOTE(review): confirm against getNumRows' implementation.
        st = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        // NOTE(review): 'name' is interpolated directly into the SQL text —
        // vulnerable to SQL injection if document names are user-controlled;
        // a PreparedStatement placeholder would be safer.
        String queryString = String.format("select * from \"%s\".\"%s\" where \"%s\"='%s'",
                this.getSchemaName(table, db), this.getPlainTableName(table), nameField, name);
        this.logString(queryString, user);
        rs = st.executeQuery(queryString);
        if (this.getNumRows(rs) == 0) {
            if (allowEmpty) {
                // No row: build the document from the supplied defaults, coercing
                // each value to the field's declared type.
                if (defaultFieldValues != null) {
                    for (String key : defaultFieldValues.keySet()) {
                        Field field = doc.getAllFields().get(key);
                        if (field != null) {
                            if (field.getType() == FieldType.string || field.getType() == FieldType.text) {
                                field.setValue(defaultFieldValues.get(key)[0]);
                            } else if (field.getType() == FieldType.date) {
                                try {
                                    field.setValue(new SimpleDateFormat("y-M-d")
                                            .parse(defaultFieldValues.get(key)[0]));
                                } catch (ParseException e) {
                                    throw new BackendException(e);
                                }
                            } else if (field.getType() == FieldType.dec) {
                                field.setValue(Integer.valueOf(defaultFieldValues.get(key)[0]));
                            } else if (field.getType() == FieldType.num) {
                                field.setValue(Double.valueOf(defaultFieldValues.get(key)[0]));
                            }
                        }
                    }
                }
                return doc;
            } else {
                throw new DocumentNotFoundException(String.format("Document '%s' not found in table '%s.%s'.",
                        name, this.getSchemaName(table, db), this.getPlainTableName(table)));
            }
        }
        rs.next();
        // Copy each column of the (single) matching row into the document,
        // dispatching on the field's declared type.
        ResultSetMetaData md = rs.getMetaData();
        for (int i = 1; i <= md.getColumnCount(); i++) {
            Field field = doc.getAllFields().get(md.getColumnName(i));
            // NOTE(review): unlike the defaults branch above, 'field' is not
            // null-checked here — a column absent from the document would NPE.
            if (field.getType() == FieldType.string || field.getType() == FieldType.text) {
                field.setValue(rs.getString(i));
            } else if (field.getType() == FieldType.date) {
                field.setValue(rs.getDate(i));
            } else if (field.getType() == FieldType.dec) {
                field.setValue(rs.getInt(i));
            } else if (field.getType() == FieldType.num) {
                field.setValue(rs.getDouble(i));
            }
        }
        return doc;
    } catch (SQLException e) {
        throw new BackendException(e);
    } catch (ClassNotFoundException e) {
        throw new BackendException(e);
    } finally {
        DbUtils.closeQuietly(rs);
        DbUtils.closeQuietly(st);
        DbUtils.closeQuietly(conn);
    }
}

From source file:com.streamsets.pipeline.stage.origin.jdbc.JdbcSource.java

/**
 * Produces one batch of records from the configured query, resuming an open
 * cursor from the previous batch when possible. Enforces the configured query
 * interval, opens a new connection/statement when the result set is closed,
 * tracks the source offset per record, and emits query-success / query-failure
 * and no-more-data events.
 *
 * @param lastSourceOffset offset returned by the previous call, or null on first run
 * @param maxBatchSize     framework-requested upper bound on records per batch
 * @param batchMaker       sink for produced records
 * @return the next source offset to resume from
 * @throws StageException when consecutive SQL errors exceed the configured retry limit
 */
@Override
public String produce(String lastSourceOffset, int maxBatchSize, BatchMaker batchMaker) throws StageException {
    int batchSize = Math.min(this.commonSourceConfigBean.maxBatchSize, maxBatchSize);
    String nextSourceOffset = lastSourceOffset == null ? initialOffset : lastSourceOffset;

    // Honor the configured delay between query executions.
    long now = System.currentTimeMillis();
    long delay = Math.max(0, (lastQueryCompletedTime + queryIntervalMillis) - now);

    if (delay > 0) {
        // Sleep in one second increments so we don't tie up the app.
        LOG.debug("{}ms remaining until next fetch.", delay);
        ThreadUtil.sleep(Math.min(delay, 1000));
    } else {
        Statement statement = null;
        Hasher hasher = HF.newHasher();
        try {
            if (null == resultSet || resultSet.isClosed()) {
                // The result set got closed outside of us, so we also clean up the connection (if any)
                closeQuietly(connection);

                connection = dataSource.getConnection();

                if (!txnColumnName.isEmpty()) {
                    // CDC requires scrollable cursors.
                    statement = connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                            ResultSet.CONCUR_READ_ONLY);
                } else {
                    statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
                            ResultSet.CONCUR_READ_ONLY);
                }

                int fetchSize = batchSize;
                // MySQL does not support cursors or fetch size except 0 and "streaming" (1 at a time).
                if (hikariConfigBean.getConnectionString().toLowerCase().contains("mysql")) {
                    // Enable MySQL streaming mode.
                    fetchSize = Integer.MIN_VALUE;
                }
                LOG.debug("Using query fetch size: {}", fetchSize);
                statement.setFetchSize(fetchSize);

                // In preview mode, cap the row count so previews return quickly.
                if (getContext().isPreview()) {
                    statement.setMaxRows(batchSize);
                }
                preparedQuery = prepareQuery(query, lastSourceOffset);
                LOG.trace("Executing query: " + preparedQuery);
                // Only the hash is logged at debug level to avoid leaking query contents.
                hashedQuery = hasher.putString(preparedQuery, Charsets.UTF_8).hash().toString();
                LOG.debug("Executing query: " + hashedQuery);
                resultSet = statement.executeQuery(preparedQuery);
                queryRowCount = 0;
                numQueryErrors = 0;
                firstQueryException = null;
            }

            // Read Data and track last offset
            int rowCount = 0;
            String lastTransactionId = "";
            boolean haveNext = true;
            while (continueReading(rowCount, batchSize) && (haveNext = resultSet.next())) {
                final Record record = processRow(resultSet, rowCount);

                if (null != record) {
                    if (!txnColumnName.isEmpty()) {
                        // CDC mode: keep each batch within a single transaction ID.
                        String newTransactionId = resultSet.getString(txnColumnName);
                        if (lastTransactionId.isEmpty()) {
                            lastTransactionId = newTransactionId;
                            batchMaker.addRecord(record);
                        } else if (lastTransactionId.equals(newTransactionId)) {
                            batchMaker.addRecord(record);
                        } else {
                            // The Transaction ID Column Name config should not be used with MySQL as it
                            // does not provide a change log table and the JDBC driver may not support scrollable cursors.
                            resultSet.relative(-1);
                            break; // Complete this batch without including the new record.
                        }
                    } else {
                        batchMaker.addRecord(record);
                    }
                }

                // Get the offset column value for this record
                if (isIncrementalMode) {
                    nextSourceOffset = resultSet.getString(offsetColumn);
                } else {
                    nextSourceOffset = initialOffset;
                }
                ++rowCount;
                ++queryRowCount;
                ++noMoreDataRecordCount;
                shouldFire = true;
            }
            LOG.debug("Processed rows: " + rowCount);

            if (!haveNext || rowCount == 0) {
                // We didn't have any data left in the cursor. Close everything
                // We may not have the statement here if we're not producing the
                // same batch as when we got it, so get it from the result set
                // Get it before we close the result set, just to be safe!
                statement = resultSet.getStatement();
                closeQuietly(resultSet);
                closeQuietly(statement);
                closeQuietly(connection);
                lastQueryCompletedTime = System.currentTimeMillis();
                LOG.debug("Query completed at: {}", lastQueryCompletedTime);
                QUERY_SUCCESS.create(getContext()).with(QUERY, preparedQuery)
                        .with(TIMESTAMP, lastQueryCompletedTime).with(ROW_COUNT, queryRowCount)
                        .with(SOURCE_OFFSET, nextSourceOffset).createAndSend();

                // In case of non-incremental mode, we need to generate no-more-data event as soon as we hit end of the
                // result set. Incremental mode will try to run the query again and generate the event if and only if
                // the next query results in zero rows.
                if (!isIncrementalMode) {
                    generateNoMoreDataEvent();
                }
            }

            /*
             * We want to generate no-more data event on next batch if:
             * 1) We run a query in this batch and returned empty.
             * 2) We consumed at least some data since last time (to not generate the event all the time)
             */

            if (isIncrementalMode && rowCount == 0 && !haveNext && shouldFire && !firstTime) {
                generateNoMoreDataEvent();
                shouldFire = false;
            }
            firstTime = false;

        } catch (SQLException e) {
            // Remember the first failure so the eventual StageException can report it.
            if (++numQueryErrors == 1) {
                firstQueryException = e;
            }
            String formattedError = jdbcUtil.formatSqlException(e);
            LOG.error(formattedError, e);
            if (resultSet != null) {
                try {
                    statement = resultSet.getStatement();
                } catch (SQLException e1) {
                    LOG.debug("Error while getting statement from result set: {}", e1.toString(), e1);
                }
                closeQuietly(resultSet);
                closeQuietly(statement);
            }
            closeQuietly(connection);
            lastQueryCompletedTime = System.currentTimeMillis();
            QUERY_FAILURE.create(getContext()).with(QUERY, preparedQuery)
                    .with(TIMESTAMP, lastQueryCompletedTime).with(ERROR, formattedError)
                    .with(ROW_COUNT, queryRowCount).with(SOURCE_OFFSET, nextSourceOffset).createAndSend();
            LOG.debug("Query '{}' failed at: {}; {} errors so far", preparedQuery, lastQueryCompletedTime,
                    numQueryErrors);
            if (numQueryErrors > commonSourceConfigBean.numSQLErrorRetries) {
                throw new StageException(JdbcErrors.JDBC_77, e.getClass().getSimpleName(), preparedQuery,
                        numQueryErrors, jdbcUtil.formatSqlException(firstQueryException));
            } // else allow nextSourceOffset to be returned, to retry
        }
    }
    return nextSourceOffset;
}

From source file:BQJDBC.QueryResultTest.QueryResultTest.java

/**
 * Verifies that a scroll-insensitive BigQuery result set with a large LIMIT
 * can be navigated directly to its last row via absolute positioning.
 */
@Test
public void QueryResultTest12() {
    final int limitNum = 40000;
    final String sql = "SELECT weight_pounds  FROM publicdata:samples.natality LIMIT " + limitNum;

    this.logger.info("Test number: 12");
    this.logger.info("Running query:" + sql);

    java.sql.ResultSet queryResult = null;
    try {
        final Statement stm = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_READ_ONLY);
        stm.setFetchSize(1000);
        queryResult = stm.executeQuery(sql);
    } catch (SQLException e) {
        this.logger.error("SQLexception" + e.toString());
        Assert.fail("SQLException" + e.toString());
    }
    Assert.assertNotNull(queryResult);
    try {
        // Jump straight to the last expected row; a scrollable cursor makes
        // this legal without iterating through every intermediate row.
        queryResult.absolute(limitNum);
        Assert.assertTrue(true);
    } catch (SQLException e) {
        e.printStackTrace();
        Assert.fail();
    }
}