Example usage for java.sql Statement setFetchSize

List of usage examples for java.sql Statement setFetchSize

Introduction

On this page you can find usage examples for java.sql Statement setFetchSize.

Prototype

void setFetchSize(int rows) throws SQLException;

Documentation

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for ResultSet objects generated by this Statement.
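
Before the per-project examples below, here is a minimal, self-contained sketch of the typical pattern: set the fetch size on the Statement before executing the query so the driver can retrieve rows in batches instead of buffering the whole result set. The table name big_table and the fetch size of 500 are placeholder assumptions; the value is only a hint and a driver may ignore or adjust it.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class FetchSizeSketch {

    // Streams rows from a (hypothetical) large table, hinting the driver to
    // fetch 500 rows per round trip rather than materializing the entire result.
    static void scanLargeTable(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.setFetchSize(500); // hint only; the driver may ignore it
            try (ResultSet rs = stmt.executeQuery("SELECT id FROM big_table")) {
                while (rs.next()) {
                    long id = rs.getLong(1);
                    // process each row here without holding all rows in memory
                }
            }
        }
    }
}

The examples below apply the same idea with different values: a fetch size of 1 when only a single row is needed, and larger values when streaming bulk data.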

Usage

From source file:nl.nn.adapterframework.jdbc.SimpleJdbcListener.java

protected Object getRawMessage(Connection conn, Map threadContext) throws ListenerException {
    String query = getSelectQuery();
    try {
        Statement stmt = null;
        try {
            stmt = conn.createStatement();
            stmt.setFetchSize(1);
            ResultSet rs = null;
            try {
                if (trace && log.isDebugEnabled())
                    log.debug("executing query for [" + query + "]");
                rs = stmt.executeQuery(query);
                if (!rs.next()) {
                    return null;
                }
                int count = rs.getInt(1);
                if (count == 0) {
                    return null;
                }
                return "<count>" + count + "</count>";
            } finally {
                if (rs != null) {
                    rs.close();
                }
            }

        } finally {
            if (stmt != null) {
                stmt.close();
            }
        }
    } catch (Exception e) {
        throw new ListenerException(
                getLogPrefix() + "caught exception retrieving message using query [" + query + "]", e);
    }

}

From source file:org.apache.gobblin.source.jdbc.JdbcExtractor.java

/**
 * Execute a query using a simple JDBC Statement, setting the fetch size.
 *
 * @param cmds commands - query, fetch size
 * @return JDBC ResultSet
 * @throws Exception
 */
private CommandOutput<?, ?> executeSql(List<Command> cmds) {
    String query = null;
    int fetchSize = 0;

    for (Command cmd : cmds) {
        if (cmd instanceof JdbcCommand) {
            JdbcCommandType type = (JdbcCommandType) cmd.getCommandType();
            switch (type) {
            case QUERY:
                query = cmd.getParams().get(0);
                break;
            case FETCHSIZE:
                fetchSize = Integer.parseInt(cmd.getParams().get(0));
                break;
            default:
                this.log.error("Command " + type.toString() + " not recognized");
                break;
            }
        }
    }

    this.log.info("Executing query:" + query);
    ResultSet resultSet = null;
    try {
        this.jdbcSource = createJdbcSource();
        if (this.dataConnection == null) {
            this.dataConnection = this.jdbcSource.getConnection();
        }
        Statement statement = this.dataConnection.createStatement();

        if (fetchSize != 0 && this.getExpectedRecordCount() > 2000) {
            statement.setFetchSize(fetchSize);
        }
        final boolean status = statement.execute(query);
        if (status == false) {
            this.log.error("Failed to execute sql:" + query);
        }
        resultSet = statement.getResultSet();
    } catch (Exception e) {
        this.log.error("Failed to execute sql:" + query + " ;error-" + e.getMessage(), e);
    }

    CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput();
    output.put((JdbcCommand) cmds.get(0), resultSet);
    return output;
}

From source file:org.apache.hadoop.hive.jdbc.TestJdbcDriver.java

private void doTestSelectAll(String tableName, int maxRows, int fetchSize) throws Exception {
    boolean isPartitionTable = tableName.equals(partitionedTableName);

    Statement stmt = con.createStatement();
    if (maxRows >= 0) {
        stmt.setMaxRows(maxRows);
    }
    if (fetchSize > 0) {
        stmt.setFetchSize(fetchSize);
        assertEquals(fetchSize, stmt.getFetchSize());
    }

    // JDBC says that 0 means return all, which is the default
    int expectedMaxRows = maxRows < 1 ? 0 : maxRows;

    assertNotNull("Statement is null", stmt);
    assertEquals("Statement max rows not as expected", expectedMaxRows, stmt.getMaxRows());
    assertFalse("Statement should not be closed", stmt.isClosed());

    ResultSet res;

    // run some queries
    res = stmt.executeQuery("select * from " + tableName);
    assertNotNull("ResultSet is null", res);
    assertTrue("getResultSet() not returning expected ResultSet", res == stmt.getResultSet());
    assertEquals("get update count not as expected", 0, stmt.getUpdateCount());
    int i = 0;

    ResultSetMetaData meta = res.getMetaData();
    int expectedColCount = isPartitionTable ? 3 : 2;
    assertEquals("Unexpected column count", expectedColCount, meta.getColumnCount());

    String colQualifier = ((tableName != null) && !tableName.isEmpty()) ? tableName.toLowerCase() + "." : "";
    boolean moreRow = res.next();
    while (moreRow) {
        try {
            i++;
            assertEquals(res.getInt(1), res.getInt(colQualifier + "under_col"));
            assertEquals(res.getString(1), res.getString(colQualifier + "under_col"));
            assertEquals(res.getString(2), res.getString(colQualifier + "value"));
            if (isPartitionTable) {
                assertEquals(res.getString(3), partitionedColumnValue);
                assertEquals(res.getString(3), res.getString(colQualifier + partitionedColumnName));
            }
            assertFalse("Last result value was not null", res.wasNull());
            assertNull("No warnings should be found on ResultSet", res.getWarnings());
            res.clearWarnings(); // verifying that method is supported

            // System.out.println(res.getString(1) + " " + res.getString(2));
            assertEquals("getInt and getString don't align for the same result value",
                    String.valueOf(res.getInt(1)), res.getString(1));
            assertEquals("Unexpected result found", "val_" + res.getString(1), res.getString(2));
            moreRow = res.next();
        } catch (SQLException e) {
            System.out.println(e.toString());
            e.printStackTrace();
            throw new Exception(e.toString());
        }
    }

    // supposed to get 500 rows if maxRows isn't set
    int expectedRowCount = maxRows > 0 ? maxRows : 500;
    assertEquals("Incorrect number of rows returned", expectedRowCount, i);

    // should have no more rows
    assertEquals(false, moreRow);

    assertNull("No warnings should be found on statement", stmt.getWarnings());
    stmt.clearWarnings(); // verifying that method is supported

    assertNull("No warnings should be found on connection", con.getWarnings());
    con.clearWarnings(); // verifying that method is supported

    stmt.close();
    assertTrue("Statement should be closed", stmt.isClosed());
}

From source file:org.apache.hadoop.hive.ql.dataImport.BaseDataExtract.java

public ResultSet executeQuery(String sql) throws HiveException {
    try {
        Statement st = conn.createStatement();
        st.setFetchSize(config.getBufferLimit());
        return st.executeQuery(sql);
    } catch (SQLException e) {
        throw new HiveException(e.getMessage());
    }
}

From source file:org.apache.hadoop.hive.ql.dataToDB.BaseDBExternalDataLoad.java

private ResultSetMetaData getMetaData() throws SemanticException {
    Connection conn = null;
    try {
        String sql = "select * from " + config.getDbTable();
        Class.forName(config.getDriver());
        conn = DriverManager.getConnection(config.getUrl(), config.getUser(), config.getPwd());
        try {
            String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
            String processID = processName.substring(0, processName.indexOf('@'));
            String appinfo = "getMetaData_" + processID + "_" + SessionState.get().getSessionName();
            conn.setClientInfo("ApplicationName", appinfo);
        } catch (Exception e) {
            e.printStackTrace();
        }
        conn.setAutoCommit(false);
        Statement s = conn.createStatement();
        s.setFetchSize(1);
        ResultSet set = s.executeQuery(sql);
        conn.commit();
        ResultSetMetaData meta = set.getMetaData();
        return meta;

    } catch (Exception e) {
        throw new SemanticException(e.getMessage());
    }
}

From source file:org.apache.hive.jdbc.TestJdbcDriver2.java

private void doTestSelectAll(String tableName, int maxRows, int fetchSize) throws Exception {
    boolean isPartitionTable = tableName.equals(partitionedTableName);

    Statement stmt = con.createStatement();
    if (maxRows >= 0) {
        stmt.setMaxRows(maxRows);
    }
    if (fetchSize > 0) {
        stmt.setFetchSize(fetchSize);
        assertEquals(fetchSize, stmt.getFetchSize());
    }

    // JDBC says that 0 means return all, which is the default
    int expectedMaxRows = maxRows < 1 ? 0 : maxRows;

    assertNotNull("Statement is null", stmt);
    assertEquals("Statement max rows not as expected", expectedMaxRows, stmt.getMaxRows());
    assertFalse("Statement should not be closed", stmt.isClosed());

    ResultSet res;

    // run some queries
    res = stmt.executeQuery("select * from " + tableName);
    assertNotNull("ResultSet is null", res);
    assertTrue("getResultSet() not returning expected ResultSet", res == stmt.getResultSet());
    assertEquals("get update count not as expected", -1, stmt.getUpdateCount());
    int i = 0;

    ResultSetMetaData meta = res.getMetaData();
    int expectedColCount = isPartitionTable ? 3 : 2;
    assertEquals("Unexpected column count", expectedColCount, meta.getColumnCount());

    boolean moreRow = res.next();
    while (moreRow) {
        try {
            i++;
            assertEquals(res.getInt(1), res.getInt(tableName + ".under_col"));
            assertEquals(res.getInt(1), res.getInt("under_col"));
            assertEquals(res.getString(1), res.getString(tableName + ".under_col"));
            assertEquals(res.getString(1), res.getString("under_col"));
            assertEquals(res.getString(2), res.getString(tableName + ".value"));
            assertEquals(res.getString(2), res.getString("value"));
            if (isPartitionTable) {
                assertEquals(res.getString(3), partitionedColumnValue);
                assertEquals(res.getString(3), res.getString(partitionedColumnName));
                assertEquals(res.getString(3), res.getString(tableName + "." + partitionedColumnName));
            }
            assertFalse("Last result value was not null", res.wasNull());
            assertNull("No warnings should be found on ResultSet", res.getWarnings());
            res.clearWarnings(); // verifying that method is supported

            // System.out.println(res.getString(1) + " " + res.getString(2));
            assertEquals("getInt and getString don't align for the same result value",
                    String.valueOf(res.getInt(1)), res.getString(1));
            assertEquals("Unexpected result found", "val_" + res.getString(1), res.getString(2));
            moreRow = res.next();
        } catch (SQLException e) {
            System.out.println(e.toString());
            e.printStackTrace();
            throw new Exception(e.toString());
        }
    }

    // supposed to get 500 rows if maxRows isn't set
    int expectedRowCount = maxRows > 0 ? maxRows : 500;
    assertEquals("Incorrect number of rows returned", expectedRowCount, i);

    // should have no more rows
    assertEquals(false, moreRow);

    assertNull("No warnings should be found on statement", stmt.getWarnings());
    stmt.clearWarnings(); // verifying that method is supported

    assertNull("No warnings should be found on connection", con.getWarnings());
    con.clearWarnings(); // verifying that method is supported

    stmt.close();
    assertTrue("Statement should be closed", stmt.isClosed());
}

From source file:org.apache.metamodel.jdbc.JdbcDataContextTest.java

public void testMaxRows() throws Exception {
    final Connection realCon = getTestDbConnection();
    final Statement realStatement = realCon.createStatement();

    final Connection mockCon = EasyMock.createMock(Connection.class);
    final Statement mockStatement = EasyMock.createMock(Statement.class);

    EasyMock.expect(mockCon.getMetaData()).andReturn(realCon.getMetaData()).anyTimes();

    EasyMock.expect(mockCon.getAutoCommit()).andReturn(true);

    EasyMock.expect(mockCon.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY))
            .andReturn(mockStatement);

    EasyMock.expect(mockStatement.getFetchSize()).andReturn(10);
    mockStatement.setFetchSize(EasyMock.anyInt());
    mockStatement.setMaxRows(3);
    EasyMock.expectLastCall().andThrow(new SQLException("I wont allow max rows"));

    EasyMock.expect(mockStatement.executeQuery(
            "SELECT a.\"CUSTOMERNUMBER\", a.\"CUSTOMERNAME\", a.\"CONTACTLASTNAME\", a.\"CONTACTFIRSTNAME\", "
                    + "a.\"PHONE\", a.\"ADDRESSLINE1\", a.\"ADDRESSLINE2\", a.\"CITY\", a.\"STATE\", "
                    + "a.\"POSTALCODE\", a.\"COUNTRY\", a.\"SALESREPEMPLOYEENUMBER\", "
                    + "a.\"CREDITLIMIT\" FROM PUBLIC.\"CUSTOMERS\" a"))
            .andReturn(realStatement
                    .executeQuery("SELECT a.\"CUSTOMERNUMBER\", a.\"CUSTOMERNAME\", a.\"CONTACTLASTNAME\", "
                            + "a.\"CONTACTFIRSTNAME\", a.\"PHONE\", a.\"ADDRESSLINE1\", a.\"ADDRESSLINE2\", a.\"CITY\", "
                            + "a.\"STATE\", a.\"POSTALCODE\", a.\"COUNTRY\", a.\"SALESREPEMPLOYEENUMBER\", "
                            + "a.\"CREDITLIMIT\" FROM PUBLIC.\"CUSTOMERS\" a"));

    mockStatement.close();

    EasyMock.replay(mockCon, mockStatement);

    JdbcDataContext dc = new JdbcDataContext(mockCon, new TableType[] { TableType.TABLE, TableType.VIEW },
            null);
    dc.setQueryRewriter(new DefaultQueryRewriter(dc));
    Schema schema = dc.getDefaultSchema();

    Query q = new Query().setMaxRows(3);
    Table table = schema.getTables()[0];
    q.from(table, "a");
    q.select(table.getColumns());
    assertEquals(
            "SELECT a.\"CUSTOMERNUMBER\", a.\"CUSTOMERNAME\", a.\"CONTACTLASTNAME\", a.\"CONTACTFIRSTNAME\", "
                    + "a.\"PHONE\", a.\"ADDRESSLINE1\", a.\"ADDRESSLINE2\", a.\"CITY\", a.\"STATE\", a.\"POSTALCODE\", "
                    + "a.\"COUNTRY\", a.\"SALESREPEMPLOYEENUMBER\", a.\"CREDITLIMIT\" FROM PUBLIC.\"CUSTOMERS\" a",
            q.toString());
    DataSet result = dc.executeQuery(q);
    assertTrue(result.next());
    assertEquals(
            "Row[values=[103, Atelier graphique, Schmitt, Carine, 40.32.2555, 54, rue Royale, null, Nantes, null, "
                    + "44000, France, 1370, 21000.0]]",
            result.getRow().toString());
    assertTrue(result.next());
    assertTrue(result.next());
    assertFalse(result.next());

    result.close();

    EasyMock.verify(mockCon, mockStatement);
    realStatement.close();
}

From source file:org.apache.oozie.tools.OozieDBCLI.java

private void replaceForDerby(String oldStr, String newStr) throws Exception {
    Connection connRead = createConnection();
    try {
        connRead.setAutoCommit(false);
        Statement st = connRead.createStatement();
        // set fetch size to limit number of rows into memory for large table
        st.setFetchSize(100);
        ResultSet rs = st.executeQuery(COORD_ACTION_ID_DEPS);
        while (rs.next()) {
            String id = rs.getString(1);
            Clob clob = rs.getClob(2);
            String clobStr = clob.getSubString(1, (int) clob.length());
            clob.setString(1, clobStr.replace(oldStr, newStr));
            PreparedStatement prepStmt = connRead
                    .prepareStatement("UPDATE COORD_ACTIONS SET MISSING_DEPENDENCIES=? WHERE ID=?");
            prepStmt.setString(1, clob.getSubString(1, (int) clob.length()));
            prepStmt.setString(2, id);
            prepStmt.execute();
            prepStmt.close();
        }
    } finally {
        connRead.commit();
        connRead.close();
    }
}

From source file:org.apache.phoenix.end2end.PartialScannerResultsDisabledIT.java

/**
 * Simple select query with a fetch size that exceeds the result size. In that case the scan
 * starts to produce partial result sets, which from the Phoenix perspective are rows with NULL
 * values.
 * @throws SQLException
 */
@Test
public void partialResultDuringSelect() throws SQLException {
    String tableName = generateUniqueName();
    Properties props = new Properties();
    props.setProperty(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, "5");
    int numRecords = 10;
    try (Connection conn = DriverManager.getConnection(url, props)) {
        conn.createStatement()
                .execute("CREATE TABLE " + tableName + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)");
        int i = 0;
        String upsert = "UPSERT INTO " + tableName + " VALUES (?, ?)";
        PreparedStatement stmt = conn.prepareStatement(upsert);
        while (i < numRecords) {
            stmt.setInt(1, i);
            stmt.setString(2, UUID.randomUUID().toString());
            stmt.executeUpdate();
            i++;
        }
        conn.commit();

        String sql = "SELECT * FROM " + tableName;
        // at every next call wait for this period. This will cause lease to expire.
        Statement s = conn.createStatement();
        s.setFetchSize(100);
        ResultSet rs = s.executeQuery(sql);
        int count = 0;
        while (rs.next()) {
            if (rs.getString(2) == null)
                fail("Null value because of partial row scan");
            count++;
        }
    }

}

From source file:org.apache.sqoop.connector.jdbc.GenericJdbcExecutor.java

private void setFetchSize(Statement statement) throws SQLException {
    if (link.linkConfig.fetchSize != null) {
        statement.setFetchSize(link.linkConfig.fetchSize);
    }
}