List of usage examples for java.sql.ResultSet#getType.

Method signature: int getType() throws SQLException; — returns the type of this ResultSet object (TYPE_FORWARD_ONLY, TYPE_SCROLL_INSENSITIVE, or TYPE_SCROLL_SENSITIVE).

From source file: com.glaf.core.jdbc.QueryHelper.java
protected void skipRows(ResultSet rs, int firstResult, int maxResults) throws SQLException { if (rs.getType() != ResultSet.TYPE_FORWARD_ONLY) { if (firstResult != 0) { rs.absolute(firstResult);//from w ww .j ava2s .c om } } else { for (int i = 0; i < firstResult; i++) { rs.next(); } } }
From source file:com.github.woonsan.jdbc.jcr.impl.JcrJdbcResultSetTest.java
/**
 * Executes the employee query and verifies the result set's reported
 * characteristics, column-label mapping, and cursor position before and
 * after full iteration.
 */
@Test
public void testExecuteSQLQuery() throws Exception {
    final Statement statement = getConnection().createStatement();
    final ResultSet rs = statement.executeQuery(SQL_EMPS);

    assertSame(statement, rs.getStatement());

    // A JCR-backed result set reports forward-only, read-only, holdable.
    assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType());
    assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency());
    assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, rs.getHoldability());

    assertFalse(rs.isClosed());
    assertTrue(rs.isBeforeFirst());
    assertFalse(rs.isAfterLast());

    // Column labels resolve to their 1-based indices.
    assertEquals(1, rs.findColumn("empno"));
    assertEquals(2, rs.findColumn("ename"));
    assertEquals(3, rs.findColumn("salary"));
    assertEquals(4, rs.findColumn("hiredate"));

    final int rowsPrinted = printResultSet(rs);
    assertEquals(getEmpRowCount(), rowsPrinted);

    // Iteration must have consumed the cursor completely.
    assertFalse(rs.isBeforeFirst());
    assertTrue(rs.isAfterLast());

    rs.close();
    assertTrue(rs.isClosed());
    statement.close();
    assertTrue(statement.isClosed());
}
From source file:com.github.woonsan.jdbc.jcr.impl.JcrJdbcResultSetTest.java
@SuppressWarnings("deprecation") @Test//from w w w .j ava2 s .c om public void testResultSetWhenClosed() throws Exception { Statement statement = getConnection().createStatement(); ResultSet rs = statement.executeQuery(SQL_EMPS); rs.close(); try { rs.isBeforeFirst(); fail(); } catch (SQLException ignore) { } try { rs.isAfterLast(); fail(); } catch (SQLException ignore) { } try { rs.isFirst(); fail(); } catch (SQLException ignore) { } try { rs.isLast(); fail(); } catch (SQLException ignore) { } try { rs.beforeFirst(); fail(); } catch (SQLException ignore) { } try { rs.afterLast(); fail(); } catch (SQLException ignore) { } try { rs.first(); fail(); } catch (SQLException ignore) { } try { rs.last(); fail(); } catch (SQLException ignore) { } try { rs.next(); fail(); } catch (SQLException ignore) { } try { rs.getRow(); fail(); } catch (SQLException ignore) { } try { rs.getType(); fail(); } catch (SQLException ignore) { } try { rs.getConcurrency(); fail(); } catch (SQLException ignore) { } try { rs.rowUpdated(); fail(); } catch (SQLException ignore) { } try { rs.rowDeleted(); fail(); } catch (SQLException ignore) { } try { rs.rowInserted(); fail(); } catch (SQLException ignore) { } try { rs.getStatement(); fail(); } catch (SQLException ignore) { } try { rs.wasNull(); fail(); } catch (SQLException ignore) { } try { rs.getString(1); fail(); } catch (SQLException ignore) { } try { rs.getString("col1"); fail(); } catch (SQLException ignore) { } try { rs.getBoolean(1); fail(); } catch (SQLException ignore) { } try { rs.getBoolean("col1"); fail(); } catch (SQLException ignore) { } try { rs.getByte(1); fail(); } catch (SQLException ignore) { } try { rs.getByte("col1"); fail(); } catch (SQLException ignore) { } try { rs.getShort(1); fail(); } catch (SQLException ignore) { } try { rs.getShort("col1"); fail(); } catch (SQLException ignore) { } try { rs.getInt(1); fail(); } catch (SQLException ignore) { } try { rs.getInt("col1"); fail(); } catch (SQLException ignore) { } try { 
rs.getLong(1); fail(); } catch (SQLException ignore) { } try { rs.getLong("col1"); fail(); } catch (SQLException ignore) { } try { rs.getFloat(1); fail(); } catch (SQLException ignore) { } try { rs.getFloat("col1"); fail(); } catch (SQLException ignore) { } try { rs.getDouble(1); fail(); } catch (SQLException ignore) { } try { rs.getDouble("col1"); fail(); } catch (SQLException ignore) { } try { rs.getBigDecimal(1); fail(); } catch (SQLException ignore) { } try { rs.getBigDecimal("col1"); fail(); } catch (SQLException ignore) { } try { rs.getBytes(1); fail(); } catch (SQLException ignore) { } try { rs.getBytes("col1"); fail(); } catch (SQLException ignore) { } try { rs.getDate(1); fail(); } catch (SQLException ignore) { } try { rs.getDate(1, null); fail(); } catch (SQLException ignore) { } try { rs.getDate("col1"); fail(); } catch (SQLException ignore) { } try { rs.getDate("col1", null); fail(); } catch (SQLException ignore) { } try { rs.getTime(1); fail(); } catch (SQLException ignore) { } try { rs.getTime(1, null); fail(); } catch (SQLException ignore) { } try { rs.getTime("col1"); fail(); } catch (SQLException ignore) { } try { rs.getTime("col1", null); fail(); } catch (SQLException ignore) { } try { rs.getTimestamp(1); fail(); } catch (SQLException ignore) { } try { rs.getTimestamp(1, null); fail(); } catch (SQLException ignore) { } try { rs.getTimestamp("col1"); fail(); } catch (SQLException ignore) { } try { rs.getTimestamp("col1", null); fail(); } catch (SQLException ignore) { } try { rs.getAsciiStream(1); fail(); } catch (SQLException ignore) { } try { rs.getAsciiStream("col1"); fail(); } catch (SQLException ignore) { } try { rs.getUnicodeStream(1); fail(); } catch (SQLException ignore) { } try { rs.getUnicodeStream("col1"); fail(); } catch (SQLException ignore) { } try { rs.getBinaryStream(1); fail(); } catch (SQLException ignore) { } try { rs.getBinaryStream("col1"); fail(); } catch (SQLException ignore) { } try { rs.getCharacterStream(1); fail(); } catch 
(SQLException ignore) { } try { rs.getCharacterStream("col1"); fail(); } catch (SQLException ignore) { } try { rs.getMetaData(); fail(); } catch (SQLException ignore) { } try { rs.setFetchDirection(1); fail(); } catch (SQLException ignore) { } try { rs.getFetchDirection(); fail(); } catch (SQLException ignore) { } try { rs.setFetchSize(100); fail(); } catch (SQLException ignore) { } try { rs.getFetchSize(); fail(); } catch (SQLException ignore) { } try { rs.getHoldability(); fail(); } catch (SQLException ignore) { } statement.close(); }
From source file:org.apache.bigtop.itest.hive.TestJdbc.java
/**
 * Smoke-tests PreparedStatement parameter setters and ResultSet /
 * ResultSetMetaData accessors against Hive JDBC: creates a table covering
 * the primitive SQL types, inserts rows via positional parameters, then
 * reads every column back through both index- and label-based getters,
 * logging each value rather than asserting (driver coverage test).
 */
@Test
public void preparedStmtAndResultSet() throws SQLException {
    final String tableName = "bigtop_jdbc_psars_test_table";
    // Recreate the fixture table from scratch on every run.
    try (Statement stmt = conn.createStatement()) {
        stmt.execute("drop table if exists " + tableName);
        stmt.execute("create table " + tableName + " (bo boolean, ti tinyint, db double, fl float, "
                + "i int, lo bigint, sh smallint, st varchar(32))");
    }
    // NOTE Hive 1.2 theoretically support binary, Date & Timestamp in JDBC, but I get errors when I
    // try to put them in the query.
    try (PreparedStatement ps = conn
            .prepareStatement("insert into " + tableName + " values (?, ?, ?, ?, ?, ?, ?, ?)")) {
        // One setter per SQL type declared in the table above.
        ps.setBoolean(1, true);
        ps.setByte(2, (byte) 1);
        ps.setDouble(3, 3.141592654);
        ps.setFloat(4, 3.14f);
        ps.setInt(5, 3);
        ps.setLong(6, 10L);
        ps.setShort(7, (short) 20);
        ps.setString(8, "abc");
        ps.executeUpdate();
    }
    // Second insert path: exercises setNull (both overloads), setObject,
    // clearParameters and execute(), plus the type/concurrency overload of
    // prepareStatement.
    try (PreparedStatement ps = conn.prepareStatement("insert into " + tableName + " (i, st) "
            + "values(?, ?)", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
        ps.setNull(1, Types.INTEGER);
        ps.setObject(2, "mary had a little lamb");
        ps.executeUpdate();
        // Parameters set, then deliberately discarded via clearParameters()
        // and set again before execute() — exercises the clear path.
        ps.setNull(1, Types.INTEGER, null);
        ps.setString(2, "its fleece was white as snow");
        ps.clearParameters();
        ps.setNull(1, Types.INTEGER, null);
        ps.setString(2, "its fleece was white as snow");
        ps.execute();
    }
    try (Statement stmt = conn.createStatement()) {
        ResultSet rs = stmt.executeQuery("select * from " + tableName);
        ResultSetMetaData md = rs.getMetaData();
        int colCnt = md.getColumnCount();
        LOG.debug("Column count is " + colCnt);
        // Walk every ResultSetMetaData accessor for every column.
        for (int i = 1; i <= colCnt; i++) {
            LOG.debug("Looking at column " + i);
            String strrc = md.getColumnClassName(i);
            LOG.debug("Column class name is " + strrc);
            int intrc = md.getColumnDisplaySize(i);
            LOG.debug("Column display size is " + intrc);
            strrc = md.getColumnLabel(i);
            LOG.debug("Column label is " + strrc);
            strrc = md.getColumnName(i);
            LOG.debug("Column name is " + strrc);
            intrc = md.getColumnType(i);
            LOG.debug("Column type is " + intrc);
            strrc = md.getColumnTypeName(i);
            LOG.debug("Column type name is " + strrc);
            intrc = md.getPrecision(i);
            LOG.debug("Precision is " + intrc);
            intrc = md.getScale(i);
            LOG.debug("Scale is " + intrc);
            boolean boolrc = md.isAutoIncrement(i);
            LOG.debug("Is auto increment? " + boolrc);
            boolrc = md.isCaseSensitive(i);
            LOG.debug("Is case sensitive? " + boolrc);
            boolrc = md.isCurrency(i);
            LOG.debug("Is currency? " + boolrc);
            intrc = md.getScale(i);
            LOG.debug("Scale is " + intrc);
            intrc = md.isNullable(i);
            LOG.debug("Is nullable? " + intrc);
            boolrc = md.isReadOnly(i);
            LOG.debug("Is read only? " + boolrc);
        }
        // Read each column twice: once by 1-based index, once by label.
        while (rs.next()) {
            LOG.debug("bo = " + rs.getBoolean(1));
            LOG.debug("bo = " + rs.getBoolean("bo"));
            LOG.debug("ti = " + rs.getByte(2));
            LOG.debug("ti = " + rs.getByte("ti"));
            LOG.debug("db = " + rs.getDouble(3));
            LOG.debug("db = " + rs.getDouble("db"));
            LOG.debug("fl = " + rs.getFloat(4));
            LOG.debug("fl = " + rs.getFloat("fl"));
            LOG.debug("i = " + rs.getInt(5));
            LOG.debug("i = " + rs.getInt("i"));
            LOG.debug("lo = " + rs.getLong(6));
            LOG.debug("lo = " + rs.getLong("lo"));
            LOG.debug("sh = " + rs.getShort(7));
            LOG.debug("sh = " + rs.getShort("sh"));
            LOG.debug("st = " + rs.getString(8));
            LOG.debug("st = " + rs.getString("st"));
            LOG.debug("tm = " + rs.getObject(8));
            LOG.debug("tm = " + rs.getObject("st"));
            // wasNull() reports on the most recent getter call above.
            LOG.debug("tm was null " + rs.wasNull());
        }
        // Post-iteration ResultSet accessors.
        LOG.debug("bo is column " + rs.findColumn("bo"));
        int intrc = rs.getConcurrency();
        LOG.debug("concurrency " + intrc);
        intrc = rs.getFetchDirection();
        LOG.debug("fetch direction " + intrc);
        intrc = rs.getType();
        LOG.debug("type " + intrc);
        Statement copy = rs.getStatement();
        // Drain and clear any accumulated driver warnings.
        SQLWarning warning = rs.getWarnings();
        while (warning != null) {
            LOG.debug("Found a warning: " + warning.getMessage());
            warning = warning.getNextWarning();
        }
        rs.clearWarnings();
    }
}
From source file:org.apache.hive.beeline.BeeLine.java
/** * Try to obtain the current size of the specified {@link ResultSet} by jumping to the last row * and getting the row number.//from w w w. j a va2 s.co m * * @param rs * the {@link ResultSet} to get the size for * @return the size, or -1 if it could not be obtained */ int getSize(ResultSet rs) { try { if (rs.getType() == rs.TYPE_FORWARD_ONLY) { return -1; } rs.last(); int total = rs.getRow(); rs.beforeFirst(); return total; } catch (SQLException sqle) { return -1; } // JDBC 1 driver error catch (AbstractMethodError ame) { return -1; } }
From source file:org.apache.ibatis.executor.resultset.FastResultSetHandler.java
protected void skipRows(ResultSet rs, RowBounds rowBounds) throws SQLException { if (rs.getType() != ResultSet.TYPE_FORWARD_ONLY) { if (rowBounds.getOffset() != RowBounds.NO_ROW_OFFSET) { rs.absolute(rowBounds.getOffset()); }//from ww w. j av a 2 s . c om } else { for (int i = 0; i < rowBounds.getOffset(); i++) rs.next(); } }
From source file:org.giswater.dao.MainDao.java
public static int getNumberOfRows(ResultSet rs) { if (rs == null) { return 0; }//w w w. j a va 2 s . c o m try { if (rs.getType() == ResultSet.TYPE_FORWARD_ONLY) { System.out.println("FORWARD"); return 0; } rs.last(); return rs.getRow(); } catch (SQLException e) { Utils.logError(e); } finally { try { rs.beforeFirst(); } catch (SQLException e) { Utils.logError(e); } } return 0; }
From source file:org.kawanfw.test.api.client.DatabaseMetaDataTest.java
/**
 * Print a ResultSet and reinit it (cursor is rewound to before-first when done).
 *
 * @param rs the result set to print; must be scrollable
 * @throws SQLException if the result set cannot be scrolled or printed
 */
private void printResultSet(ResultSet rs) throws SQLException {
    MessageDisplayer.display();
    // Printing requires rewinding, which a forward-only cursor cannot do.
    if (rs.getType() == ResultSet.TYPE_FORWARD_ONLY) {
        System.out.println("Impossible to print the ResultSet: TYPE_FORWARD_ONLY");
        MessageDisplayer.display();
        return;
    }
    ResultSetPrinter resultSetPrinter;
    rs.beforeFirst();
    // Special treatment for Android: print into a temp file first, then read
    // it back line by line (presumably System.out is not usable there —
    // NOTE(review): confirm against FrameworkSystemUtil docs).
    if (FrameworkSystemUtil.isAndroid()) {
        File file = createAceqlTempFile();
        // MessageDisplayer.display(file.toString());
        PrintStream ps = null;
        BufferedReader reader = null;
        try {
            ps = new PrintStream(new BufferedOutputStream(new FileOutputStream(file)));
            resultSetPrinter = new ResultSetPrinter(rs, ps, false);
            resultSetPrinter.print();
            // Flush/close before reading the file back.
            ps.close();
            // Read back the file and display it line by line.
            reader = new BufferedReader(new FileReader(file));
            String line = null;
            while ((line = reader.readLine()) != null) {
                MessageDisplayer.display(line);
            }
        } catch (IOException ioe) {
            // Surface file I/O failures through the method's SQLException contract.
            throw new SQLException(ioe);
        } finally {
            // Best-effort cleanup; the temp file is always deleted.
            IOUtils.closeQuietly(reader);
            IOUtils.closeQuietly(ps);
            file.delete();
        }
    } else {
        resultSetPrinter = new ResultSetPrinter(rs, System.out, false);
        resultSetPrinter.print();
    }
    // Rewind so the caller can re-read the result set.
    rs.beforeFirst();
    MessageDisplayer.display();
}
From source file:org.orbisgis.corejdbc.internal.ReadRowSetImpl.java
/**
 * Read the content of the DB near the current row id and populate the row
 * cache, then point {@code currentRow} at the cached entry for {@code rowId}.
 * Two strategies: without a primary key the row is located by cursor
 * positioning; with a primary key, rows are fetched in batches of
 * {@code fetchSize} keyed by the first PK of each batch.
 */
protected void refreshRowCache() throws SQLException {
    // Only fetch when the row is not cached and rowId is within [1, rowCount].
    if (!cache.containsKey(rowId) && rowId > 0 && rowId <= getRowCount()) {
        try (Resource res = resultSetHolder.getResource()) {
            ResultSet rs = res.getResultSet();
            final int columnCount = getColumnCount();
            if (cachedColumnNames == null) {
                cacheColumnNames();
            }
            // Do not use pk if not available or if using indeterminate fetch without filtering
            if (pk_name.isEmpty()) {
                boolean validRow = false;
                if (rs.getType() == ResultSet.TYPE_FORWARD_ONLY) {
                    if (rowId < rs.getRow()) {
                        // If the result set is Forward only, we have to re-execute the request in order to read the row
                        resultSetHolder.close();
                        res.close();
                        try (Resource res2 = resultSetHolder.getResource()) {
                            rs = res2.getResultSet();
                        }
                    }
                    // Advance one row at a time until the cursor reaches rowId.
                    while (rs.getRow() < rowId) {
                        validRow = rs.next();
                    }
                } else {
                    // Scrollable cursor: jump straight to the target row.
                    validRow = rs.absolute((int) rowId);
                }
                if (validRow) {
                    Object[] row = new Object[columnCount];
                    for (int idColumn = 1; idColumn <= columnCount; idColumn++) {
                        Object obj = rs.getObject(idColumn);
                        // Materialize Clobs as Strings so cached rows do not
                        // hold on to live DB resources.
                        if (obj instanceof Clob) {
                            Clob clob = (Clob) obj;
                            obj = clob.getSubString(1, (int) clob.length());
                        }
                        row[idColumn - 1] = obj;
                    }
                    cache.put(rowId, new Row(row, null));
                }
            } else {
                // Fetch block pk of current row
                final int targetBatch = (int) (rowId - 1) / fetchSize;
                if (currentBatchId != targetBatch) {
                    if (targetBatch >= rowFetchFirstPk.size()
                            || (targetBatch != 0 && rowFetchFirstPk.get(targetBatch) == null)) {
                        // For optimisation sake
                        // Like binary search if the gap of target batch is too wide, require average PK values
                        // (bounded by MAX_INTERMEDIATE_BATCH intermediate PK fetches).
                        int topBatchCount = getBatchCount();
                        int lowerBatchCount = 0;
                        int intermediateBatchFetching = 0;
                        while (lowerBatchCount + ((topBatchCount - lowerBatchCount) / 2) != targetBatch
                                && intermediateBatchFetching < MAX_INTERMEDIATE_BATCH) {
                            int midleBatchTarget = lowerBatchCount + ((topBatchCount - lowerBatchCount) / 2);
                            if (targetBatch < midleBatchTarget) {
                                topBatchCount = midleBatchTarget;
                            } else {
                                // Fetch the midpoint batch's first PK if unknown,
                                // then narrow the search from below.
                                if (midleBatchTarget >= rowFetchFirstPk.size()
                                        || rowFetchFirstPk.get(midleBatchTarget) == null) {
                                    fetchBatchPk(midleBatchTarget);
                                }
                                intermediateBatchFetching++;
                                lowerBatchCount = midleBatchTarget;
                            }
                        }
                        fetchBatchPk(targetBatch);
                    }
                    // Fetch all data of current batch
                    Long firstPk = fetchBatch(rowFetchFirstPk.get(targetBatch), true, 0);
                    if (firstPk != null) {
                        // Record the first PK of the *next* batch for later jumps.
                        if (targetBatch + 1 < rowFetchFirstPk.size()) {
                            rowFetchFirstPk.set(targetBatch + 1, firstPk);
                        } else {
                            rowFetchFirstPk.add(firstPk);
                        }
                    }
                    currentBatchId = targetBatch;
                }
                // Ok, still in current batch
                int targetRowInBatch = (int) (rowId - 1) % fetchSize;
                if (targetRowInBatch < currentBatch.size()) {
                    cache.put(rowId, currentBatch.get(targetRowInBatch));
                }
            }
        }
    }
    // May be null if the row could not be fetched above.
    currentRow = cache.get(rowId);
}
From source file:org.pentaho.reporting.engine.classic.core.modules.misc.datafactory.sql.ResultSetTableModelFactory.java
/** * Creates a table model by using the given <code>ResultSet</code> as the backend. If the <code>ResultSet</code> is * scrollable (the type is not <code>TYPE_FORWARD_ONLY</code>), an instance of {@link * org.pentaho.reporting.engine.classic.core.modules.misc.datafactory.sql.ScrollableResultSetTableModel} is returned. * This model uses the extended capabilities of scrollable result sets to directly read data from the database without * caching or the need of copying the complete <code>ResultSet</code> into the programs memory. * <p/>//from w ww. j a va2s . com * If the <code>ResultSet</code> lacks the scrollable features, the data will be copied into a * <code>DefaultTableModel</code> and the <code>ResultSet</code> gets closed. * * @param rs the result set. * @param columnNameMapping defines, whether to use column names or column labels to compute the column index. If * true, then we map the Name. If false, then we map the Label * @param closeStatement a flag indicating whether closing the resultset should also close the statement. * @return a closeable table model. * @throws SQLException if there is a problem with the result set. 
*/ public CloseableTableModel createTableModel(final ResultSet rs, final boolean columnNameMapping, final boolean closeStatement) throws SQLException { // Allow for override, some jdbc drivers are buggy :( final String prop = ClassicEngineBoot.getInstance().getGlobalConfig() .getConfigProperty(ResultSetTableModelFactory.RESULTSET_FACTORY_MODE, "auto"); //$NON-NLS-1$ if ("simple".equalsIgnoreCase(prop)) { //$NON-NLS-1$ return generateDefaultTableModel(rs, columnNameMapping); } int resultSetType = ResultSet.TYPE_FORWARD_ONLY; try { resultSetType = rs.getType(); } catch (SQLException sqle) { ResultSetTableModelFactory.logger .info("ResultSet type could not be determined, assuming default table model."); //$NON-NLS-1$ } if (resultSetType == ResultSet.TYPE_FORWARD_ONLY) { return generateDefaultTableModel(rs, columnNameMapping); } else { rs.last(); int rowCount = rs.getRow(); rs.beforeFirst(); if (rowCount < 500) { return generateDefaultTableModel(rs, columnNameMapping); } return new ScrollableResultSetTableModel(rs, columnNameMapping, closeStatement); } }