List of usage examples for java.sql ResultSet FETCH_FORWARD
int FETCH_FORWARD: the constant indicating that the rows in a result set will be processed in a forward direction, first to last.
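Before the examples, a minimal sketch of the typical pattern: hint the fetch direction on a statement before executing a query. This is illustrative only and not taken from any source below; the JDBC URL, query, and fetch size are placeholders.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class FetchForwardSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL (in-memory H2); substitute your own database.
            try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                    Statement stmt = conn.createStatement()) {
                // FETCH_FORWARD is a hint: rows will be read first-to-last,
                // letting the driver stream instead of buffering everything.
                stmt.setFetchDirection(ResultSet.FETCH_FORWARD);
                stmt.setFetchSize(100); // rows per round trip (driver-dependent)
                try (ResultSet rs = stmt.executeQuery("SELECT 1")) {
                    while (rs.next()) {
                        System.out.println(rs.getInt(1));
                    }
                }
            }
        }
    }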
From source file:net.solarnetwork.node.dao.jdbc.AbstractJdbcDatumDao.java
    /**
     * Find datum entities that have not been uploaded to a specific
     * destination.
     *
     * <p>
     * This executes SQL from the {@code findForUploadSql} property. It uses the
     * {@code maxFetchForUpload} property to limit the number of rows returned,
     * so the call may not return all rows available from the database (this is
     * to conserve memory and process the data in small batches).
     * </p>
     *
     * @param rowMapper
     *        a {@link RowMapper} implementation to instantiate entities from
     *        found rows
     * @return the matching rows, never <em>null</em>
     */
    protected List<T> findDatumNotUploaded(final RowMapper<T> rowMapper) {
        List<T> result = getJdbcTemplate().query(new PreparedStatementCreator() {

            @Override
            public PreparedStatement createPreparedStatement(Connection con) throws SQLException {
                String sql = getSqlResource(SQL_RESOURCE_FIND_FOR_UPLOAD);
                if (log.isTraceEnabled()) {
                    log.trace("Preparing SQL to find datum not uploaded [" + sql
                            + "] with maxFetchForUpload [" + maxFetchForUpload + ']');
                }
                PreparedStatement ps = con.prepareStatement(sql);
                // Hint forward-only iteration, and cap both the fetch batch
                // size and the total rows so each pass is a small batch.
                ps.setFetchDirection(ResultSet.FETCH_FORWARD);
                ps.setFetchSize(maxFetchForUpload);
                ps.setMaxRows(maxFetchForUpload);
                return ps;
            }
        }, rowMapper);
        if (log.isDebugEnabled()) {
            log.debug("Found " + result.size() + " datum entities not uploaded");
        }
        return result;
    }
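Because each call returns at most maxFetchForUpload rows, a caller would typically loop until the method returns an empty list. The sketch below is hypothetical: the uploadAll method, the uploadDatum helper, and the repeat-until-empty policy are assumptions for illustration, not part of the SolarNetwork source.

    // Hypothetical caller, assumed to live in a subclass of AbstractJdbcDatumDao<T>.
    protected void uploadAll(RowMapper<T> rowMapper) {
        // Each call returns at most maxFetchForUpload rows, so loop until empty.
        List<T> batch = findDatumNotUploaded(rowMapper);
        while (!batch.isEmpty()) {
            for (T datum : batch) {
                uploadDatum(datum); // assumed helper that marks the row uploaded
            }
            batch = findDatumNotUploaded(rowMapper);
        }
    }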
From source file:edu.ku.brc.specify.toycode.mexconabio.MakeGBIFProcessHash.java
    @Override
    public void process(final int type, final int options) {
        final double HRS = 1000.0 * 60.0 * 60.0;
        final long PAGE_CNT = 1000000;

        totalRecs = BasicSQLUtils.getCount(dbGBIFConn, "SELECT COUNT(*) FROM raw");
        int minIndex = BasicSQLUtils.getCount(dbGBIFConn, "SELECT MIN(id) FROM raw");
        //int maxIndex = BasicSQLUtils.getCount(dbGBIFConn, "SELECT MAX(id) FROM raw");

        int segs = (int) (totalRecs / PAGE_CNT) + 1;

        try {
            pw = new PrintWriter("GroupHash.log");
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        }

        long procRecs = 0;
        long startTime = System.currentTimeMillis();
        int secsThreshold = 0;

        try {
            String idsInsert = "INSERT INTO group_hash_ids (GrpID, RawID) VALUES (?,?)";
            insertIds = dbDstConn.prepareStatement(idsInsert);

            String gbifsnibInsert = "INSERT INTO group_hash (collnum, genus, year, mon, cnt) VALUES (?,?,?,?,?)";
            insertStmt = dbDstConn.prepareStatement(gbifsnibInsert);

            String gbifsnibUpdate = "UPDATE group_hash SET cnt=? WHERE id = ?";
            updateStmt = dbDstConn.prepareStatement(gbifsnibUpdate);

            String gbifsnibCheck = "SELECT id FROM group_hash WHERE collnum=? AND genus=? AND year=?";
            checkStmt = dbDstConn.prepareStatement(gbifsnibCheck);

        } catch (SQLException ex) {
            ex.printStackTrace();
        }

        for (int pc = 0; pc < segs; pc++) {
            try {
                String clause = String.format(" FROM raw WHERE id > %d AND id < %d",
                        (pc * PAGE_CNT) + minIndex, ((pc + 1) * PAGE_CNT) + minIndex + 1);
                String gbifSQL = "SELECT id, collector_num, genus, year, month " + clause;

                System.out.println(gbifSQL);
                pw.println(gbifSQL);

                // Note: createStatement expects a result set *type* here (e.g.
                // ResultSet.TYPE_FORWARD_ONLY); the original source passes
                // ResultSet.FETCH_FORWARD instead. Combined with
                // setFetchSize(Integer.MIN_VALUE), the intent is MySQL
                // Connector/J's row-by-row streaming mode.
                stmt = dbGBIFConn.createStatement(ResultSet.FETCH_FORWARD, ResultSet.CONCUR_READ_ONLY);
                stmt.setFetchSize(Integer.MIN_VALUE);

                String msg = "Starting Query... " + totalRecs;
                System.out.println(msg);
                pw.println(msg);

                ResultSet rs = stmt.executeQuery(gbifSQL);

                msg = String.format("Starting Processing... Total Records %d Max Score: %d Threshold: %d",
                        totalRecs, maxScore, thresholdScore);
                System.out.println(msg);
                pw.println(msg);

                while (rs.next()) {
                    procRecs++;

                    String year = rs.getString(4);
                    year = StringUtils.isNotEmpty(year) ? year.trim() : null;
                    if (StringUtils.isNotEmpty(year) && !StringUtils.isNumeric(year)) {
                        continue;
                    }

                    int rawId = rs.getInt(1);
                    String collnum = rs.getString(2);
                    String genus = rs.getString(3);
                    String mon = rs.getString(5);

                    collnum = StringUtils.isNotEmpty(collnum) ? collnum.trim() : null;
                    genus = StringUtils.isNotEmpty(genus) ? genus.trim() : null;
                    mon = StringUtils.isNotEmpty(mon) ? mon.trim() : null;

                    // Skip rows where two of the three grouping keys are missing.
                    int c = 0;
                    if (collnum == null) c++;
                    if (genus == null) c++;
                    if (year == null) c++;
                    if (c == 2) {
                        continue;
                    }

                    collnum = collnum != null ? collnum : "";
                    genus = genus != null ? genus : "";
                    year = year != null ? year : "";
                    mon = mon != null ? mon : "";

                    if (collnum.length() > 64) {
                        collnum = collnum.substring(0, 63);
                    }
                    if (genus.length() > 64) {
                        genus = genus.substring(0, 63);
                    }
                    if (year.length() > 8) {
                        year = year.substring(0, 8);
                    }
                    if (mon.length() > 8) {
                        mon = mon.substring(0, 8);
                    }

                    String name = String.format("%s_%s_%s", collnum, genus, year);
                    DataEntry de = groupHash.get(name);
                    if (de != null) {
                        de.cnt++;
                    } else {
                        de = getDataEntry(collnum, genus, year, mon);
                        groupHash.put(name, de);
                    }
                    de.ids.add(rawId);

                    if (groupHash.size() > MAX_RECORDS_SEG) {
                        writeHash();
                    }
                }
                rs.close();

                if (groupHash.size() > 0) {
                    writeHash();
                }

                System.out.println("Done with seg " + pc);
                pw.println("Done with seg " + pc);

            } catch (Exception ex) {
                ex.printStackTrace();
            } finally {
                try {
                    if (stmt != null) {
                        stmt.close();
                    }
                } catch (Exception ex) {
                }
            }

            long endTime = System.currentTimeMillis();
            long elapsedTime = endTime - startTime;
            double timePerRecord = (elapsedTime / procRecs);
            double hrsLeft = ((totalRecs - procRecs) * timePerRecord) / HRS;

            // elapsedTime / 60000 yields minutes, though the variable is named "seconds".
            int seconds = (int) (elapsedTime / 60000.0);
            if (secsThreshold != seconds) {
                secsThreshold = seconds;
                String msg = String.format("Elapsed %8.2f hr.mn Percent: %6.3f Hours Left: %8.2f ",
                        ((double) (elapsedTime)) / HRS,
                        100.0 * ((double) procRecs / (double) totalRecs), hrsLeft);
                System.out.println(msg);
                pw.println(msg);
                pw.flush();
            }
        }

        try {
            if (insertStmt != null) {
                insertStmt.close();
            }
            if (updateStmt != null) {
                updateStmt.close();
            }
            if (checkStmt != null) {
                checkStmt.close();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }

        String msg = String.format("Done - Writes: %d Updates: %d", writeCnt, updateCnt);
        System.out.println(msg);
        pw.println(msg);
        pw.flush();
        pw.close();
    }
From source file:org.apache.jena.jdbc.remote.connections.RemoteEndpointConnection.java
    @Override
    protected JenaStatement createStatementInternal(int resultSetType, int resultSetConcurrency,
            int resultSetHoldability) throws SQLException {
        if (this.isClosed())
            throw new SQLException("Cannot create a statement after the connection was closed");
        if (resultSetType == ResultSet.TYPE_SCROLL_SENSITIVE)
            throw new SQLFeatureNotSupportedException(
                    "Remote endpoint backed connections do not support scroll sensitive result sets");
        if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
            throw new SQLFeatureNotSupportedException(
                    "Remote endpoint backed connections only support read-only result sets");
        // FETCH_FORWARD is supplied as the statement's fixed fetch direction.
        return new RemoteEndpointStatement(this, this.client, resultSetType, ResultSet.FETCH_FORWARD, 0,
                resultSetHoldability);
    }
From source file:org.apache.jena.jdbc.remote.connections.RemoteEndpointConnection.java
    @Override
    protected JenaPreparedStatement createPreparedStatementInternal(String sparql, int resultSetType,
            int resultSetConcurrency, int resultSetHoldability) throws SQLException {
        if (this.isClosed())
            throw new SQLException("Cannot create a statement after the connection was closed");
        if (resultSetType == ResultSet.TYPE_SCROLL_SENSITIVE)
            throw new SQLFeatureNotSupportedException(
                    "Remote endpoint backed connections do not support scroll sensitive result sets");
        if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
            throw new SQLFeatureNotSupportedException(
                    "Remote endpoint backed connections only support read-only result sets");
        // As above, FETCH_FORWARD is the statement's fixed fetch direction.
        return new RemoteEndpointPreparedStatement(sparql, this, this.client, resultSetType,
                ResultSet.FETCH_FORWARD, 0, resultSetHoldability);
    }
From source file:com.alibaba.wasp.jdbc.JdbcStatement.java
    /**
     * Gets the fetch direction.
     *
     * @return FETCH_FORWARD
     * @throws java.sql.SQLException
     *         if this object is closed
     */
    @Override
    public int getFetchDirection() throws SQLException {
        try {
            checkClosed();
            return ResultSet.FETCH_FORWARD;
        } catch (Exception e) {
            throw Logger.logAndConvert(log, e);
        }
    }
From source file:fr.cnes.sitools.datasource.jdbc.business.SitoolsDataSource.java
    /**
     * Make the SQL request starting at offset and returning maxrows records
     *
     * @param sql
     *        SQL request
     * @param maxrows
     *        the maximal number of rows
     * @param offset
     *        the offset in rows
     * @return ResultSet
     */
    public ResultSet limitedQuery(String sql, int maxrows, int offset) {
        Connection conn = null;
        ResultSet rs = null;
        try {
            String sqlCompleted = addLimitOffset(sql, maxrows, offset);
            conn = getConnection();
            if (conn == null) {
                LOG.log(Level.WARNING, "getConnection failed");
                return null;
            }

            // Change inspired by the LAM
            LOG.log(Level.INFO, "Limited query = " + sqlCompleted);

            // Set autocommit to false to enable the use of cursors;
            // this allows JDBC's streaming mechanisms to be used.
            conn.setAutoCommit(false);

            PreparedStatement prep = conn.prepareStatement(sqlCompleted, ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY);
            if (prep == null) {
                LOG.log(Level.WARNING, "prepareStatement failed");
                return null;
            }

            int fetchSize = SitoolsSettings.getInstance().getInt("Starter.JDBC_FETCH_SIZE");
            // Set the number of rows fetched per streamed batch.
            prep.setFetchSize(fetchSize);
            prep.setFetchDirection(ResultSet.FETCH_FORWARD);

            rs = prep.executeQuery();
            return new DBResultSet(rs, prep, conn);
        } catch (SQLException ex) {
            LOG.log(Level.SEVERE, null, ex);
            closeConnection(conn);
            closeResultSet(rs);
            conn = null;
        } catch (RuntimeException ex) {
            LOG.log(Level.SEVERE, null, ex);
            closeConnection(conn);
            closeResultSet(rs);
        } catch (Exception ex) {
            LOG.log(Level.SEVERE, null, ex);
            closeConnection(conn);
            closeResultSet(rs);
            conn = null;
        }
        return null;
    }
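Note the setAutoCommit(false) call before preparing the statement: with drivers such as PostgreSQL's, rows are only streamed through a server-side cursor when autocommit is off and a nonzero fetch size is set; FETCH_FORWARD on its own is just a hint. A minimal sketch of that cursor idiom, with a placeholder URL, credentials, and table name:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    public class CursorStreamingSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL and credentials; substitute your own.
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:postgresql://localhost/demo", "user", "pass")) {
                // Cursor-based streaming requires autocommit off (PostgreSQL).
                conn.setAutoCommit(false);
                try (PreparedStatement prep = conn.prepareStatement(
                        "SELECT id FROM big_table",
                        ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
                    prep.setFetchSize(1000); // rows fetched per cursor round trip
                    prep.setFetchDirection(ResultSet.FETCH_FORWARD);
                    try (ResultSet rs = prep.executeQuery()) {
                        while (rs.next()) {
                            System.out.println(rs.getLong(1));
                        }
                    }
                }
            }
        }
    }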
From source file:com.github.adejanovski.cassandra.jdbc.CassandraStatement.java
@SuppressWarnings("boxing") public void setFetchDirection(int direction) throws SQLException { checkNotClosed();/*from w w w . j a v a 2 s . co m*/ if (direction == ResultSet.FETCH_FORWARD || direction == ResultSet.FETCH_REVERSE || direction == ResultSet.FETCH_UNKNOWN) { if ((getResultSetType() == ResultSet.TYPE_FORWARD_ONLY) && (direction != ResultSet.FETCH_FORWARD)) throw new SQLSyntaxErrorException(String.format(BAD_FETCH_DIR, direction)); fetchDirection = direction; } else throw new SQLSyntaxErrorException(String.format(BAD_FETCH_DIR, direction)); }
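To see the contract this enforces from the caller's side, here is an illustrative sketch (not from the Cassandra driver source; the connection is assumed to be open): per the JDBC specification, a forward-only statement accepts FETCH_FORWARD and must reject any other direction.

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class FetchDirectionContract {
        static void demo(Connection conn) throws SQLException {
            Statement stmt = conn.createStatement(); // forward-only by default
            stmt.setFetchDirection(ResultSet.FETCH_FORWARD); // accepted
            try {
                stmt.setFetchDirection(ResultSet.FETCH_REVERSE); // must be rejected
            } catch (SQLException e) {
                System.out.println("Rejected as expected: " + e.getMessage());
            }
        }
    }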
From source file:com.taobao.tdhs.jdbc.TDHSStatement.java
    public int getFetchDirection() throws SQLException {
        return ResultSet.FETCH_FORWARD;
    }
From source file:net.starschema.clouddb.jdbc.BQForwardOnlyResultSet.java
/** * <p>/*www . j a v a 2 s . c o m*/ * <h1>Implementation Details:</h1><br> * Not implemented yet. * </p> * * @throws BQSQLException */ @Override public int getFetchDirection() throws SQLException { return ResultSet.FETCH_FORWARD; }
From source file:org.apache.bigtop.itest.hive.TestJdbc.java
    @Test
    public void statement() throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.cancel();
        }

        try (Statement stmt = conn.createStatement()) {
            stmt.clearWarnings();

            final String tableName = "bigtop_jdbc_statement_test_table";
            stmt.execute("drop table if exists " + tableName);
            stmt.execute("create table " + tableName + " (a int, b varchar(32))");

            stmt.executeUpdate("insert into " + tableName + " values (1, 'abc'), (2, 'def')");

            int intrc = stmt.getUpdateCount();
            LOG.debug("Update count is " + intrc);

            ResultSet rs = stmt.executeQuery("select * from " + tableName);
            while (rs.next()) {
                LOG.debug("Fetched " + rs.getInt(1) + "," + rs.getString(2));
            }

            Connection localConn = stmt.getConnection();

            intrc = stmt.getFetchDirection();
            LOG.debug("Fetch direction is " + intrc);

            intrc = stmt.getFetchSize();
            LOG.debug("Fetch size is " + intrc);

            intrc = stmt.getMaxRows();
            LOG.debug("max rows is " + intrc);

            boolean boolrc = stmt.getMoreResults();
            LOG.debug("more results is " + boolrc);

            intrc = stmt.getQueryTimeout();
            LOG.debug("query timeout is " + intrc);

            stmt.execute("select * from " + tableName);
            rs = stmt.getResultSet();
            while (rs.next()) {
                LOG.debug("Fetched " + rs.getInt(1) + "," + rs.getString(2));
            }

            intrc = stmt.getResultSetType();
            LOG.debug("result set type is " + intrc);

            SQLWarning warning = stmt.getWarnings();
            while (warning != null) {
                LOG.debug("Found a warning: " + warning.getMessage());
                warning = warning.getNextWarning();
            }

            boolrc = stmt.isClosed();
            LOG.debug("is closed " + boolrc);

            boolrc = stmt.isCloseOnCompletion();
            LOG.debug("is close on completion " + boolrc);

            boolrc = stmt.isPoolable();
            LOG.debug("is poolable " + boolrc);

            stmt.setFetchDirection(ResultSet.FETCH_FORWARD);
            stmt.setFetchSize(500);
            stmt.setMaxRows(500);
        }
    }