List of usage examples for java.sql PreparedStatement setMaxRows

void setMaxRows(int max) throws SQLException;

Sets the limit for the maximum number of rows that any ResultSet object generated by this Statement object can contain to the given number.

From source file: org.wso2.carbon.dataservices.core.description.query.SQLQuery.java
/**
 * Creates (or reuses) a PreparedStatement/CallableStatement for the given query type,
 * applies statement-level tuning (timeout, fetch direction/size, max field size, max rows),
 * and binds all parameters from {@code params}.
 *
 * @param queryType either DS_QUERY_TYPE_NORMAL or DS_QUERY_TYPE_STORED_PROC
 * @param params    the parameter collection to bind (1-based lookup)
 * @param conn      the JDBC connection used to prepare a new statement when no batch
 *                  statement is already in progress
 * @return the prepared (and, in JDBC batch mode, batched) statement
 * @throws DataServiceFault on an unsupported query type or any SQLException
 */
private PreparedStatement createProcessedPreparedStatement(int queryType, InternalParamCollection params,
        Connection conn) throws DataServiceFault {
    try {
        /* first check whether there's already a batch prepared statement created */
        boolean inTheMiddleOfABatch = false;
        PreparedStatement stmt = this.getBatchPreparedStatement();
        int currentParamCount = this.getParamCount();
        /* create a new prepared statement */
        if (stmt == null) {
            /* batch mode is not supported for dynamic queries */
            Object[] result = this.processDynamicQuery(this.getQuery(), params);
            String dynamicSQL = (String) result[0];
            // dynamic processing may change the effective parameter count
            currentParamCount = (Integer) result[1];
            String processedSQL = this.createProcessedQuery(dynamicSQL, params, currentParamCount);
            if (queryType == SQLQuery.DS_QUERY_TYPE_NORMAL) {
                if (this.isReturnGeneratedKeys()) {
                    // prefer explicit key columns when configured; otherwise ask the
                    // driver to return all generated keys
                    if (this.getKeyColumns() != null) {
                        stmt = conn.prepareStatement(processedSQL, this.getKeyColumns());
                    } else {
                        stmt = conn.prepareStatement(processedSQL, Statement.RETURN_GENERATED_KEYS);
                    }
                } else {
                    stmt = conn.prepareStatement(processedSQL);
                }
            } else if (queryType == SQLQuery.DS_QUERY_TYPE_STORED_PROC) {
                stmt = conn.prepareCall(processedSQL);
            } else {
                throw new DataServiceFault("Unsupported query type: " + queryType);
            }
        } else {
            inTheMiddleOfABatch = true;
        }
        // statement-level settings are applied only once per batch, on the first use
        if (!inTheMiddleOfABatch) {
            /* set query timeout */
            if (this.isHasQueryTimeout()) {
                stmt.setQueryTimeout(this.getQueryTimeout());
            }
            /*
             * the try/catch guards against JDBC drivers that do not implement
             * the fetch direction/size methods
             */
            try {
                /* set fetch direction */
                if (this.isHasFetchDirection()) {
                    stmt.setFetchDirection(this.getFetchDirection());
                }
                /* set fetch size - user's setting */
                if (this.isHasFetchSize()) {
                    stmt.setFetchSize(this.getFetchSize());
                } else {
                    /*
                     * stream data by sections - avoid the full result set being
                     * loaded into memory, and only stream if there aren't any OUT
                     * parameters; MySQL fails when combining streaming and OUT
                     * parameters, and other DBMSs may do the same
                     */
                    if (!this.hasOutParams() && this.getFetchSizeProperty().isChangeFetchSize()) {
                        stmt.setFetchSize(this.getFetchSizeProperty().getFetchSize());
                    }
                }
            } catch (Throwable e) {
                log.debug("Exception while setting fetch size: " + e.getMessage(), e);
            }
            /* set max field size */
            if (this.isHasMaxFieldSize()) {
                stmt.setMaxFieldSize(this.getMaxFieldSize());
            }
            /* set max rows */
            if (this.isHasMaxRows()) {
                stmt.setMaxRows(this.getMaxRows());
            }
        }
        int currentOrdinal = 0;
        InternalParam param;
        ParamValue value;
        for (int i = 1; i <= currentParamCount; i++) {
            param = params.getParam(i);
            value = param.getValue();
            /*
             * handle array values; if value is null, this param has to be an OUT
             * param
             */
            if (value != null && value.getValueType() == ParamValue.PARAM_VALUE_ARRAY) {
                // each array element consumes its own ordinal position
                for (ParamValue arrayElement : value.getArrayValue()) {
                    this.setParamInPreparedStatement(stmt, param,
                            arrayElement == null ? null : arrayElement.toString(), queryType, currentOrdinal);
                    currentOrdinal++;
                }
            } else {
                /* scalar value */
                this.setParamInPreparedStatement(stmt, param,
                        value != null ? value.getScalarValue() : null, queryType, currentOrdinal);
                currentOrdinal++;
            }
        }
        /* if we are in JDBC batch processing mode, batch it! */
        if (this.isJDBCBatchRequest()) {
            stmt.addBatch();
        }
        return stmt;
    } catch (SQLException e) {
        throw new DataServiceFault(e, "Error in 'createProcessedPreparedStatement'");
    }
}
From source file:org.wso2.carbon.user.core.hybrid.HybridRoleManager.java
/**
 * Returns the names of hybrid (internal) roles matching the given filter.
 * Wildcards {@code *} and {@code ?} in the filter are translated to SQL
 * {@code %} and {@code _}; a blank filter matches everything. Results are
 * capped by the configured max role list size, and names without a domain
 * are prefixed with the internal domain.
 *
 * @param filter role-name filter, may contain {@code *} / {@code ?} wildcards
 * @return matching role names (possibly empty)
 * @throws UserStoreException if the connection is null or a SQL error occurs
 */
public String[] getHybridRoles(String filter) throws UserStoreException {
    Connection dbConnection = null;
    PreparedStatement prepStmt = null;
    ResultSet rs = null;
    String sqlStmt = HybridJDBCConstants.GET_ROLES;
    int maxItemLimit = UserCoreConstants.MAX_USER_ROLE_LIST;
    int searchTime = UserCoreConstants.MAX_SEARCH_TIME;
    // Read the row cap from realm configuration; fall back to the default on
    // any parse/lookup failure.
    try {
        maxItemLimit = Integer.parseInt(
                realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_ROLE_LIST));
    } catch (Exception e) {
        maxItemLimit = DEFAULT_MAX_ROLE_LIST_SIZE;
    }
    // Same fallback pattern for the query timeout (seconds).
    try {
        searchTime = Integer.parseInt(
                realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_SEARCH_TIME));
    } catch (Exception e) {
        searchTime = DEFAULT_MAX_SEARCH_TIME;
    }
    try {
        // Normalize the filter: map user wildcards to SQL LIKE wildcards,
        // or match everything when blank.
        if (filter != null && filter.trim().length() != 0) {
            filter = filter.trim();
            filter = filter.replace("*", "%");
            filter = filter.replace("?", "_");
        } else {
            filter = "%";
        }
        dbConnection = DatabaseUtil.getDBConnection(dataSource);
        if (dbConnection == null) {
            throw new UserStoreException("null connection");
        }
        dbConnection.setAutoCommit(false);
        dbConnection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        prepStmt = dbConnection.prepareStatement(sqlStmt);
        prepStmt.setString(1, filter);
        // The tenant parameter is only bound when the query is tenant-scoped.
        if (sqlStmt.contains(UserCoreConstants.UM_TENANT_COLUMN)) {
            prepStmt.setInt(2, tenantId);
        }
        prepStmt.setMaxRows(maxItemLimit);
        try {
            prepStmt.setQueryTimeout(searchTime);
        } catch (Exception e) {
            // this can be ignored since the timeout method may not be implemented
            log.debug(e);
        }
        List<String> filteredRoles = new ArrayList<String>();
        try {
            rs = prepStmt.executeQuery();
        } catch (SQLException e) {
            log.error("Error while retrieving roles from Internal JDBC role store", e);
            // May be due to a timeout; deliberately ignored so an empty list is returned
        }
        if (rs != null) {
            while (rs.next()) {
                String name = rs.getString(1);
                // Append the internal domain when the name has no domain prefix
                if (!name.contains(UserCoreConstants.DOMAIN_SEPARATOR)) {
                    name = UserCoreConstants.INTERNAL_DOMAIN + CarbonConstants.DOMAIN_SEPARATOR + name;
                }
                filteredRoles.add(name);
            }
        }
        return filteredRoles.toArray(new String[filteredRoles.size()]);
    } catch (SQLException e) {
        String errorMessage = "Error occurred while getting hybrid roles from filter : " + filter;
        if (log.isDebugEnabled()) {
            log.debug(errorMessage, e);
        }
        throw new UserStoreException(errorMessage, e);
    } finally {
        DatabaseUtil.closeAllConnections(dbConnection, rs, prepStmt);
    }
}
From source file:org.wso2.carbon.user.core.jdbc.JDBCUserStoreManager.java
/**
 * Lists user names matching the given filter, up to {@code maxItemLimit} rows.
 * Wildcards {@code *} and {@code ?} are translated to SQL {@code %} / {@code _};
 * a blank filter matches everything. The anonymous registry user is skipped,
 * the configured domain is prepended to each name, and the result is sorted.
 *
 * @param filter       user-name filter, may contain {@code *} / {@code ?} wildcards
 * @param maxItemLimit maximum rows to return; 0 returns nothing, negative or
 *                     oversized values are clamped to the configured maximum
 * @return sorted matching user names (possibly empty)
 * @throws UserStoreException if the connection is null or a SQL error occurs
 */
public String[] doListUsers(String filter, int maxItemLimit) throws UserStoreException {
    String[] users = new String[0];
    Connection dbConnection = null;
    String sqlStmt = null;
    PreparedStatement prepStmt = null;
    ResultSet rs = null;
    // A zero limit is an explicit request for no results.
    if (maxItemLimit == 0) {
        return new String[0];
    }
    int givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;
    int searchTime = UserCoreConstants.MAX_SEARCH_TIME;
    // Read the configured maximum; fall back to the default on any failure.
    try {
        givenMax = Integer.parseInt(
                realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_USER_LIST));
    } catch (Exception e) {
        givenMax = UserCoreConstants.MAX_USER_ROLE_LIST;
    }
    // Same fallback pattern for the query timeout (seconds).
    try {
        searchTime = Integer.parseInt(
                realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_SEARCH_TIME));
    } catch (Exception e) {
        searchTime = UserCoreConstants.MAX_SEARCH_TIME;
    }
    // Clamp negative or oversized caller limits to the configured maximum.
    if (maxItemLimit < 0 || maxItemLimit > givenMax) {
        maxItemLimit = givenMax;
    }
    try {
        // Normalize the filter: map user wildcards to SQL LIKE wildcards,
        // or match everything when blank.
        if (filter != null && filter.trim().length() != 0) {
            filter = filter.trim();
            filter = filter.replace("*", "%");
            filter = filter.replace("?", "_");
        } else {
            filter = "%";
        }
        List<String> lst = new LinkedList<String>();
        dbConnection = getDBConnection();
        if (dbConnection == null) {
            throw new UserStoreException("null connection");
        }
        // Choose the query variant matching the store's case-sensitivity setting.
        if (isCaseSensitiveUsername()) {
            sqlStmt = realmConfig.getUserStoreProperty(JDBCRealmConstants.GET_USER_FILTER);
        } else {
            sqlStmt = realmConfig.getUserStoreProperty(JDBCRealmConstants.GET_USER_FILTER_CASE_INSENSITIVE);
        }
        prepStmt = dbConnection.prepareStatement(sqlStmt);
        prepStmt.setString(1, filter);
        // The tenant parameter is only bound when the query is tenant-scoped.
        if (sqlStmt.contains(UserCoreConstants.UM_TENANT_COLUMN)) {
            prepStmt.setInt(2, tenantId);
        }
        prepStmt.setMaxRows(maxItemLimit);
        try {
            prepStmt.setQueryTimeout(searchTime);
        } catch (Exception e) {
            // this can be ignored since the timeout method may not be implemented
            log.debug(e);
        }
        try {
            rs = prepStmt.executeQuery();
        } catch (SQLException e) {
            // Timeouts are tolerated and return the empty result instead of failing.
            if (e instanceof SQLTimeoutException) {
                log.error("The cause might be a time out. Hence ignored", e);
                return users;
            }
            String errorMessage = "Error while fetching users according to filter : " + filter
                    + " & max Item limit " + ": " + maxItemLimit;
            if (log.isDebugEnabled()) {
                log.debug(errorMessage, e);
            }
            throw new UserStoreException(errorMessage, e);
        }
        while (rs.next()) {
            String name = rs.getString(1);
            // Never expose the anonymous registry user.
            if (CarbonConstants.REGISTRY_ANONNYMOUS_USERNAME.equals(name)) {
                continue;
            }
            // append the domain if it exists
            String domain = realmConfig
                    .getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_DOMAIN_NAME);
            name = UserCoreUtil.addDomainToName(name, domain);
            lst.add(name);
        }
        rs.close();
        if (lst.size() > 0) {
            users = lst.toArray(new String[lst.size()]);
        }
        Arrays.sort(users);
    } catch (SQLException e) {
        String msg = "Error occurred while retrieving users for filter : " + filter + " & max Item limit : "
                + maxItemLimit;
        if (log.isDebugEnabled()) {
            log.debug(msg, e);
        }
        throw new UserStoreException(msg, e);
    } finally {
        DatabaseUtil.closeAllConnections(dbConnection, rs, prepStmt);
    }
    return users;
}
From source file:org.wso2.carbon.user.core.jdbc.JDBCUserStoreManager.java
private void setPSRestrictions(PreparedStatement ps, int maxItemLimit) throws SQLException { int givenMax = UserCoreConstants.MAX_USER_ROLE_LIST; int searchTime = UserCoreConstants.MAX_SEARCH_TIME; try {/*from w ww .j av a 2 s . c o m*/ givenMax = Integer.parseInt( realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_ROLE_LIST)); } catch (Exception e) { givenMax = UserCoreConstants.MAX_USER_ROLE_LIST; } try { searchTime = Integer.parseInt( realmConfig.getUserStoreProperty(UserCoreConstants.RealmConfig.PROPERTY_MAX_SEARCH_TIME)); } catch (Exception e) { searchTime = UserCoreConstants.MAX_SEARCH_TIME; } if (maxItemLimit < 0 || maxItemLimit > givenMax) { maxItemLimit = givenMax; } ps.setMaxRows(maxItemLimit); try { ps.setQueryTimeout(searchTime); } catch (Exception e) { // this can be ignored since timeout method is not implemented log.debug(e); } }
From source file:pub.platform.db.DatabaseConnection.java
/** * Description of the Method//from ww w. ja v a 2 s . c o m * * @param pst * Description of the Parameter * @param beginIndex * Description of the Parameter * @param resultNo * Description of the Parameter * @return Description of the Return Value */ public RecordSet executeQuery(PreparedStatement pst, int beginIndex, int resultNo) { if (pst == null) { System.out.println("DatabaseConnection.executeQuery's pst parameter is null!!!"); return new RecordSet(); } try { pst.setMaxRows(beginIndex - 1 + resultNo); ResultSet rs = pst.executeQuery(); if (beginIndex != 1) { rs.absolute(beginIndex - 1); } RecordSet records = new RecordSet(rs, resultNo); rs.close(); pst.close(); return records; } catch (SQLException sqle) { errorException = sqle; logger.error("", sqle); return new RecordSet(); } }
From source file:ro.nextreports.engine.queryexec.QueryExecutor.java
private PreparedStatement createStatement(String queryString) throws QueryException { // create the prepared statement PreparedStatement pstmt; try {//from w ww . j av a2s. c o m boolean hasScrollType = false; try { hasScrollType = DialectUtil.isSupportedResultSetType(conn, ResultSet.TYPE_SCROLL_INSENSITIVE); } catch (Exception ex) { ex.printStackTrace(); LOG.error(ex.getMessage(), ex); } int resultSetType = hasScrollType ? ResultSet.TYPE_SCROLL_INSENSITIVE : ResultSet.TYPE_FORWARD_ONLY; if (QueryUtil.isProcedureCall(queryString)) { pstmt = conn.prepareCall("{" + queryString + "}", resultSetType, ResultSet.CONCUR_READ_ONLY); } else { if (isCsv) { pstmt = conn.prepareStatement(queryString); } else { boolean keepCursorsOverCommit = false; try { Dialect dialect = DialectUtil.getDialect(conn); keepCursorsOverCommit = dialect.needsHoldCursorsForPreparedStatement(); } catch (DialectException e) { e.printStackTrace(); LOG.error(e.getMessage(), e); } if (keepCursorsOverCommit) { pstmt = conn.prepareStatement(queryString, resultSetType, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); } else { pstmt = conn.prepareStatement(queryString, resultSetType, ResultSet.CONCUR_READ_ONLY); } } } // ignore queryTimeout and maxRows (some drivers - derby - not implement // these feature yet) try { // set timeout pstmt.setQueryTimeout(timeout); // set max rows pstmt.setMaxRows(maxRows); } catch (SQLException e) { LOG.warn(e); } } catch (SQLException e) { throw new QueryException(e); } return pstmt; }
From source file:swp.bibjsf.persistence.Data.java
/**
 * Generic retrieval of elements from a given table fulfilling given
 * constraints and sorted by given order. Only the elements from...to in
 * that order are returned.
 *
 * Pagination note: we retrieve only a slice of the matching results to keep
 * server memory and time bounded. Derby 10.7+ supports OFFSET/FETCH
 * ("SELECT * FROM T ORDER BY I OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY"),
 * but Glassfish 3.1 ships Derby 10.6.2.1, which does not; if we ever migrate
 * to a newer Derby we should switch to OFFSET/FETCH. Instead we follow the
 * pagination strategy described at
 * http://www.onjava.com/pub/a/onjava/2007/01/31/tuning-derby.html:
 * setMaxRows is set to the last row needed (plus one), so all rows up to
 * 'to' are fetched and then filtered down to [from, to]. JDBC offers no way
 * to start at a given row, so performance is good for early pages and
 * degrades for later ones — acceptable since users rarely browse far.
 *
 * @param constraints
 *            constraints to be fulfilled
 * @param from
 *            index of first relevant element (index of very first element
 *            is 0)
 * @param to
 *            index of last relevant element
 * @param order
 *            the ordering
 * @param table
 *            name of the table from which to retrieve the elements
 * @param clazz
 *            the class of the elements to be retrieved, i.e., Element.class
 * @return matching elements
 * @throws DataSourceException
 */
public <Element extends BusinessObject> List<Element> getElements(List<Constraint> constraints, final int from,
        final int to, List<OrderBy> order, String table, Class<Element> clazz) throws DataSourceException {
    logger.debug("get elements for table " + table);
    ArrayList<Element> allResults = new ArrayList<Element>();
    try {
        Connection connection = dataSource.getConnection();
        try {
            String query = "SELECT * FROM " + table + toQuery(constraints) + toOrderByClause(order);
            logger.debug("getElements " + query);
            PreparedStatement stmt = connection.prepareStatement(query);
            try {
                try {
                    // cap the fetch at the last row of the requested page (+1)
                    stmt.setMaxRows(to + 1);
                } catch (SQLException e) {
                    // ignore this exception and try to run the query anyway
                }
                fillInArguments(constraints, stmt);
                ResultSet rs = stmt.executeQuery();
                try {
                    // BeanHandler converts one ResultSet row into an Element bean
                    ResultSetHandler<Element> handler = new BeanHandler<Element>(clazz);
                    int i = 0;
                    Element reader;
                    while ((reader = handler.handle(rs)) != null) {
                        // keep only rows inside the requested [from, to] window
                        if (from <= i && i <= to) {
                            allResults.add(reader);
                        } else if (i > to) {
                            break;
                        }
                        i++;
                    }
                } finally {
                    rs.close();
                }
            } finally {
                stmt.close();
            }
        } finally {
            connection.close();
        }
    } catch (SQLException e) {
        logger.error(e.getLocalizedMessage());
        throw new DataSourceException(e.getLocalizedMessage());
    }
    return allResults;
}
From source file:uk.ac.cam.cl.dtg.segue.dos.PgEmailPreferenceManager.java
@Override public IEmailPreference getEmailPreference(final long userId, final EmailType emailType) throws SegueDatabaseException { PreparedStatement pst; try (Connection conn = database.getDatabaseConnection()) { pst = conn.prepareStatement(//from w w w .ja va 2 s.c o m "SELECT * FROM user_email_preferences WHERE user_id=? AND email_preference=?"); pst.setLong(1, userId); pst.setInt(2, emailType.mapEmailTypeToInt()); pst.setMaxRows(1); ResultSet results = pst.executeQuery(); if (results.next()) { boolean emailPreferenceStatus = results.getBoolean("email_preference_status"); return new PgEmailPreference(userId, emailType, emailPreferenceStatus); } else { //set defaults for those email preferences that have not been found return new PgEmailPreference(userId, emailType, true); } } catch (SQLException e) { throw new SegueDatabaseException("Postgres exception", e); } }
From source file:uk.ac.cam.cl.dtg.segue.dos.PgUserPreferenceManager.java
@Override public UserPreference getUserPreference(String preferenceType, String preferenceName, long userId) throws SegueDatabaseException { Validate.notBlank(preferenceType);// ww w. j av a 2 s . c om Validate.notBlank(preferenceName); try (Connection conn = database.getDatabaseConnection()) { PreparedStatement pst; pst = conn.prepareStatement( "SELECT * FROM user_preferences WHERE user_id=? AND preference_type=? AND preference_name=?;"); pst.setLong(1, userId); pst.setString(2, preferenceType); pst.setString(3, preferenceName); pst.setMaxRows(1); // There is a primary key to ensure uniqueness! ResultSet results = pst.executeQuery(); if (results.next()) { return userPreferenceFromResultSet(results); } // We must not have found anything: return null; } catch (SQLException e) { throw new SegueDatabaseException("Postgres exception", e); } }