Example usage for java.sql PreparedStatement setFetchSize

List of usage examples for java.sql PreparedStatement setFetchSize

Introduction

On this page you can find example usage for java.sql PreparedStatement setFetchSize.

Prototype

void setFetchSize(int rows) throws SQLException;

Source Link

Document

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for ResultSet objects generated by this Statement.

Usage

From source file:org.apache.gobblin.source.jdbc.JdbcExtractor.java

/**
 * Executes a query via a JDBC PreparedStatement, binding positional query
 * parameters and applying a fetch-size hint when one is supplied.
 *
 * @param cmds commands - query, fetch size, query parameters
 * @return JDBC ResultSet wrapped in a CommandOutput; the ResultSet is null
 *         when execution failed (the error is logged, not rethrown)
 */
private CommandOutput<?, ?> executePreparedSql(List<Command> cmds) {
    String query = null;
    List<String> queryParameters = null;
    int fetchSize = 0;

    // Unpack the command list: one command each for the SQL text, the
    // positional parameters, and the optional fetch size.
    for (Command cmd : cmds) {
        if (cmd instanceof JdbcCommand) {
            JdbcCommandType type = (JdbcCommandType) cmd.getCommandType();
            switch (type) {
            case QUERY:
                query = cmd.getParams().get(0);
                break;
            case QUERYPARAMS:
                queryParameters = cmd.getParams();
                break;
            case FETCHSIZE:
                fetchSize = Integer.parseInt(cmd.getParams().get(0));
                break;
            default:
                this.log.error("Command " + type.toString() + " not recognized");
                break;
            }
        }
    }

    this.log.info("Executing query:" + query);
    ResultSet resultSet = null;
    PreparedStatement statement = null;
    try {
        this.jdbcSource = createJdbcSource();
        if (this.dataConnection == null) {
            this.dataConnection = this.jdbcSource.getConnection();
        }

        // Forward-only, read-only: results are streamed through exactly once.
        statement = this.dataConnection.prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY,
                ResultSet.CONCUR_READ_ONLY);

        int parameterPosition = 1;
        if (queryParameters != null && !queryParameters.isEmpty()) {
            for (String parameter : queryParameters) {
                statement.setString(parameterPosition, parameter);
                parameterPosition++;
            }
        }
        if (fetchSize != 0) {
            // Hint to the driver how many rows to buffer per round trip.
            statement.setFetchSize(fetchSize);
        }
        final boolean status = statement.execute();
        if (!status) {
            this.log.error("Failed to execute sql:" + query);
        }
        resultSet = statement.getResultSet();

    } catch (Exception e) {
        this.log.error("Failed to execute sql:" + query + " ;error-" + e.getMessage(), e);
        // Don't leak the statement on failure; the caller only ever sees the
        // (null) ResultSet in that case.
        if (statement != null) {
            try {
                statement.close();
            } catch (Exception ignored) {
                // best-effort cleanup; the original error is already logged
            }
        }
    }

    // NOTE(review): on success the statement is intentionally left open --
    // closing it here would also close the ResultSet handed to the caller.
    CommandOutput<JdbcCommand, ResultSet> output = new JdbcCommandOutput();
    output.put((JdbcCommand) cmds.get(0), resultSet);
    return output;
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.java

/**
 * Builds and parameterizes the SELECT statement used to read metric rows,
 * choosing the aggregate table that matches the requested precision and
 * inferring a precision from the time-range width when none is given.
 *
 * @param connection open connection the statement is prepared on
 * @param condition  query condition carrying metric names, host, app,
 *                   instance and time range
 * @return a PreparedStatement ready to execute; the caller owns and closes it
 * @throws SQLException if preparation or parameter binding fails
 */
public static PreparedStatement prepareGetMetricsSqlStmt(Connection connection, Condition condition)
        throws SQLException {

    validateConditionIsNotEmpty(condition);
    validateRowCountLimit(condition);

    String stmtStr;
    if (condition.getStatement() != null) {
        // Caller supplied a complete statement; use it verbatim.
        stmtStr = condition.getStatement();
    } else {

        String metricsTable;
        String query;
        if (condition.getPrecision() == null) {
            // No precision requested: infer one from the width of the range.
            long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
            long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
            long timeRange = endTime - startTime;
            if (timeRange > 5 * DAY) {
                metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
                query = GET_METRIC_AGGREGATE_ONLY_SQL;
                condition.setPrecision(Precision.HOURS);
            } else if (timeRange > 10 * HOUR) {
                metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
                query = GET_METRIC_AGGREGATE_ONLY_SQL;
                condition.setPrecision(Precision.MINUTES);
            } else {
                metricsTable = METRICS_RECORD_TABLE_NAME;
                query = GET_METRIC_SQL;
                condition.setPrecision(Precision.SECONDS);
            }
        } else {
            switch (condition.getPrecision()) {
            case HOURS:
                metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
                query = GET_METRIC_AGGREGATE_ONLY_SQL;
                break;
            case MINUTES:
                metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
                query = GET_METRIC_AGGREGATE_ONLY_SQL;
                break;
            default:
                metricsTable = METRICS_RECORD_TABLE_NAME;
                query = GET_METRIC_SQL;
            }
        }

        stmtStr = String.format(query, getNaiveTimeRangeHint(condition.getStartTime(), NATIVE_TIME_RANGE_DELTA),
                metricsTable);
    }

    StringBuilder sb = new StringBuilder(stmtStr);
    sb.append(" WHERE ");
    sb.append(condition.getConditionClause());
    String orderByClause = condition.getOrderByClause(true);

    if (orderByClause != null) {
        sb.append(orderByClause);
    } else {
        sb.append(" ORDER BY METRIC_NAME, SERVER_TIME ");
    }
    if (condition.getLimit() != null) {
        sb.append(" LIMIT ").append(condition.getLimit());
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
    }
    PreparedStatement stmt = connection.prepareStatement(sb.toString());
    try {
        // Bind positional parameters in the same order the condition clause
        // emits its placeholders: metric names, host, app, instance, times.
        int pos = 1;
        if (condition.getMetricNames() != null) {
            for (; pos <= condition.getMetricNames().size(); pos++) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
                }
                stmt.setString(pos, condition.getMetricNames().get(pos - 1));
            }
        }
        if (condition.getHostname() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
            }
            stmt.setString(pos++, condition.getHostname());
        }
        if (condition.getAppId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
            }
            stmt.setString(pos++, condition.getAppId());
        }
        if (condition.getInstanceId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
            }
            stmt.setString(pos++, condition.getInstanceId());
        }
        if (condition.getStartTime() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getStartTime());
            }
            stmt.setLong(pos++, condition.getStartTime());
        }
        if (condition.getEndTime() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getEndTime());
            }
            // Last positional parameter, so pos is not incremented.
            stmt.setLong(pos, condition.getEndTime());
        }
        if (condition.getFetchSize() != null) {
            stmt.setFetchSize(condition.getFetchSize());
        }
    } catch (SQLException e) {
        // Don't leak the statement if binding fails; rethrow the cause.
        stmt.close();
        throw e;
    }

    return stmt;
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixTransactSQL.java

/**
 * Builds the point-in-time ("latest value") metric query. One row per metric
 * name is expected, so the LIMIT is derived from the metric-name count and
 * the condition's own limit is deliberately ignored.
 *
 * @param connection open connection the statement is prepared on
 * @param condition  query condition; must carry at least one metric name
 * @return a PreparedStatement ready to execute; the caller owns and closes it
 * @throws SQLException             if preparation or parameter binding fails
 * @throws IllegalArgumentException if no metric names are supplied
 */
public static PreparedStatement prepareGetLatestMetricSqlStmt(Connection connection, Condition condition)
        throws SQLException {

    validateConditionIsNotEmpty(condition);

    if (condition.getMetricNames() == null || condition.getMetricNames().size() == 0) {
        throw new IllegalArgumentException("Point in time query without " + "metric names not supported ");
    }

    String stmtStr;
    if (condition.getStatement() != null) {
        stmtStr = condition.getStatement();
    } else {
        stmtStr = String.format(GET_METRIC_SQL, "", METRICS_RECORD_TABLE_NAME);
    }

    StringBuilder sb = new StringBuilder(stmtStr);
    sb.append(" WHERE ");
    sb.append(condition.getConditionClause());
    String orderByClause = condition.getOrderByClause(false);
    if (orderByClause != null) {
        sb.append(orderByClause);
    } else {
        sb.append(" ORDER BY METRIC_NAME DESC, HOSTNAME DESC, SERVER_TIME DESC ");
    }

    //IGNORE condition limit, set one based on number of metric names
    sb.append(" LIMIT ").append(condition.getMetricNames().size());

    if (LOG.isDebugEnabled()) {
        LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
    }
    PreparedStatement stmt = connection.prepareStatement(sb.toString());
    try {
        int pos = 1;
        // Metric names are guaranteed non-empty by the guard above.
        for (; pos <= condition.getMetricNames().size(); pos++) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
            }
            stmt.setString(pos, condition.getMetricNames().get(pos - 1));
        }
        if (condition.getHostname() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getHostname());
            }
            stmt.setString(pos++, condition.getHostname());
        }
        if (condition.getAppId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
            }
            stmt.setString(pos++, condition.getAppId());
        }
        if (condition.getInstanceId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
            }
            stmt.setString(pos++, condition.getInstanceId());
        }

        if (condition.getFetchSize() != null) {
            stmt.setFetchSize(condition.getFetchSize());
        }
    } catch (SQLException e) {
        // Don't leak the statement if binding fails; rethrow the cause.
        stmt.close();
        throw e;
    }

    return stmt;
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.java

/**
 * Builds and parameterizes the SELECT statement used to read metric rows.
 * Chooses the aggregate table matching the requested precision (inferring a
 * precision from the time range when none is given), appends the condition's
 * WHERE/ORDER BY/LIMIT clauses, then binds the positional parameters.
 *
 * @param connection open connection the statement is prepared on
 * @param condition  query condition carrying metric names, hosts, app,
 *                   instance and time range
 * @return a PreparedStatement ready to execute; the caller owns and closes it
 * @throws SQLException if preparation or parameter binding fails (the
 *                      half-built statement is closed before rethrowing)
 */
public static PreparedStatement prepareGetMetricsSqlStmt(Connection connection, Condition condition)
        throws SQLException {

    validateConditionIsNotEmpty(condition);
    validateRowCountLimit(condition);

    String stmtStr;
    if (condition.getStatement() != null) {
        // Caller supplied a complete statement; use it verbatim.
        stmtStr = condition.getStatement();
    } else {

        String metricsTable;
        String query;
        if (condition.getPrecision() == null) {
            // No precision requested: derive one from the time range.
            long endTime = condition.getEndTime() == null ? System.currentTimeMillis() : condition.getEndTime();
            long startTime = condition.getStartTime() == null ? 0 : condition.getStartTime();
            Precision precision = Precision.getPrecision(startTime, endTime);
            condition.setPrecision(precision);
        }
        // Pick the table (and matching SQL template) for the precision.
        switch (condition.getPrecision()) {
        case DAYS:
            metricsTable = METRICS_AGGREGATE_DAILY_TABLE_NAME;
            query = GET_METRIC_AGGREGATE_ONLY_SQL;
            break;
        case HOURS:
            metricsTable = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
            query = GET_METRIC_AGGREGATE_ONLY_SQL;
            break;
        case MINUTES:
            metricsTable = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
            query = GET_METRIC_AGGREGATE_ONLY_SQL;
            break;
        default:
            metricsTable = METRICS_RECORD_TABLE_NAME;
            query = GET_METRIC_SQL;
        }

        stmtStr = String.format(query, getNaiveTimeRangeHint(condition.getStartTime(), NATIVE_TIME_RANGE_DELTA),
                metricsTable);
    }

    StringBuilder sb = new StringBuilder(stmtStr);

    // An EmptyCondition carries no clauses at all, so skip WHERE/ORDER BY.
    if (!(condition instanceof EmptyCondition)) {
        sb.append(" WHERE ");
        sb.append(condition.getConditionClause());
        String orderByClause = condition.getOrderByClause(true);
        if (orderByClause != null) {
            sb.append(orderByClause);
        } else {
            sb.append(" ORDER BY METRIC_NAME, SERVER_TIME ");
        }
    }

    if (condition.getLimit() != null) {
        sb.append(" LIMIT ").append(condition.getLimit());
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("SQL: " + sb.toString() + ", condition: " + condition);
    }

    PreparedStatement stmt = null;
    try {
        stmt = connection.prepareStatement(sb.toString());
        // Bind positional parameters in the same order the condition clause
        // emits its placeholders: metric names, hosts, app, instance, times.
        int pos = 1;
        if (condition.getMetricNames() != null) {
            for (; pos <= condition.getMetricNames().size(); pos++) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Setting pos: " + pos + ", value = " + condition.getMetricNames().get(pos - 1));
                }
                stmt.setString(pos, condition.getMetricNames().get(pos - 1));
            }
        }
        if (condition.getHostnames() != null) {
            for (String hostname : condition.getHostnames()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Setting pos: " + pos + ", value: " + hostname);
                }
                stmt.setString(pos++, hostname);
            }
        }
        if (condition.getAppId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
            }
            stmt.setString(pos++, condition.getAppId());
        }
        if (condition.getInstanceId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
            }
            stmt.setString(pos++, condition.getInstanceId());
        }
        if (condition.getStartTime() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getStartTime());
            }
            stmt.setLong(pos++, condition.getStartTime());
        }
        if (condition.getEndTime() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getEndTime());
            }
            // Last positional parameter, so pos is not incremented.
            stmt.setLong(pos, condition.getEndTime());
        }
        if (condition.getFetchSize() != null) {
            // Fetch size is a driver hint, not a bound parameter.
            stmt.setFetchSize(condition.getFetchSize());
        }
    } catch (SQLException e) {
        // Don't leak the statement if preparation/binding fails.
        if (stmt != null) {
            stmt.close();
        }
        throw e;
    }

    return stmt;
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL.java

/**
 * Binds the positional parameters carried by a {@code Condition} onto the
 * given statement and applies its fetch-size hint, if any.
 *
 * The do-while allows the same parameter sequence to be bound more than
 * once when the SQL repeats its placeholders (see the note on
 * GET_LATEST_METRIC_SQL_SINGLE_HOST below); the loop runs until {@code pos}
 * reaches the statement's actual parameter count.
 *
 * @param stmt      statement whose placeholders are filled in
 * @param condition source of metric names, hostnames, app and instance ids
 * @return the same statement, for chaining
 * @throws SQLException if any parameter binding fails
 */
private static PreparedStatement setQueryParameters(PreparedStatement stmt, Condition condition)
        throws SQLException {
    int pos = 1;
    //For GET_LATEST_METRIC_SQL_SINGLE_HOST parameters should be set 2 times
    do {
        if (condition.getMetricNames() != null) {
            for (String metricName : condition.getMetricNames()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Setting pos: " + pos + ", value = " + metricName);
                }
                stmt.setString(pos++, metricName);
            }
        }
        if (condition.getHostnames() != null) {
            for (String hostname : condition.getHostnames()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Setting pos: " + pos + ", value: " + hostname);
                }
                stmt.setString(pos++, hostname);
            }
        }
        if (condition.getAppId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getAppId());
            }
            stmt.setString(pos++, condition.getAppId());
        }
        if (condition.getInstanceId() != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Setting pos: " + pos + ", value: " + condition.getInstanceId());
            }
            stmt.setString(pos++, condition.getInstanceId());
        }

        if (condition.getFetchSize() != null) {
            stmt.setFetchSize(condition.getFetchSize());
            // NOTE(review): setFetchSize is a driver hint, not a bound
            // parameter, so this pos++ looks suspicious; but it also feeds
            // the do-while exit condition below -- confirm intent before
            // changing it.
            pos++;
        }
    } while (pos < stmt.getParameterMetaData().getParameterCount());

    return stmt;
}

From source file:org.apache.hive.storage.jdbc.dao.GenericJdbcDatabaseAccessor.java

/**
 * Opens a connection, runs the configured query bounded by the given limit
 * and offset, and returns an iterator over the resulting rows. Ownership of
 * the connection, statement and result set passes to the returned iterator;
 * on any failure they are cleaned up here before the exception is wrapped.
 *
 * @param conf   job configuration holding the query and fetch-size settings
 * @param limit  maximum number of rows to read
 * @param offset number of rows to skip
 * @return an iterator over the query results
 * @throws HiveJdbcDatabaseAccessException if the query cannot be executed
 */
@Override
public JdbcRecordIterator getRecordIterator(Configuration conf, int limit, int offset)
        throws HiveJdbcDatabaseAccessException {

    Connection dbConnection = null;
    PreparedStatement stmt = null;
    ResultSet resultSet = null;

    try {
        initializeDatabaseConnection(conf);
        String baseQuery = JdbcStorageConfigManager.getQueryToExecute(conf);
        String boundedQuery = addLimitAndOffsetToQuery(baseQuery, limit, offset);
        LOGGER.info("Query to execute is [{}]", boundedQuery);

        dbConnection = dbcpDataSource.getConnection();
        // Forward-only, read-only: the iterator streams rows exactly once.
        stmt = dbConnection.prepareStatement(boundedQuery, ResultSet.TYPE_FORWARD_ONLY,
                ResultSet.CONCUR_READ_ONLY);
        stmt.setFetchSize(getFetchSize(conf));
        resultSet = stmt.executeQuery();

        return new JdbcRecordIterator(dbConnection, stmt, resultSet);
    } catch (Exception e) {
        LOGGER.error("Caught exception while trying to execute query", e);
        cleanupResources(dbConnection, stmt, resultSet);
        throw new HiveJdbcDatabaseAccessException("Caught exception while trying to execute query", e);
    }
}

From source file:org.apache.lucene.store.jdbc.lock.PhantomReadLock.java

/**
 * Attempts to acquire the lock by inserting a marker row (named after the
 * lock) into the directory table.
 *
 * @return true if the insert succeeded (lock acquired); false if the row
 *         already exists or the insert failed
 */
public boolean obtain() {
    try {
        if (jdbcDirectory.getDialect().useExistsBeforeInsertLock()) {
            // there are databases where the fact that an exception was thrown
            // invalidates the connection. So first we check if it exists, and
            // then insert it.
            if (jdbcDirectory.fileExists(name)) {
                return false;
            }
        }
        jdbcDirectory.getJdbcTemplate().executeUpdate(jdbcDirectory.getTable().sqlInsert(),
                new JdbcTemplate.PrepateStatementAwareCallback() {
                    public void fillPrepareStatement(PreparedStatement ps) throws Exception {
                        // NOTE(review): fetch size is a ResultSet hint and has
                        // no effect on an INSERT -- presumably kept for
                        // uniformity with other callbacks; confirm.
                        ps.setFetchSize(1);
                        ps.setString(1, name);
                        ps.setNull(2, Types.BLOB);
                        ps.setLong(3, 0);
                        ps.setBoolean(4, false);
                    }
                });
    } catch (Exception e) {
        // A failed insert likely means another process holds the lock.
        if (log.isTraceEnabled()) {
            log.trace("Obtain Lock exception (might be valid) [" + e.getMessage() + "]");
        }
        return false;
    }
    return true;
}

From source file:org.apache.lucene.store.jdbc.lock.PhantomReadLock.java

/**
 * Releases the lock by deleting its marker row from the directory table.
 * Failures are traced and swallowed: a missing row simply means the lock is
 * already released.
 */
public void release() {
    try {
        jdbcDirectory.getJdbcTemplate().executeUpdate(jdbcDirectory.getTable().sqlDeleteByName(),
                new JdbcTemplate.PrepateStatementAwareCallback() {
                    public void fillPrepareStatement(PreparedStatement ps) throws Exception {
                        // NOTE(review): fetch size has no effect on a DELETE;
                        // presumably kept for uniformity -- confirm.
                        ps.setFetchSize(1);
                        ps.setString(1, name);
                    }
                });
    } catch (Exception e) {
        if (log.isTraceEnabled()) {
            log.trace("Release Lock exception (might be valid) [" + e.getMessage() + "]");
        }
    }
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Internal implementation for actually carrying out the query. Returns a closable iteration that can be used
 * in a repository result. The iteration is forward-only and does not allow removing result rows.
 *
 * @param subject    the subject to query for, or null for a wildcard query
 * @param predicate  the predicate to query for, or null for a wildcard query
 * @param object     the object to query for, or null for a wildcard query
 * @param context    the context to query for, or null for a wildcard query
 * @param inferred   if true, the result will also contain triples inferred by the reasoner, if false not
 * @param wildcardContext if true, a null context will be interpreted as a wildcard, if false, a null context will be interpreted as "no context"
 * @return a ClosableIteration that wraps the database ResultSet; needs to be closed explicitly by the caller
 * @throws SQLException
 */
private CloseableIteration<Statement, SQLException> listTriplesInternal(KiWiResource subject,
        KiWiUriResource predicate, KiWiNode object, KiWiResource context, boolean inferred,
        final boolean wildcardContext) throws SQLException {
    // if one of the database ids is null, there will not be any database results, so we can return an empty result
    if (subject != null && subject.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (predicate != null && predicate.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (object != null && object.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (context != null && context.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }

    requireJDBCConnection();

    // otherwise we need to create an appropriate SQL query and execute it, the repository result will be read-only
    // and only allow forward iteration, so we can limit the query using the respective flags
    PreparedStatement query = connection.prepareStatement(
            constructTripleQuery(subject, predicate, object, context, inferred, wildcardContext),
            ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    query.clearParameters();

    if (persistence.getDialect().isCursorSupported()) {
        // hint the driver to stream results in cursor-sized chunks
        query.setFetchSize(persistence.getConfiguration().getCursorSize());
    }

    // set query parameters; only non-null (non-wildcard) components appear
    // as placeholders, in subject/predicate/object/context order
    int position = 1;
    if (subject != null) {
        query.setLong(position++, subject.getId());
    }
    if (predicate != null) {
        query.setLong(position++, predicate.getId());
    }
    if (object != null) {
        query.setLong(position++, object.getId());
    }
    if (context != null) {
        query.setLong(position++, context.getId());
    }

    final ResultSet result = query.executeQuery();

    // Wrap the ResultSet in a lazy iteration that materializes triples in
    // batches of QUERY_BATCH_SIZE; closing the iteration closes the ResultSet.
    return new CloseableIteration<Statement, SQLException>() {

        // current batch of materialized triples, refilled on demand
        List<KiWiTriple> batch = null;
        // index of the next element to hand out within the current batch
        int batchPosition = 0;

        @Override
        public void close() throws SQLException {
            result.close();
        }

        @Override
        public boolean hasNext() throws SQLException {
            fetchBatch();

            return batch.size() > batchPosition;
        }

        @Override
        public Statement next() throws SQLException {
            fetchBatch();

            if (batch.size() > batchPosition) {
                return batch.get(batchPosition++);
            } else {
                return null;
            }
        }

        // refill the batch from the ResultSet when it is exhausted
        private void fetchBatch() throws SQLException {
            if (batch == null || batch.size() <= batchPosition) {
                batch = constructTriplesFromDatabase(result, QUERY_BATCH_SIZE);
                batchPosition = 0;
            }
        }

        @Override
        public void remove() throws SQLException {
            throw new UnsupportedOperationException("removing results not supported");
        }
    };
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Return the prepared statement with the given identifier; first looks in the statement cache and if it does
 * not exist there create a new statement.
 *
 * @param key the id of the statement in statements.properties
 * @return/*  w  w w  . j  a  v a 2s  .c o m*/
 * @throws SQLException
 */
public PreparedStatement getPreparedStatement(String key) throws SQLException {
    requireJDBCConnection();

    PreparedStatement statement = statementCache.get(key);
    if (statement == null || statement.isClosed()) {
        statement = connection.prepareStatement(dialect.getStatement(key), ResultSet.TYPE_FORWARD_ONLY,
                ResultSet.CONCUR_READ_ONLY);
        statementCache.put(key, statement);
    }
    statement.clearParameters();
    if (persistence.getDialect().isCursorSupported()) {
        statement.setFetchSize(persistence.getConfiguration().getCursorSize());
    }
    return statement;
}