Example usage for java.sql ResultSet FETCH_FORWARD

List of usage examples for java.sql ResultSet FETCH_FORWARD

Introduction

On this page you can find example usage for java.sql ResultSet FETCH_FORWARD.

Prototype

int FETCH_FORWARD

To view the source code for java.sql ResultSet FETCH_FORWARD, click the Source Link below.

Document

The constant indicating that the rows in a result set will be processed in a forward direction; first-to-last.

Usage

From source file:org.apache.hive.jdbc.HiveStatement.java

@Override
public int getFetchDirection() throws SQLException {
    // Verify the statement still has a live connection, then report the only
    // direction Hive supports: forward, first-to-last.
    checkConnection("getFetchDirection");
    return ResultSet.FETCH_FORWARD;
}

From source file:org.apache.hive.jdbc.HiveStatement.java

@Override
public void setFetchDirection(int direction) throws SQLException {
    // Verify the statement still has a live connection before validating input.
    checkConnection("setFetchDirection");
    // Hive only supports forward iteration; accept it and reject anything else.
    if (direction == ResultSet.FETCH_FORWARD) {
        return;
    }
    throw new SQLException("Not supported direction " + direction);
}

From source file:org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl.java

/**
 * Constructor. Registers every JDBC-specific configuration value (schema,
 * isolation, result-set behavior, plugins) and overrides several defaults
 * declared in the superclass so they point at JDBC-backed implementations.
 *
 * @param derivations whether to apply product derivations
 * @param loadGlobals whether to attempt to load the global properties
 */
public JDBCConfigurationImpl(boolean derivations, boolean loadGlobals) {
    super(false, false);
    String[] aliases;

    // Target schema(s) the mapping tool and runtime should operate on.
    schema = addString("jdbc.Schema");
    schemas = addStringList("jdbc.Schemas");

    // Transaction isolation: symbolic aliases map to java.sql.Connection
    // constants; -1 ("default") defers to the driver's own default level.
    transactionIsolation = addInt("jdbc.TransactionIsolation");
    aliases = new String[] { "default", String.valueOf(-1), "none", String.valueOf(Connection.TRANSACTION_NONE),
            "read-committed", String.valueOf(Connection.TRANSACTION_READ_COMMITTED), "read-uncommitted",
            String.valueOf(Connection.TRANSACTION_READ_UNCOMMITTED), "repeatable-read",
            String.valueOf(Connection.TRANSACTION_REPEATABLE_READ), "serializable",
            String.valueOf(Connection.TRANSACTION_SERIALIZABLE) };
    transactionIsolation.setAliases(aliases);
    transactionIsolation.setDefault(aliases[0]);
    transactionIsolation.set(-1);
    transactionIsolation.setAliasListComprehensive(true);

    // Result set scrollability; defaults to the cheapest option, forward-only.
    resultSetType = addInt("jdbc.ResultSetType");
    aliases = new String[] { "forward-only", String.valueOf(ResultSet.TYPE_FORWARD_ONLY), "scroll-sensitive",
            String.valueOf(ResultSet.TYPE_SCROLL_SENSITIVE), "scroll-insensitive",
            String.valueOf(ResultSet.TYPE_SCROLL_INSENSITIVE), };
    resultSetType.setAliases(aliases);
    resultSetType.setDefault(aliases[0]);
    resultSetType.set(ResultSet.TYPE_FORWARD_ONLY);
    resultSetType.setAliasListComprehensive(true);

    // Fetch direction hint passed to the driver; defaults to forward.
    fetchDirection = addInt("jdbc.FetchDirection");
    aliases = new String[] { "forward", String.valueOf(ResultSet.FETCH_FORWARD), "reverse",
            String.valueOf(ResultSet.FETCH_REVERSE), "unknown", String.valueOf(ResultSet.FETCH_UNKNOWN), };
    fetchDirection.setAliases(aliases);
    fetchDirection.setDefault(aliases[0]);
    fetchDirection.set(ResultSet.FETCH_FORWARD);
    fetchDirection.setAliasListComprehensive(true);

    // Eager-fetch strategies for to-many relations and subclass data.
    eagerFetchMode = new FetchModeValue("jdbc.EagerFetchMode");
    eagerFetchMode.setDefault(FetchModeValue.EAGER_PARALLEL);
    eagerFetchMode.set(EagerFetchModes.EAGER_PARALLEL);
    addValue(eagerFetchMode);

    subclassFetchMode = new FetchModeValue("jdbc.SubclassFetchMode");
    subclassFetchMode.setDefault(FetchModeValue.EAGER_JOIN);
    subclassFetchMode.set(EagerFetchModes.EAGER_JOIN);
    addValue(subclassFetchMode);

    // How the size of a large result set (LRS) collection is determined.
    lrsSize = addInt("jdbc.LRSSize");
    aliases = new String[] { "query", String.valueOf(LRSSizes.SIZE_QUERY), "unknown",
            String.valueOf(LRSSizes.SIZE_UNKNOWN), "last", String.valueOf(LRSSizes.SIZE_LAST), };
    lrsSize.setAliases(aliases);
    lrsSize.setDefault(aliases[0]);
    lrsSize.set(LRSSizes.SIZE_QUERY);
    lrsSize.setAliasListComprehensive(true);

    // Schema synchronization action ("false"/null means do nothing).
    synchronizeMappings = addString("jdbc.SynchronizeMappings");
    aliases = new String[] { "false", null };
    synchronizeMappings.setAliases(aliases);
    synchronizeMappings.setDefault(aliases[0]);

    // Plugin lists resolved lazily via the named instantiating getters.
    jdbcListenerPlugins = addPluginList("jdbc.JDBCListeners");
    jdbcListenerPlugins.setInstantiatingGetter("getJDBCListenerInstances");

    connectionDecoratorPlugins = addPluginList("jdbc.ConnectionDecorators");
    connectionDecoratorPlugins.setInstantiatingGetter("getConnectionDecoratorInstances");

    // Database dictionary: per-vendor SQL dialect implementations.
    dbdictionaryPlugin = addPlugin("jdbc.DBDictionary", true);
    aliases = new String[] { "access", "org.apache.openjpa.jdbc.sql.AccessDictionary", "db2",
            "org.apache.openjpa.jdbc.sql.DB2Dictionary", "derby", "org.apache.openjpa.jdbc.sql.DerbyDictionary",
            "empress", "org.apache.openjpa.jdbc.sql.EmpressDictionary", "foxpro",
            "org.apache.openjpa.jdbc.sql.FoxProDictionary", "h2", "org.apache.openjpa.jdbc.sql.H2Dictionary",
            "hsql", "org.apache.openjpa.jdbc.sql.HSQLDictionary", "informix",
            "org.apache.openjpa.jdbc.sql.InformixDictionary", "ingres",
            "org.apache.openjpa.jdbc.sql.IngresDictionary", "jdatastore",
            "org.apache.openjpa.jdbc.sql.JDataStoreDictionary", "mysql",
            "org.apache.openjpa.jdbc.sql.MySQLDictionary", "oracle",
            "org.apache.openjpa.jdbc.sql.OracleDictionary", "pointbase",
            "org.apache.openjpa.jdbc.sql.PointbaseDictionary", "postgres",
            "org.apache.openjpa.jdbc.sql.PostgresDictionary", "soliddb",
            "org.apache.openjpa.jdbc.sql.SolidDBDictionary", "sqlserver",
            "org.apache.openjpa.jdbc.sql.SQLServerDictionary", "sybase",
            "org.apache.openjpa.jdbc.sql.SybaseDictionary", "maxdb",
            MaxDBDictionary.class.getCanonicalName(), };
    dbdictionaryPlugin.setAliases(aliases);
    dbdictionaryPlugin.setInstantiatingGetter("getDBDictionaryInstance");

    // Update manager: controls statement ordering/batching during flush.
    updateManagerPlugin = addPlugin("jdbc.UpdateManager", true);
    aliases = new String[] { "default", BatchingConstraintUpdateManager.class.getName(), "operation-order",
            "org.apache.openjpa.jdbc.kernel.OperationOrderUpdateManager", "constraint",
            "org.apache.openjpa.jdbc.kernel.ConstraintUpdateManager", "batching-constraint",
            BatchingConstraintUpdateManager.class.getName(), "batching-operation-order",
            BatchingOperationOrderUpdateManager.class.getName(), };
    updateManagerPlugin.setAliases(aliases);
    updateManagerPlugin.setDefault(aliases[0]);
    updateManagerPlugin.setString(aliases[0]);
    updateManagerPlugin.setInstantiatingGetter("getUpdateManagerInstance");

    // DataSource wrapper used when only a JDBC driver class is configured.
    driverDataSourcePlugin = addPlugin("jdbc.DriverDataSource", false);
    aliases = new String[] { "auto", "org.apache.openjpa.jdbc.schema.AutoDriverDataSource", "simple",
            "org.apache.openjpa.jdbc.schema.SimpleDriverDataSource", "dbcp",
            "org.apache.openjpa.jdbc.schema.DBCPDriverDataSource", };
    driverDataSourcePlugin.setAliases(aliases);
    driverDataSourcePlugin.setDefault(aliases[0]);
    driverDataSourcePlugin.setString(aliases[0]);

    // Source of schema information (reflection, file, table, ...).
    schemaFactoryPlugin = addPlugin("jdbc.SchemaFactory", true);
    aliases = new String[] { "dynamic", "org.apache.openjpa.jdbc.schema.DynamicSchemaFactory", "native",
            "org.apache.openjpa.jdbc.schema.LazySchemaFactory", "file",
            "org.apache.openjpa.jdbc.schema.FileSchemaFactory", "table",
            "org.apache.openjpa.jdbc.schema.TableSchemaFactory",
            // deprecated alias
            "db", "org.apache.openjpa.jdbc.schema.TableSchemaFactory", };
    schemaFactoryPlugin.setAliases(aliases);
    schemaFactoryPlugin.setDefault(aliases[0]);
    schemaFactoryPlugin.setString(aliases[0]);
    schemaFactoryPlugin.setInstantiatingGetter("getSchemaFactoryInstance");

    sqlFactoryPlugin = addPlugin("jdbc.SQLFactory", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.sql.SQLFactoryImpl", };
    sqlFactoryPlugin.setAliases(aliases);
    sqlFactoryPlugin.setDefault(aliases[0]);
    sqlFactoryPlugin.setString(aliases[0]);
    sqlFactoryPlugin.setInstantiatingGetter("getSQLFactoryInstance");

    mappingFactoryPlugin = new MappingFactoryValue("jdbc.MappingFactory");
    addValue(mappingFactoryPlugin);

    mappingDefaultsPlugin = addPlugin("jdbc.MappingDefaults", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.meta.MappingDefaultsImpl", };
    mappingDefaultsPlugin.setAliases(aliases);
    mappingDefaultsPlugin.setDefault(aliases[0]);
    mappingDefaultsPlugin.setString(aliases[0]);
    mappingDefaultsPlugin.setInstantiatingGetter("getMappingDefaultsInstance");

    // set up broker factory defaults
    brokerFactoryPlugin.setAlias("jdbc", JDBCBrokerFactory.class.getName());
    brokerFactoryPlugin.setDefault("jdbc");
    brokerFactoryPlugin.setString("jdbc");

    // set new default for mapping repos
    metaRepositoryPlugin.setAlias("default", "org.apache.openjpa.jdbc.meta.MappingRepository");
    metaRepositoryPlugin.setDefault("default");
    metaRepositoryPlugin.setString("default");

    // set new default for lock manager
    lockManagerPlugin.setAlias("pessimistic", PessimisticLockManager.class.getName());
    lockManagerPlugin.setDefault("pessimistic");
    lockManagerPlugin.setString("pessimistic");

    // native savepoint manager options
    savepointManagerPlugin.setAlias("jdbc", "org.apache.openjpa.jdbc.kernel.JDBC3SavepointManager");

    // set new aliases and defaults for sequence
    seqPlugin.setAliases(JDBCSeqValue.ALIASES);
    seqPlugin.setDefault(JDBCSeqValue.ALIASES[0]);
    seqPlugin.setString(JDBCSeqValue.ALIASES[0]);

    // This plug-in is declared in superclass but defined here
    // because PreparedQueryCache is currently available for JDBC
    // backend only
    preparedQueryCachePlugin = addPlugin("jdbc.QuerySQLCache", true);
    aliases = new String[] { "true", "org.apache.openjpa.jdbc.kernel.PreparedQueryCacheImpl", "false", null };
    preparedQueryCachePlugin.setAliases(aliases);
    preparedQueryCachePlugin.setAliasListComprehensive(true);
    preparedQueryCachePlugin.setDefault(aliases[0]);
    preparedQueryCachePlugin.setClassName(aliases[1]);
    preparedQueryCachePlugin.setDynamic(true);
    preparedQueryCachePlugin.setInstantiatingGetter("getQuerySQLCacheInstance");

    finderCachePlugin = addPlugin("jdbc.FinderCache", true);
    aliases = new String[] { "true", "org.apache.openjpa.jdbc.kernel.FinderCacheImpl", "false", null };
    finderCachePlugin.setAliases(aliases);
    finderCachePlugin.setAliasListComprehensive(true);
    finderCachePlugin.setDefault(aliases[0]);
    finderCachePlugin.setClassName(aliases[1]);
    finderCachePlugin.setDynamic(true);
    finderCachePlugin.setInstantiatingGetter("getFinderCacheInstance");

    identifierUtilPlugin = addPlugin("jdbc.IdentifierUtil", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.identifier.DBIdentifierUtilImpl" };
    identifierUtilPlugin.setAliases(aliases);
    identifierUtilPlugin.setDefault(aliases[0]);
    identifierUtilPlugin.setString(aliases[0]);
    identifierUtilPlugin.setInstantiatingGetter("getIdentifierUtilInstance");

    // this static initializer is to get past a weird
    // ClassCircularityError that happens only under IBM's
    // JDK 1.3.1 on Linux from within the JRun ClassLoader;
    // while exact causes are unknown, it is almost certainly
    // a bug in JRun, and we can get around it by forcing
    // Instruction.class to be loaded and initialized
    // before TypedInstruction.class
    try {
        serp.bytecode.lowlevel.Entry.class.getName();
    } catch (Throwable t) {
    }
    try {
        serp.bytecode.Instruction.class.getName();
    } catch (Throwable t) {
    }

    // JDBC back ends can run native SQL queries and hand out connections,
    // but cannot do in-memory value increments or null containers.
    supportedOptions().add(OPTION_QUERY_SQL);
    supportedOptions().add(OPTION_JDBC_CONNECTION);
    supportedOptions().remove(OPTION_VALUE_INCREMENT);
    supportedOptions().remove(OPTION_NULL_CONTAINER);

    if (derivations)
        ProductDerivations.beforeConfigurationLoad(this);
    if (loadGlobals)
        loadGlobals();
}

From source file:org.apache.openjpa.jdbc.kernel.JDBCFetchConfigurationImpl.java

public JDBCFetchConfiguration setFetchDirection(int direction) {
    // Accept only the JDBC fetch-direction constants or our DEFAULT sentinel.
    boolean recognized = direction == DEFAULT || direction == ResultSet.FETCH_FORWARD
            || direction == ResultSet.FETCH_REVERSE || direction == ResultSet.FETCH_UNKNOWN;
    if (!recognized)
        throw new IllegalArgumentException(
                _loc.get("bad-fetch-direction", Integer.valueOf(direction)).getMessage());

    if (direction != DEFAULT) {
        _state.direction = direction;
    } else {
        // DEFAULT resolves to the configuration-wide direction when one exists;
        // otherwise the current state is left untouched.
        JDBCConfiguration conf = getJDBCConfiguration();
        if (conf != null)
            _state.direction = conf.getFetchDirectionConstant();
    }
    return this;
}

From source file:org.apache.openjpa.jdbc.kernel.JDBCFetchConfigurationImpl.java

public ResultList<?> newResultList(ResultObjectProvider rop) {
    // A provider already backed by a list needs no windowing at all.
    if (rop instanceof ListResultObjectProvider)
        return new SimpleResultList(rop);

    // A paging provider supplies its own natural window size.
    if (rop instanceof PagingResultObjectProvider)
        return new WindowResultList(rop, ((PagingResultObjectProvider) rop).getPageSize());

    // A negative fetch size means "read everything up front".
    if (getFetchBatchSize() < 0)
        return new EagerResultList(rop);

    // Forward-only access (by result-set type, fetch direction, or provider
    // capability) gets a sliding window capped at 50 rows.
    boolean forwardOnly = _state.type == ResultSet.TYPE_FORWARD_ONLY
            || _state.direction == ResultSet.FETCH_FORWARD || !rop.supportsRandomAccess();
    if (forwardOnly) {
        int batch = getFetchBatchSize();
        return new WindowResultList(rop, (batch > 0 && batch <= 50) ? batch : 50);
    }

    // Unknown direction implies skipping around: cache rows softly so random
    // access stays cheap.
    if (_state.direction == ResultSet.FETCH_UNKNOWN)
        return new SoftRandomAccessResultList(rop);

    // Reverse scrolling: plain non-caching list.
    return new SimpleResultList(rop);
}

From source file:org.apache.openjpa.jdbc.sql.SQLBuffer.java

/**
 * Create a prepared statement from the SQL in this buffer, resolving the
 * result-set type and concurrency from the given fetch configuration, and
 * populate the statement's parameters.
 */
public PreparedStatement prepareStatement(Connection conn, JDBCFetchConfiguration fetch, int rsType,
        int rsConcur) throws SQLException {
    // -1 means "unspecified": consult the fetch configuration if present,
    // otherwise fall back to the cheapest JDBC defaults.
    if (rsType == -1)
        rsType = (fetch == null) ? ResultSet.TYPE_FORWARD_ONLY : fetch.getResultSetType();
    if (rsConcur == -1)
        rsConcur = ResultSet.CONCUR_READ_ONLY;

    // The three-arg form is only required for non-default type/concurrency.
    PreparedStatement stmnt;
    boolean defaults = rsType == ResultSet.TYPE_FORWARD_ONLY && rsConcur == ResultSet.CONCUR_READ_ONLY;
    if (defaults)
        stmnt = conn.prepareStatement(getSQL());
    else
        stmnt = conn.prepareStatement(getSQL(), rsType, rsConcur);
    try {
        setParameters(stmnt);
        if (fetch != null) {
            if (fetch.getFetchBatchSize() > 0)
                stmnt.setFetchSize(_dict.getBatchFetchSize(fetch.getFetchBatchSize()));
            if (rsType != ResultSet.TYPE_FORWARD_ONLY && fetch.getFetchDirection() != ResultSet.FETCH_FORWARD)
                stmnt.setFetchDirection(fetch.getFetchDirection());
        }
        return stmnt;
    } catch (SQLException se) {
        // Don't leak the statement when binding or tuning fails; the original
        // exception is the informative one, so a close failure is ignored.
        try {
            stmnt.close();
        } catch (SQLException ignored) {
        }
        throw se;
    }
}

From source file:org.apache.openjpa.jdbc.sql.SQLBuffer.java

/**
 * Create a callable statement from the SQL in this buffer, resolving the
 * result-set type and concurrency from the given fetch configuration, and
 * populate the statement's parameters.
 */
public CallableStatement prepareCall(Connection conn, JDBCFetchConfiguration fetch, int rsType, int rsConcur)
        throws SQLException {
    // -1 means "unspecified": consult the fetch configuration if present,
    // otherwise fall back to the cheapest JDBC defaults.
    if (rsType == -1)
        rsType = (fetch == null) ? ResultSet.TYPE_FORWARD_ONLY : fetch.getResultSetType();
    if (rsConcur == -1)
        rsConcur = ResultSet.CONCUR_READ_ONLY;

    // The three-arg form is only required for non-default type/concurrency.
    CallableStatement stmnt;
    boolean defaults = rsType == ResultSet.TYPE_FORWARD_ONLY && rsConcur == ResultSet.CONCUR_READ_ONLY;
    if (defaults)
        stmnt = conn.prepareCall(getSQL());
    else
        stmnt = conn.prepareCall(getSQL(), rsType, rsConcur);
    try {
        setParameters(stmnt);
        if (fetch != null) {
            if (fetch.getFetchBatchSize() > 0)
                stmnt.setFetchSize(_dict.getBatchFetchSize(fetch.getFetchBatchSize()));
            if (rsType != ResultSet.TYPE_FORWARD_ONLY && fetch.getFetchDirection() != ResultSet.FETCH_FORWARD)
                stmnt.setFetchDirection(fetch.getFetchDirection());
        }
        return stmnt;
    } catch (SQLException se) {
        // Don't leak the statement when binding or tuning fails; the original
        // exception is the informative one, so a close failure is ignored.
        try {
            stmnt.close();
        } catch (SQLException ignored) {
        }
        throw se;
    }
}

From source file:org.apache.phoenix.jdbc.PhoenixResultSet.java

@Override
public int getFetchDirection() throws SQLException {
    // Phoenix result sets are always traversed first-to-last.
    return ResultSet.FETCH_FORWARD;
}

From source file:org.apache.phoenix.jdbc.PhoenixResultSet.java

@Override
public void setFetchDirection(int direction) throws SQLException {
    // Only forward iteration is supported; any other direction is rejected.
    if (direction == ResultSet.FETCH_FORWARD) {
        return;
    }
    throw new SQLFeatureNotSupportedException();
}

From source file:org.inquidia.kettle.plugins.snowflakeplugin.bulkloader.SnowflakeBulkLoader.java

/**
 * Runs a {@code DESC TABLE} against the target table to load field names and
 * field types into {@code data.dbFields}. A describe is used instead of the
 * {@code select * from table limit 0} probe Pentaho normally uses because only
 * the describe output lets us differentiate the Time type from Timestamp.
 *
 * @throws KettleException if the query fails, the NAME/TYPE columns are
 *         missing from the describe output, or the table has no fields
 */
private void getDbFields() throws KettleException {
    data.dbFields = new ArrayList<>();

    // Build "desc table [schema.]table" with environment variables resolved.
    String sql = "desc table ";
    if (!Const.isEmpty(environmentSubstitute(meta.getTargetSchema()))) {
        sql += environmentSubstitute(meta.getTargetSchema()) + ".";
    }
    sql += environmentSubstitute(meta.getTargetTable());
    logDetailed("Executing SQL " + sql);

    ResultSet resultSet = null;
    try {
        resultSet = data.db.openQuery(sql, null, null, ResultSet.FETCH_FORWARD, false);

        RowMetaInterface rowMeta = data.db.getReturnRowMeta();
        int nameField = rowMeta.indexOfValue("NAME");
        int typeField = rowMeta.indexOfValue("TYPE");
        if (nameField < 0 || typeField < 0) {
            throw new KettleException("Unable to get database fields");
        }

        Object[] row = data.db.getRow(resultSet);
        if (row == null) {
            throw new KettleException("No fields found in table");
        }
        while (row != null) {
            // Each describe row contributes one { name, type } pair.
            String[] field = new String[2];
            field[0] = rowMeta.getString(row, nameField);
            field[1] = rowMeta.getString(row, typeField);
            data.dbFields.add(field);
            row = data.db.getRow(resultSet);
        }
    } catch (KettleException ke) {
        // Propagate our own specific failures instead of double-wrapping them.
        throw ke;
    } catch (Exception ex) {
        throw new KettleException("Error getting database fields", ex);
    } finally {
        // FIX: the original only closed the query on the success path, leaking
        // the result set whenever an exception was thrown after openQuery().
        if (resultSet != null) {
            data.db.closeQuery(resultSet);
        }
    }
}