Example usage for java.sql ResultSet TYPE_SCROLL_INSENSITIVE

List of usage examples for java.sql ResultSet TYPE_SCROLL_INSENSITIVE

Introduction

On this page you can find example usages for java.sql ResultSet TYPE_SCROLL_INSENSITIVE.

Prototype

int TYPE_SCROLL_INSENSITIVE

To view the source code for java.sql ResultSet TYPE_SCROLL_INSENSITIVE, use the Source Link below.

Click Source Link

Document

The constant indicating the type for a ResultSet object that is scrollable but generally not sensitive to changes to the data that underlies the ResultSet.

Usage

From source file:no.polaric.aprsdb.MyDBSession.java

/**
 * Get the list of signs within a specified geographic area that are visible
 * at or above a specified map scale.
 *
 * @param scale only signs with maxscale >= this value are returned
 * @param uleft upper-left corner of the area of interest
 * @param lright lower-right corner of the area of interest
 * @return list of matching sign items (capped at 200 rows)
 * @throws java.sql.SQLException if the query fails
 */
public DbList<Signs.Item> getSigns(long scale, Reference uleft, Reference lright) throws java.sql.SQLException {
    PreparedStatement stmt = getCon().prepareStatement(
            " SELECT s.id AS sid, position, maxscale, url, description, cl.name, s.icon AS sicon, cl.icon AS cicon "
                    + " FROM \"Signs\" s LEFT JOIN \"SignClass\" cl ON s.class=cl.id"
                    + " WHERE maxscale>=? AND position && ST_MakeEnvelope(?, ?, ?, ?, 4326) AND NOT s.hidden"
                    + " LIMIT 200",
            ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
    stmt.setLong(1, scale);
    LatLng ul = uleft.toLatLng();
    LatLng lr = lright.toLatLng();
    // ST_MakeEnvelope parameter order is xmin, ymin, xmax, ymax — longitude
    // first, latitude second (SRID 4326).
    stmt.setDouble(2, ul.getLng());
    stmt.setDouble(3, ul.getLat());
    stmt.setDouble(4, lr.getLng());
    stmt.setDouble(5, lr.getLat());
    stmt.setMaxRows(200); // belt-and-braces with the LIMIT 200 in the SQL

    // Use the generic type argument instead of a raw DbList to avoid an
    // unchecked-conversion warning.
    return new DbList<Signs.Item>(stmt.executeQuery(), rs -> {
        // Fall back to the sign-class icon when the sign has no icon of its own.
        String icon = rs.getString("sicon");
        if (icon == null)
            icon = rs.getString("cicon");

        // Item (Reference r, long sc, String ic, String url, String txt)
        return new Signs.Item(rs.getInt("sid"), getRef(rs, "position"), 0, icon, rs.getString("url"),
                rs.getString("description"));
    });
}

From source file:edu.ku.brc.specify.conversion.IdTableMapper.java

/**
 * Map all the old IDs to new IDs by executing the supplied query and
 * inserting (oldId, newId) pairs into the mapping table.
 *
 * @param sqlArg the SQL query used to fill the map; its first column must be
 *               the old ID
 */
public void mapAllIds(final String sqlArg) {
    log.debug("mapAllIds with sql: " + sqlArg);
    this.sql = sqlArg;

    int mappingCount = getMapCount(mapTableName);
    wasEmpty = mappingCount == 0;

    if (doDelete || mappingCount == 0) {
        BasicSQLUtils.deleteAllRecordsFromTable(oldConn, mapTableName, BasicSQLUtils.myDestinationServerType);
        if (frame != null) {
            String dMsg = "Mapping " + mapTableName;
            frame.setDesc(dMsg);
            log.debug(dMsg);
        }

        log.debug("Executing: " + sql);
        // try-with-resources guarantees both statements and the result set are
        // closed even when an error occurs part-way through the mapping loop
        // (the original leaked them on any exception).
        try (PreparedStatement pStmt = oldConn.prepareStatement("INSERT INTO " + mapTableName + " VALUES (?,?)");
                Statement stmtOld = oldConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                        ResultSet.CONCUR_READ_ONLY);
                ResultSet rs = stmtOld.executeQuery(sql)) {

            // The scrollable result set lets us jump to the last row to size
            // the progress bar, then iterate again from the first row.
            if (rs.last()) {
                if (frame != null) {
                    frame.setProcess(0, rs.getRow());
                }
            }

            if (rs.first()) {
                int newIndex = initialIndex;
                do {
                    int oldIndex = rs.getInt(1);

                    if (indexIncremeter != null) {
                        newIndex = indexIncremeter.getNextIndex();
                    }

                    pStmt.setInt(1, oldIndex); // Old Index
                    pStmt.setInt(2, newIndex); // New Index
                    if (pStmt.executeUpdate() != 1) {
                        String msg = String.format("Error writing to Map table[%s] old: %d  new: %d",
                                mapTableName, oldIndex, newIndex);
                        log.error(msg);
                        throw new RuntimeException(msg);
                    }

                    newIndex++; // incrementing doesn't matter when there is an indexIncremeter

                    if (frame != null) {
                        if (newIndex % 1000 == 0) {
                            frame.setProcess(newIndex);
                        }

                    } else {
                        if (newIndex % 2000 == 0) {
                            log.debug("Mapped " + newIndex + " records from " + tableName);
                        }
                    }

                } while (rs.next());
                log.info("Mapped " + newIndex + " records from " + tableName);

            } else {
                log.info("No records to map in " + tableName);
            }

        } catch (SQLException ex) {
            // Record the failure with the trackers and fail fast; the full
            // stack trace is preserved by the wrapped RuntimeException, so no
            // printStackTrace() to stderr is needed.
            edu.ku.brc.af.core.UsageTracker.incrSQLUsageCount();
            edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(IdTableMapper.class, ex);
            log.error(ex);
            throw new RuntimeException(ex);
        }
    } else {
        log.debug("Skipping the build of mapper: " + mapTableName);
    }

    if (frame != null) {
        frame.setProcess(0, 0);
    }
}

From source file:recite18th.library.Db.java

/**
 * Execute a callable statement and return its result set as a 2-D string
 * array.
 *
 * @param sql the callable SQL to execute
 * @return the processed data set, or null if a database error occurred
 */
public static String[][] getCallableDataSet(String sql) {
    // try-with-resources closes the statement (and with it the result set)
    // on every path; the original leaked it.
    try (CallableStatement cstmt = getCon().prepareCall(sql, ResultSet.TYPE_SCROLL_INSENSITIVE,
            ResultSet.CONCUR_READ_ONLY)) {
        // The result set is fully consumed before the statement closes.
        return processDataSetResultSet(cstmt.executeQuery());
    } catch (SQLException ex) {
        Logger.getLogger(Db.class.getName()).log(Level.SEVERE, null, ex);
        return null; // existing callers are expected to handle a null data set
    }
}

From source file:com.bc.fiduceo.db.AbstractDriver.java

/**
 * Look up the numeric ID of a sensor by its name.
 *
 * @param sensorName the sensor name to search for
 * @return the sensor ID, or null if no sensor with that name exists
 * @throws SQLException if the query fails
 */
Integer getSensorId(String sensorName) throws SQLException {
    // A parameterized query replaces the original string concatenation,
    // preventing SQL injection through sensorName, and try-with-resources
    // closes the statement and result set (the original leaked both).
    // A plain forward-only, read-only statement suffices for this lookup.
    try (PreparedStatement statement = connection.prepareStatement("SELECT ID FROM SENSOR WHERE NAME = ?")) {
        statement.setString(1, sensorName);
        try (ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                return resultSet.getInt("ID");
            }
            return null;
        }
    }
}

From source file:org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl.java

/**
 * Constructor. Registers and wires all JDBC-related configuration values
 * (isolation level, result-set options, plugins, dictionaries, caches) with
 * their aliases and defaults.
 *
 * @param derivations whether to apply product derivations
 * @param loadGlobals whether to attempt to load the global properties
 */
public JDBCConfigurationImpl(boolean derivations, boolean loadGlobals) {
    super(false, false);
    // Reused for each option's alias table; entries are (name, value) pairs.
    String[] aliases;

    schema = addString("jdbc.Schema");
    schemas = addStringList("jdbc.Schemas");

    // JDBC transaction isolation level; -1 means "use the driver default".
    transactionIsolation = addInt("jdbc.TransactionIsolation");
    aliases = new String[] { "default", String.valueOf(-1), "none", String.valueOf(Connection.TRANSACTION_NONE),
            "read-committed", String.valueOf(Connection.TRANSACTION_READ_COMMITTED), "read-uncommitted",
            String.valueOf(Connection.TRANSACTION_READ_UNCOMMITTED), "repeatable-read",
            String.valueOf(Connection.TRANSACTION_REPEATABLE_READ), "serializable",
            String.valueOf(Connection.TRANSACTION_SERIALIZABLE) };
    transactionIsolation.setAliases(aliases);
    transactionIsolation.setDefault(aliases[0]);
    transactionIsolation.set(-1);
    transactionIsolation.setAliasListComprehensive(true);

    // ResultSet scrollability type used for queries.
    resultSetType = addInt("jdbc.ResultSetType");
    aliases = new String[] { "forward-only", String.valueOf(ResultSet.TYPE_FORWARD_ONLY), "scroll-sensitive",
            String.valueOf(ResultSet.TYPE_SCROLL_SENSITIVE), "scroll-insensitive",
            String.valueOf(ResultSet.TYPE_SCROLL_INSENSITIVE), };
    resultSetType.setAliases(aliases);
    resultSetType.setDefault(aliases[0]);
    resultSetType.set(ResultSet.TYPE_FORWARD_ONLY);
    resultSetType.setAliasListComprehensive(true);

    // Fetch direction hint passed to the JDBC driver.
    fetchDirection = addInt("jdbc.FetchDirection");
    aliases = new String[] { "forward", String.valueOf(ResultSet.FETCH_FORWARD), "reverse",
            String.valueOf(ResultSet.FETCH_REVERSE), "unknown", String.valueOf(ResultSet.FETCH_UNKNOWN), };
    fetchDirection.setAliases(aliases);
    fetchDirection.setDefault(aliases[0]);
    fetchDirection.set(ResultSet.FETCH_FORWARD);
    fetchDirection.setAliasListComprehensive(true);

    // Eager/subclass fetch modes control how related data is loaded.
    eagerFetchMode = new FetchModeValue("jdbc.EagerFetchMode");
    eagerFetchMode.setDefault(FetchModeValue.EAGER_PARALLEL);
    eagerFetchMode.set(EagerFetchModes.EAGER_PARALLEL);
    addValue(eagerFetchMode);

    subclassFetchMode = new FetchModeValue("jdbc.SubclassFetchMode");
    subclassFetchMode.setDefault(FetchModeValue.EAGER_JOIN);
    subclassFetchMode.set(EagerFetchModes.EAGER_JOIN);
    addValue(subclassFetchMode);

    // Strategy for determining the size of large result sets.
    lrsSize = addInt("jdbc.LRSSize");
    aliases = new String[] { "query", String.valueOf(LRSSizes.SIZE_QUERY), "unknown",
            String.valueOf(LRSSizes.SIZE_UNKNOWN), "last", String.valueOf(LRSSizes.SIZE_LAST), };
    lrsSize.setAliases(aliases);
    lrsSize.setDefault(aliases[0]);
    lrsSize.set(LRSSizes.SIZE_QUERY);
    lrsSize.setAliasListComprehensive(true);

    synchronizeMappings = addString("jdbc.SynchronizeMappings");
    aliases = new String[] { "false", null };
    synchronizeMappings.setAliases(aliases);
    synchronizeMappings.setDefault(aliases[0]);

    jdbcListenerPlugins = addPluginList("jdbc.JDBCListeners");
    jdbcListenerPlugins.setInstantiatingGetter("getJDBCListenerInstances");

    connectionDecoratorPlugins = addPluginList("jdbc.ConnectionDecorators");
    connectionDecoratorPlugins.setInstantiatingGetter("getConnectionDecoratorInstances");

    // Database dictionary: maps a short database name to its dialect class.
    dbdictionaryPlugin = addPlugin("jdbc.DBDictionary", true);
    aliases = new String[] { "access", "org.apache.openjpa.jdbc.sql.AccessDictionary", "db2",
            "org.apache.openjpa.jdbc.sql.DB2Dictionary", "derby", "org.apache.openjpa.jdbc.sql.DerbyDictionary",
            "empress", "org.apache.openjpa.jdbc.sql.EmpressDictionary", "foxpro",
            "org.apache.openjpa.jdbc.sql.FoxProDictionary", "h2", "org.apache.openjpa.jdbc.sql.H2Dictionary",
            "hsql", "org.apache.openjpa.jdbc.sql.HSQLDictionary", "informix",
            "org.apache.openjpa.jdbc.sql.InformixDictionary", "ingres",
            "org.apache.openjpa.jdbc.sql.IngresDictionary", "jdatastore",
            "org.apache.openjpa.jdbc.sql.JDataStoreDictionary", "mysql",
            "org.apache.openjpa.jdbc.sql.MySQLDictionary", "oracle",
            "org.apache.openjpa.jdbc.sql.OracleDictionary", "pointbase",
            "org.apache.openjpa.jdbc.sql.PointbaseDictionary", "postgres",
            "org.apache.openjpa.jdbc.sql.PostgresDictionary", "soliddb",
            "org.apache.openjpa.jdbc.sql.SolidDBDictionary", "sqlserver",
            "org.apache.openjpa.jdbc.sql.SQLServerDictionary", "sybase",
            "org.apache.openjpa.jdbc.sql.SybaseDictionary", "maxdb",
            MaxDBDictionary.class.getCanonicalName(), };
    dbdictionaryPlugin.setAliases(aliases);
    dbdictionaryPlugin.setInstantiatingGetter("getDBDictionaryInstance");

    updateManagerPlugin = addPlugin("jdbc.UpdateManager", true);
    aliases = new String[] { "default", BatchingConstraintUpdateManager.class.getName(), "operation-order",
            "org.apache.openjpa.jdbc.kernel.OperationOrderUpdateManager", "constraint",
            "org.apache.openjpa.jdbc.kernel.ConstraintUpdateManager", "batching-constraint",
            BatchingConstraintUpdateManager.class.getName(), "batching-operation-order",
            BatchingOperationOrderUpdateManager.class.getName(), };
    updateManagerPlugin.setAliases(aliases);
    updateManagerPlugin.setDefault(aliases[0]);
    updateManagerPlugin.setString(aliases[0]);
    updateManagerPlugin.setInstantiatingGetter("getUpdateManagerInstance");

    driverDataSourcePlugin = addPlugin("jdbc.DriverDataSource", false);
    aliases = new String[] { "auto", "org.apache.openjpa.jdbc.schema.AutoDriverDataSource", "simple",
            "org.apache.openjpa.jdbc.schema.SimpleDriverDataSource", "dbcp",
            "org.apache.openjpa.jdbc.schema.DBCPDriverDataSource", };
    driverDataSourcePlugin.setAliases(aliases);
    driverDataSourcePlugin.setDefault(aliases[0]);
    driverDataSourcePlugin.setString(aliases[0]);

    schemaFactoryPlugin = addPlugin("jdbc.SchemaFactory", true);
    aliases = new String[] { "dynamic", "org.apache.openjpa.jdbc.schema.DynamicSchemaFactory", "native",
            "org.apache.openjpa.jdbc.schema.LazySchemaFactory", "file",
            "org.apache.openjpa.jdbc.schema.FileSchemaFactory", "table",
            "org.apache.openjpa.jdbc.schema.TableSchemaFactory",
            // deprecated alias
            "db", "org.apache.openjpa.jdbc.schema.TableSchemaFactory", };
    schemaFactoryPlugin.setAliases(aliases);
    schemaFactoryPlugin.setDefault(aliases[0]);
    schemaFactoryPlugin.setString(aliases[0]);
    schemaFactoryPlugin.setInstantiatingGetter("getSchemaFactoryInstance");

    sqlFactoryPlugin = addPlugin("jdbc.SQLFactory", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.sql.SQLFactoryImpl", };
    sqlFactoryPlugin.setAliases(aliases);
    sqlFactoryPlugin.setDefault(aliases[0]);
    sqlFactoryPlugin.setString(aliases[0]);
    sqlFactoryPlugin.setInstantiatingGetter("getSQLFactoryInstance");

    mappingFactoryPlugin = new MappingFactoryValue("jdbc.MappingFactory");
    addValue(mappingFactoryPlugin);

    mappingDefaultsPlugin = addPlugin("jdbc.MappingDefaults", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.meta.MappingDefaultsImpl", };
    mappingDefaultsPlugin.setAliases(aliases);
    mappingDefaultsPlugin.setDefault(aliases[0]);
    mappingDefaultsPlugin.setString(aliases[0]);
    mappingDefaultsPlugin.setInstantiatingGetter("getMappingDefaultsInstance");

    // set up broker factory defaults
    brokerFactoryPlugin.setAlias("jdbc", JDBCBrokerFactory.class.getName());
    brokerFactoryPlugin.setDefault("jdbc");
    brokerFactoryPlugin.setString("jdbc");

    // set new default for mapping repos
    metaRepositoryPlugin.setAlias("default", "org.apache.openjpa.jdbc.meta.MappingRepository");
    metaRepositoryPlugin.setDefault("default");
    metaRepositoryPlugin.setString("default");

    // set new default for lock manager
    lockManagerPlugin.setAlias("pessimistic", PessimisticLockManager.class.getName());
    lockManagerPlugin.setDefault("pessimistic");
    lockManagerPlugin.setString("pessimistic");

    // native savepoint manager options
    savepointManagerPlugin.setAlias("jdbc", "org.apache.openjpa.jdbc.kernel.JDBC3SavepointManager");

    // set new aliases and defaults for sequence
    seqPlugin.setAliases(JDBCSeqValue.ALIASES);
    seqPlugin.setDefault(JDBCSeqValue.ALIASES[0]);
    seqPlugin.setString(JDBCSeqValue.ALIASES[0]);

    // This plug-in is declared in superclass but defined here
    // because PreparedQueryCache is currently available for JDBC
    // backend only
    preparedQueryCachePlugin = addPlugin("jdbc.QuerySQLCache", true);
    aliases = new String[] { "true", "org.apache.openjpa.jdbc.kernel.PreparedQueryCacheImpl", "false", null };
    preparedQueryCachePlugin.setAliases(aliases);
    preparedQueryCachePlugin.setAliasListComprehensive(true);
    preparedQueryCachePlugin.setDefault(aliases[0]);
    preparedQueryCachePlugin.setClassName(aliases[1]);
    preparedQueryCachePlugin.setDynamic(true);
    preparedQueryCachePlugin.setInstantiatingGetter("getQuerySQLCacheInstance");

    finderCachePlugin = addPlugin("jdbc.FinderCache", true);
    aliases = new String[] { "true", "org.apache.openjpa.jdbc.kernel.FinderCacheImpl", "false", null };
    finderCachePlugin.setAliases(aliases);
    finderCachePlugin.setAliasListComprehensive(true);
    finderCachePlugin.setDefault(aliases[0]);
    finderCachePlugin.setClassName(aliases[1]);
    finderCachePlugin.setDynamic(true);
    finderCachePlugin.setInstantiatingGetter("getFinderCacheInstance");

    identifierUtilPlugin = addPlugin("jdbc.IdentifierUtil", true);
    aliases = new String[] { "default", "org.apache.openjpa.jdbc.identifier.DBIdentifierUtilImpl" };
    identifierUtilPlugin.setAliases(aliases);
    identifierUtilPlugin.setDefault(aliases[0]);
    identifierUtilPlugin.setString(aliases[0]);
    identifierUtilPlugin.setInstantiatingGetter("getIdentifierUtilInstance");

    // this static initializer is to get past a weird
    // ClassCircularityError that happens only under IBM's
    // JDK 1.3.1 on Linux from within the JRun ClassLoader;
    // while exact causes are unknown, it is almost certainly
    // a bug in JRun, and we can get around it by forcing
    // Instruction.class to be loaded and initialized
    // before TypedInstruction.class
    try {
        serp.bytecode.lowlevel.Entry.class.getName();
    } catch (Throwable t) {
    }
    try {
        serp.bytecode.Instruction.class.getName();
    } catch (Throwable t) {
    }

    supportedOptions().add(OPTION_QUERY_SQL);
    supportedOptions().add(OPTION_JDBC_CONNECTION);
    supportedOptions().remove(OPTION_VALUE_INCREMENT);
    supportedOptions().remove(OPTION_NULL_CONTAINER);

    if (derivations)
        ProductDerivations.beforeConfigurationLoad(this);
    if (loadGlobals)
        loadGlobals();
}

From source file:com.taobao.datax.plugins.writer.mysqlwriter.MysqlWriter.java

/**
 * Pull lines from the receiver and bulk-load them into MySQL by streaming
 * them through the driver's local-infile input stream.
 *
 * @param receiver source of the lines to write
 * @return {@code PluginStatus.SUCCESS.value()} on success
 * @throws DataExchangeException if the load fails for any reason
 */
@Override
public int startWrite(LineReceiver receiver) {
    com.mysql.jdbc.Statement stmt = null;
    try {
        this.connection = DBSource.getConnection(this.sourceUniqKey);
        // Unwrap the DBCP pooled connection: setLocalInfileInputStream is a
        // MySQL-driver-specific API, so we need the innermost delegate.
        stmt = (com.mysql.jdbc.Statement) ((org.apache.commons.dbcp.DelegatingConnection) this.connection)
                .getInnermostDelegate()
                .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);

        /* set max count */
        this.logger.info(String.format("Config max_error_count: set max_error_count=%d", MAX_ERROR_COUNT));
        stmt.executeUpdate(String.format("set max_error_count=%d;", MAX_ERROR_COUNT));

        /* set connect encoding */
        this.logger.info(String.format("Config encoding %s .", this.encoding));
        for (String sql : this.makeLoadEncoding(encoding))
            stmt.execute(sql);

        /* load data begin */
        String loadSql = this.makeLoadSql();
        this.logger.info(String.format("Load sql: %s.", visualSql(loadSql)));

        MysqlWriterInputStreamAdapter localInputStream = new MysqlWriterInputStreamAdapter(receiver, this);
        stmt.setLocalInfileInputStream(localInputStream);
        stmt.executeUpdate(visualSql(loadSql));
        this.lineCounter = localInputStream.getLineNumber();

        this.logger.info("DataX write to mysql ends .");

        return PluginStatus.SUCCESS.value();
    } catch (Exception e2) {
        if (null != this.connection) {
            try {
                this.connection.close();
            } catch (SQLException ignored) {
                // best effort; the original failure thrown below is what matters
            }
        }
        // Wrap the exception itself rather than e2.getCause(): the cause may
        // be null (losing all diagnostics), and wrapping e2 preserves the
        // full chain.
        throw new DataExchangeException(e2);
    } finally {
        if (null != stmt)
            try {
                stmt.close();
            } catch (SQLException ignored) {
                // nothing useful to do if close fails
            }
    }
}

From source file:com.taobao.datax.plugins.writer.pgsqlwriter.PgsqlWriter.java

/**
 * Pull lines from the receiver and bulk-load them into the target database
 * by streaming them through the driver's local-infile input stream.
 *
 * NOTE(review): despite living in PgsqlWriter, this code casts to
 * com.mysql.jdbc.Statement and uses MySQL-specific settings — confirm this
 * copy-paste from MysqlWriter is intentional.
 *
 * @param receiver source of the lines to write
 * @return {@code PluginStatus.SUCCESS.value()} on success
 * @throws DataExchangeException if the load fails for any reason
 */
@Override
public int startWrite(LineReceiver receiver) {
    com.mysql.jdbc.Statement stmt = null;
    try {
        this.connection = DBSource.getConnection(this.sourceUniqKey);
        // Unwrap the DBCP pooled connection to reach the driver-specific
        // statement that supports setLocalInfileInputStream.
        stmt = (com.mysql.jdbc.Statement) ((org.apache.commons.dbcp.DelegatingConnection) this.connection)
                .getInnermostDelegate()
                .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);

        /* set max count */
        this.logger.info(String.format("Config max_error_count: set max_error_count=%d", MAX_ERROR_COUNT));
        stmt.executeUpdate(String.format("set max_error_count=%d;", MAX_ERROR_COUNT));

        /* set connect encoding */
        this.logger.info(String.format("Config encoding %s .", this.encoding));
        for (String sql : this.makeLoadEncoding(encoding))
            stmt.execute(sql);

        /* load data begin */
        String loadSql = this.makeLoadSql();
        this.logger.info(String.format("Load sql: %s.", visualSql(loadSql)));

        PgsqlWriterInputStreamAdapter localInputStream = new PgsqlWriterInputStreamAdapter(receiver, this);
        stmt.setLocalInfileInputStream(localInputStream);
        stmt.executeUpdate(visualSql(loadSql));
        this.lineCounter = localInputStream.getLineNumber();

        // Fixed log message: this is the pgsql writer, not mysql.
        this.logger.info("DataX write to pgsql ends .");

        return PluginStatus.SUCCESS.value();
    } catch (Exception e2) {
        if (null != this.connection) {
            try {
                this.connection.close();
            } catch (SQLException ignored) {
                // best effort; the original failure thrown below is what matters
            }
        }
        // Wrap the exception itself rather than e2.getCause(): the cause may
        // be null (losing all diagnostics), and wrapping e2 preserves the
        // full chain.
        throw new DataExchangeException(e2);
    } finally {
        if (null != stmt)
            try {
                stmt.close();
            } catch (SQLException ignored) {
                // nothing useful to do if close fails
            }
    }
}

From source file:com.taobao.datax.plugins.writer.sqlserverwriter.SqlserverWriter.java

/**
 * Pull lines from the receiver and bulk-load them into the target database
 * by streaming them through the driver's local-infile input stream.
 *
 * NOTE(review): despite living in SqlserverWriter, this code casts to
 * com.mysql.jdbc.Statement and uses MySQL-specific settings — confirm this
 * copy-paste from MysqlWriter is intentional.
 *
 * @param receiver source of the lines to write
 * @return {@code PluginStatus.SUCCESS.value()} on success
 * @throws DataExchangeException if the load fails for any reason
 */
@Override
public int startWrite(LineReceiver receiver) {
    com.mysql.jdbc.Statement stmt = null;
    try {
        this.connection = DBSource.getConnection(this.sourceUniqKey);
        // Unwrap the DBCP pooled connection to reach the driver-specific
        // statement that supports setLocalInfileInputStream.
        stmt = (com.mysql.jdbc.Statement) ((org.apache.commons.dbcp.DelegatingConnection) this.connection)
                .getInnermostDelegate()
                .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);

        /* set max count */
        this.logger.info(String.format("Config max_error_count: set max_error_count=%d", MAX_ERROR_COUNT));
        stmt.executeUpdate(String.format("set max_error_count=%d;", MAX_ERROR_COUNT));

        /* set connect encoding */
        this.logger.info(String.format("Config encoding %s .", this.encoding));
        for (String sql : this.makeLoadEncoding(encoding))
            stmt.execute(sql);

        /* load data begin */
        String loadSql = this.makeLoadSql();
        this.logger.info(String.format("Load sql: %s.", visualSql(loadSql)));

        SqlserverWriterInputStreamAdapter localInputStream = new SqlserverWriterInputStreamAdapter(receiver,
                this);
        stmt.setLocalInfileInputStream(localInputStream);
        stmt.executeUpdate(visualSql(loadSql));
        this.lineCounter = localInputStream.getLineNumber();

        // Fixed log message: this is the sqlserver writer, not mysql.
        this.logger.info("DataX write to sqlserver ends .");

        return PluginStatus.SUCCESS.value();
    } catch (Exception e2) {
        if (null != this.connection) {
            try {
                this.connection.close();
            } catch (SQLException ignored) {
                // best effort; the original failure thrown below is what matters
            }
        }
        // Wrap the exception itself rather than e2.getCause(): the cause may
        // be null (losing all diagnostics), and wrapping e2 preserves the
        // full chain.
        throw new DataExchangeException(e2);
    } finally {
        if (null != stmt)
            try {
                stmt.close();
            } catch (SQLException ignored) {
                // nothing useful to do if close fails
            }
    }
}

From source file:com.oracle.tutorial.jdbc.CoffeesTable.java

/**
 * Adjust the price of the named coffee by the given percentage inside a
 * transaction, rolling back to a savepoint if the new price would exceed the
 * maximum allowed price.
 *
 * @param coffeeName name of the coffee to reprice
 * @param priceModifier fractional price change (e.g. 0.10f for +10%)
 * @param maximumPrice highest price allowed before the update is rolled back
 * @throws SQLException if closing resources or restoring auto-commit fails
 */
public void modifyPricesByPercentage(String coffeeName, float priceModifier, float maximumPrice)
        throws SQLException {
    con.setAutoCommit(false); // run the read-modify-write as one transaction

    PreparedStatement getPrice = null;
    PreparedStatement updatePrice = null;
    ResultSet rs = null;

    try {
        Savepoint save1 = con.setSavepoint();
        // Parameterized queries prevent SQL injection through coffeeName
        // (the original concatenated it into both SQL strings).
        getPrice = con.prepareStatement("SELECT COF_NAME, PRICE FROM COFFEES WHERE COF_NAME = ?",
                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        getPrice.setString(1, coffeeName);
        updatePrice = con.prepareStatement("UPDATE COFFEES SET PRICE = ? WHERE COF_NAME = ?");

        rs = getPrice.executeQuery();
        // Check rs.first() so an empty result actually takes the "not found"
        // branch; the original tested execute(), which is always true for a
        // SELECT, making that branch unreachable.
        if (!rs.first()) {
            System.out.println("Could not find entry for coffee named " + coffeeName);
        } else {
            float oldPrice = rs.getFloat("PRICE");
            float newPrice = oldPrice + (oldPrice * priceModifier);
            System.out.println("Old price of " + coffeeName + " is " + oldPrice);
            System.out.println("New price of " + coffeeName + " is " + newPrice);
            System.out.println("Performing update...");
            updatePrice.setFloat(1, newPrice);
            updatePrice.setString(2, coffeeName);
            updatePrice.executeUpdate();
            System.out.println("\nCOFFEES table after update:");
            CoffeesTable.viewTable(con);
            if (newPrice > maximumPrice) {
                System.out.println("\nThe new price, " + newPrice + ", is greater than the maximum " + "price, "
                        + maximumPrice + ". Rolling back the transaction...");
                con.rollback(save1);
                System.out.println("\nCOFFEES table after rollback:");
                CoffeesTable.viewTable(con);
            }
            // Commit makes either the update or the rolled-back state durable.
            con.commit();
        }
    } catch (SQLException e) {
        JDBCTutorialUtilities.printSQLException(e);
    } finally {
        // Close the result set too; the original leaked it.
        if (rs != null) {
            rs.close();
        }
        if (getPrice != null) {
            getPrice.close();
        }
        if (updatePrice != null) {
            updatePrice.close();
        }
        con.setAutoCommit(true);
    }
}

From source file:org.globus.workspace.scheduler.defaults.DefaultSchedulerAdapterDB.java

/**
 * Database-agnostic way to allocate a contiguous batch of task IDs.
 *
 * What is stored in the row is the last ID that was USED already.
 *
 * This is O(1) now, not O(numNodes).
 *
 * @param numNodes number of IDs needed
 * @return the next numNodes task IDs, in ascending order
 * @throws WorkspaceDatabaseException if a database error occurs
 */
synchronized int[] getNextTasktIds(int numNodes) throws WorkspaceDatabaseException {

    PreparedStatement pstmt = null;
    PreparedStatement pstmt2 = null;
    ResultSet rs = null;
    Connection c = null;
    // -1 sentinels let the post-cleanup sanity checks detect a path that
    // failed to assign either value.
    int lastTaskId = -1;
    int newLastTaskId = -1;
    try {
        c = getConnection();

        pstmt = c.prepareStatement(DefaultSchedulerConstants.SQL_SELECT_DEFAULT_SCHED_REQ_ID,
                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
        rs = pstmt.executeQuery();

        if (!rs.next()) {
            // if there is no row in database, this is first time an ID
            // is needed, insert the value do not update it
            lastTaskId = 0;
            newLastTaskId = numNodes;
            pstmt2 = c.prepareStatement(DefaultSchedulerConstants.SQL_INSERT_DEFAULT_SCHED_REQ_ID);
            pstmt2.setInt(1, newLastTaskId);
            pstmt2.executeUpdate();
        } else {
            lastTaskId = rs.getInt(1);

            // Get the req Id and increment it
            newLastTaskId = lastTaskId + numNodes;

            pstmt2 = c.prepareStatement(DefaultSchedulerConstants.SQL_UPDATE_DEFAULT_SCHED_REQ_ID);
            pstmt2.setInt(1, newLastTaskId);
            pstmt2.executeUpdate();
        }
    } catch (SQLException e) {
        throw new WorkspaceDatabaseException(e);
    } finally {
        // Best-effort cleanup: close statements and result set, then return
        // the connection to the pool; failures here are logged, not thrown.
        try {
            if (pstmt != null) {
                pstmt.close();
            }
            if (pstmt2 != null) {
                pstmt2.close();
            }
            if (rs != null) {
                rs.close();
            }
            if (c != null) {
                returnConnection(c);
            }
        } catch (SQLException e) {
            logger.error("SQLException in finally cleanup", e);
        }
    }

    if (lastTaskId < 0) {
        throw new WorkspaceDatabaseException("lastTaskId not expected " + "to be negative here");
    }
    if (newLastTaskId < 0) {
        throw new WorkspaceDatabaseException("newLastTaskId not expected" + " to be negative here");
    }

    if (newLastTaskId - lastTaskId != numNodes) {
        throw new WorkspaceDatabaseException("difference expected to be " + "equal to numNodes here");
    }

    // Expand the reserved range (lastTaskId, newLastTaskId] into the
    // individual IDs handed back to the caller.
    final int[] ret = new int[numNodes];
    for (int i = 0; i < numNodes; i++) {
        lastTaskId += 1;
        ret[i] = lastTaskId;
    }
    return ret;
}