Example usage for java.sql ResultSet CONCUR_READ_ONLY

List of usage examples for java.sql ResultSet CONCUR_READ_ONLY

Introduction

On this page you can find example usage of the java.sql ResultSet CONCUR_READ_ONLY field.

Prototype

int CONCUR_READ_ONLY

Document

The constant indicating the concurrency mode for a ResultSet object that may NOT be updated.
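
The field is typically passed, together with a result-set type, as the concurrency argument of Connection.createStatement, prepareStatement, or prepareCall, as the examples below show. A minimal sketch of this pattern, assuming a javax.sql.DataSource named dataSource and a table named example (both placeholders, not taken from any example on this page):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;

public final class ReadOnlyQueryExample {

    // Forward-only, read-only cursor: the usual combination for plain reads.
    public static int countRows(DataSource dataSource) throws SQLException {
        try (Connection conn = dataSource.getConnection();
                Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY,
                        ResultSet.CONCUR_READ_ONLY);
                ResultSet rs = stmt.executeQuery("SELECT * FROM example")) {
            int count = 0;
            while (rs.next()) {
                count++; // rows can be read but not updated; updater methods would throw SQLException
            }
            return count;
        }
    }
}

With CONCUR_READ_ONLY the driver does not have to track changes made through the cursor, so it is the usual choice when a result set is only iterated; CONCUR_UPDATABLE is needed only when rows will be modified through the ResultSet itself.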

Usage

From source file:de.tudarmstadt.ukp.dkpro.core.io.jdbc.JdbcReader.java

private void query() throws ResourceInitializationException {
    try {
        Statement statement = sqlConnection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_READ_ONLY);
        resultSet = statement.executeQuery(query);
        // Jump to the last row to determine the result-set size, then rewind;
        // this requires a scrollable type such as TYPE_SCROLL_INSENSITIVE.
        resultSet.last();
        resultSetSize = resultSet.getRow();
        resultSet.beforeFirst();
        completed = 0;

        // Store available column names
        columnNames = new HashSet<String>();
        ResultSetMetaData meta = resultSet.getMetaData();
        for (int i = 1; i < meta.getColumnCount() + 1; i++) {
            String columnName = meta.getColumnLabel(i);
            columnNames.add(columnName);
            if (!CAS_COLUMNS.contains(columnName)) {
                getLogger().warn("Unknown column [" + columnName + "].");
            }
        }
    } catch (SQLException e) {
        throw new ResourceInitializationException(e);
    }
}

From source file:com.adaptris.jdbc.connection.FailoverDatasourceTest.java

@Test
public void testStatements() throws Exception {
    Connection conn = new MyProxy();
    try {
        try {
            conn.nativeSQL("SELECT * FROM SEQUENCES");
        } catch (Exception e) {

        }
        try {
            conn.createStatement();
        } catch (Exception e) {

        }
        try {
            conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
        } catch (Exception e) {

        }
        try {
            conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
        } catch (Exception e) {

        }
        try {
            conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE,
                    ResultSet.CLOSE_CURSORS_AT_COMMIT);
        } catch (Exception e) {

        }
        try {
            conn.prepareStatement("SELECT * FROM SEQUENCES");
        } catch (Exception e) {

        }
        try {
            conn.prepareStatement("SELECT * FROM SEQUENCES", ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY);
        } catch (Exception e) {

        }
        try {
            conn.prepareStatement("SELECT * FROM SEQUENCES", ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY, ResultSet.CLOSE_CURSORS_AT_COMMIT);
        } catch (Exception e) {

        }
        try {
            conn.prepareStatement("INSERT INTO sequences (id, seq_number) values ('id', 2)",
                    Statement.NO_GENERATED_KEYS);
        } catch (Exception e) {
        }
        try {
            conn.prepareStatement("INSERT INTO sequences (id, seq_number) values ('id', 2)", new int[0]);
        } catch (Exception e) {
        }
        try {
            conn.prepareStatement("INSERT INTO sequences (id, seq_number) values ('id', 2)", new String[0]);
        } catch (Exception e) {

        }

        try {
            conn.prepareCall("SELECT * FROM SEQUENCES");
        } catch (Exception e) {

        }
        try {
            conn.prepareCall("SELECT * FROM SEQUENCES", ResultSet.TYPE_FORWARD_ONLY,
                    ResultSet.CONCUR_READ_ONLY);
        } catch (Exception e) {

        }
        try {
            conn.prepareCall("SELECT * FROM SEQUENCES", ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY,
                    ResultSet.CLOSE_CURSORS_AT_COMMIT);
        } catch (Exception e) {

        }
    } finally {
        JdbcUtil.closeQuietly(conn);

    }
}

From source file:org.ala.lucene.ExternalIndexLoader.java

/**
 * Loads collections and institutions into the BIE search index.
 *
 * @throws Exception
 */
public void loadCollections() throws Exception {

    logger.info("Starting syncing collection information....");
    Connection conn = collectoryDataSource.getConnection();
    Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery(
            "select uid, guid, name, acronym, collection_type, pub_description, sub_collections, keywords from collection");

    SolrServer solrServer = solrUtils.getSolrServer();
    solrServer.deleteByQuery("idxtype:" + IndexedTypes.COLLECTION); // delete collections!

    while (rs.next()) {
        String uid = rs.getString("uid");
        String externalGuid = rs.getString("guid");
        String name = rs.getString("name");
        String acronym = rs.getString("acronym");
        String description = rs.getString("pub_description");
        String subCollections = rs.getString("sub_collections");
        String keywords = rs.getString("keywords");
        String collectionType = rs.getString("collection_type");

        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("acronym", acronym, 1.2f);
        doc.addField("name", name, 1.2f);
        doc.addField("guid", baseUrlForCollectory + uid);

        doc.addField("otherGuid", uid); // the internal UID e.g. co1
        if (externalGuid != null) {
            doc.addField("otherGuid", externalGuid); // the external GUID e.g. url:lsid:bci:123
        }

        //add as text
        doc.addField("text", description);
        doc.addField("text", subCollections);
        doc.addField("text", keywords);
        doc.addField("text", collectionType);

        doc.addField("url", baseUrlForCollectory + uid);
        doc.addField("id", baseUrlForCollectory + uid);
        doc.addField("idxtype", IndexedTypes.COLLECTION);
        //         doc.addField("aus_s", "yes");
        doc.addField("australian_s", "recorded"); // so they appear in default QF search

        solrServer.add(doc);
    }

    solrServer.commit();
    rs.close();
    stmt.close();
    conn.close();
    logger.info("Finished syncing collection information with the collectory.");
}

From source file:com.splicemachine.derby.impl.sql.execute.operations.CallStatementOperationIT.java

@Test
@Category(SlowTest.class)
public void testCallSQLTABLESInAppSchema() throws Exception {
    CallableStatement cs = methodWatcher.prepareCall("call SYSIBM.SQLTABLES(null,'"
            + CallStatementOperationIT.class.getSimpleName().toUpperCase() + "',null,null,null)",
            ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = cs.executeQuery();
    int count = 0;
    while (rs.next()) {
        Object data = rs.getObject(2);
        count++;
    }
    Assert.assertTrue("Incorrect rows returned!", count > 0);
    DbUtils.closeQuietly(rs);
}

From source file:com.tfm.utad.sqoopdata.SqoopVerticaDB.java

private static void findBetweenMinIDAndMaxID(Connection conn, Long minID, Long maxID) {
    Statement stmt = null;
    String query;
    try {
        stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        query = "SELECT * FROM s1.coordinates WHERE id > " + minID + " AND id <= " + maxID + "";
        LOG.info("Query execution: " + query);
        ResultSet rs = stmt.executeQuery(query);
        int batch = 0;
        List<CoordinateCartoDB> result = new ArrayList<>();
        long start_time = System.currentTimeMillis();
        while (rs.next()) {
            batch++;
            CoordinateCartoDB cdb = new CoordinateCartoDB((long) rs.getInt("id"), rs.getString("userstr"),
                    rs.getString("created_date"), rs.getString("activity"), rs.getFloat("latitude"),
                    rs.getFloat("longitude"), (long) rs.getInt("userid"));
            result.add(cdb);
            if (batch == 50) {
                sendDataToCartoDB(result);
                batch = 0;
                result = new ArrayList<>();
            }
        }
        if (batch > 0) {
            sendDataToCartoDB(result);
        }
        long end_time = System.currentTimeMillis();
        long difference = end_time - start_time;
        LOG.info("CartoDB API execution time: " + String.format("%d min %d sec",
                TimeUnit.MILLISECONDS.toMinutes(difference), TimeUnit.MILLISECONDS.toSeconds(difference)
                        - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(difference))));
    } catch (SQLException e) {
        LOG.error("SQLException error: " + e.toString());
    } finally {
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ex) {
                LOG.error("Statement error: " + ex.toString());
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ex) {
                LOG.error("Connection error: " + ex.toString());
            }
        }
    }
}

From source file:edu.ku.brc.af.core.db.MySQLBackupService.java

@Override
public Vector<String> getTableNames() {
    Vector<String> tablesNames = new Vector<String>();

    Connection dbConnection = null;
    Statement dbStatement = null;
    try {
        dbConnection = DBConnection.getInstance().createConnection();
        if (dbConnection != null) {
            dbStatement = dbConnection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE,
                    ResultSet.CONCUR_READ_ONLY);
            ResultSet resultSet = dbStatement.executeQuery("show tables");

            ResultSetMetaData metaData = resultSet.getMetaData();
            while (resultSet.next()) {
                for (int i = 0; i < metaData.getColumnCount(); i++) {
                    String name = resultSet.getString(i + 1);
                    tablesNames.add(name);
                }
            }
            resultSet.close();
            return tablesNames;
        }
    } catch (SQLException ex) {
        ex.printStackTrace();
    } finally {
        try {
            if (dbStatement != null) {
                dbStatement.close();
            }
            if (dbConnection != null) {
                dbConnection.close();
            }
        } catch (SQLException ex) {
            ex.printStackTrace();
        }
    }

    return null;
}

From source file:com.tera.common.database.query.CQueryService.java

@Override
public PreparedStatement prepareStatement(String query) {
    return prepareStatement(query, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
}

From source file:edu.ku.brc.specify.conversion.IdTableMapper.java

/**
 * Map all the old IDs to new IDs
 * @param sqlArg the string to use to fill the map
 */
public void mapAllIds(final String sqlArg) {
    log.debug("mapAllIds with sql: " + sqlArg);
    this.sql = sqlArg;

    int mappingCount = getMapCount(mapTableName);
    wasEmpty = mappingCount == 0;

    if (doDelete || mappingCount == 0) {
        BasicSQLUtils.deleteAllRecordsFromTable(oldConn, mapTableName, BasicSQLUtils.myDestinationServerType);
        if (frame != null) {
            String dMsg = "Mapping " + mapTableName;
            frame.setDesc(dMsg);
            log.debug(dMsg);
        }

        try {
            log.debug("Executing: " + sql);
            PreparedStatement pStmt = oldConn.prepareStatement("INSERT INTO " + mapTableName + " VALUES (?,?)");
            Statement stmtOld = oldConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_READ_ONLY);
            ResultSet rs = stmtOld.executeQuery(sql);

            if (rs.last()) {
                if (frame != null) {
                    frame.setProcess(0, rs.getRow());
                }
            }

            if (rs.first()) {
                int newIndex = initialIndex;
                do {
                    int oldIndex = rs.getInt(1);
                    //log.debug("map "+mapTableName+" old[" + oldIndex + "] new [" + newIndex +"]");

                    if (indexIncremeter != null) {
                        newIndex = indexIncremeter.getNextIndex();
                    }

                    pStmt.setInt(1, oldIndex); // Old Index
                    pStmt.setInt(2, newIndex); // New Index
                    if (pStmt.executeUpdate() != 1) {
                        String msg = String.format("Error writing to Map table[%s] old: %d  new: %d",
                                mapTableName, oldIndex, newIndex);
                        log.error(msg);
                        throw new RuntimeException(msg);
                    }

                    newIndex++; // incrementing doesn't matter when there is an indexIncremeter

                    if (frame != null) {
                        if (newIndex % 1000 == 0) {
                            frame.setProcess(newIndex);
                        }

                    } else {
                        if (newIndex % 2000 == 0) {
                            log.debug("Mapped " + newIndex + " records from " + tableName);
                        }
                    }

                } while (rs.next());
                log.info("Mapped " + newIndex + " records from " + tableName);

            } else {
                log.info("No records to map in " + tableName);
            }
            rs.close();
            stmtOld.close();
            pStmt.close();

        } catch (SQLException ex) {
            ex.printStackTrace();
            edu.ku.brc.af.core.UsageTracker.incrSQLUsageCount();
            edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(IdTableMapper.class, ex);
            log.error(ex);
            throw new RuntimeException(ex);
        }
    } else {
        log.debug("Skipping the build of mapper: " + mapTableName);
    }

    if (frame != null) {
        frame.setProcess(0, 0);
    }
}

From source file:com.splicemachine.derby.impl.sql.catalog.SqlStatisticsIT.java

private int getResultSetCountFromShowIndexes(String schemaName, String tableName) throws Exception {
    if (schemaName == null) {
        schemaName = "null";
    } else {
        schemaName = "'" + schemaName + "'";
    }
    if (tableName == null) {
        tableName = "null";
    } else {
        tableName = "'" + tableName + "'";
    }
    CallableStatement cs = methodWatcher.prepareCall(
            format("call SYSIBM.SQLSTATISTICS(null, %s, %s, 1, 1, null)", schemaName, tableName),
            ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = cs.executeQuery();
    int count = 0;
    LOG.trace(format("Show Indexes Args: schema = %s, table = %s", schemaName, tableName));
    while (rs.next()) {
        String schema = rs.getString("TABLE_SCHEM");
        String table = rs.getString("TABLE_NAME");
        String index = rs.getString("INDEX_NAME");
        String column = rs.getString("COLUMN_NAME");
        int position = rs.getInt("ORDINAL_POSITION");
        LOG.trace(
                format("Show Indexes Results: schema = %s, table = %s, index = %s, column = %s, position = %s",
                        schema, table, index, column, position));
        count++;
    }
    LOG.trace(format("Show Indexes Results: count = %s", count));
    DbUtils.closeQuietly(rs);
    return count;
}

From source file:com.oracle.tutorial.jdbc.CoffeesTable.java

public void modifyPricesByPercentage(String coffeeName, float priceModifier, float maximumPrice)
        throws SQLException {
    con.setAutoCommit(false);

    Statement getPrice = null;
    Statement updatePrice = null;
    ResultSet rs = null;
    String query = "SELECT COF_NAME, PRICE FROM COFFEES " + "WHERE COF_NAME = '" + coffeeName + "'";

    try {
        Savepoint save1 = con.setSavepoint();
        getPrice = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        updatePrice = con.createStatement();

        if (!getPrice.execute(query)) {
            System.out.println("Could not find entry for coffee named " + coffeeName);
        } else {
            rs = getPrice.getResultSet();
            rs.first();
            float oldPrice = rs.getFloat("PRICE");
            float newPrice = oldPrice + (oldPrice * priceModifier);
            System.out.println("Old price of " + coffeeName + " is " + oldPrice);
            System.out.println("New price of " + coffeeName + " is " + newPrice);
            System.out.println("Performing update...");
            updatePrice.executeUpdate(
                    "UPDATE COFFEES SET PRICE = " + newPrice + " WHERE COF_NAME = '" + coffeeName + "'");
            System.out.println("\nCOFFEES table after update:");
            CoffeesTable.viewTable(con);
            if (newPrice > maximumPrice) {
                System.out.println("\nThe new price, " + newPrice + ", is greater than the maximum " + "price, "
                        + maximumPrice + ". Rolling back the transaction...");
                con.rollback(save1);
                System.out.println("\nCOFFEES table after rollback:");
                CoffeesTable.viewTable(con);
            }
            con.commit();
        }
    } catch (SQLException e) {
        JDBCTutorialUtilities.printSQLException(e);
    } finally {
        if (getPrice != null) {
            getPrice.close();
        }
        if (updatePrice != null) {
            updatePrice.close();
        }
        con.setAutoCommit(true);
    }
}