Example usage for java.sql ResultSet isClosed

Introduction

This page presents example usages of java.sql ResultSet isClosed, drawn from the open-source projects listed below.

Prototype

boolean isClosed() throws SQLException;

Document

Retrieves whether this ResultSet object has been closed.
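
Unlike most ResultSet methods, isClosed() may be called even after the ResultSet has been closed, which makes it useful for guarding cleanup code. Per the JDBC Javadoc, close() on an already-closed ResultSet is a no-op, so the guard is purely defensive, but it is common in practice, as the examples below show. A minimal sketch, assuming the H2 driver is on the classpath (the JDBC URL and query are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class IsClosedExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("VALUES (1)");
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
            // isClosed() reports the current state, so we can close defensively
            if (!rs.isClosed()) {
                rs.close();
            }
        }
    }
}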

Usage

From source file: org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Construct a batch of KiWiTriples from the result of an SQL query. This method differs from
 * constructTripleFromDatabase in that it batch-prefetches the referenced nodes for better performance.
 *
 * @param row a database result containing the columns described above
 * @return a list of KiWiTriple representations of the database results
 */
protected List<KiWiTriple> constructTriplesFromDatabase(ResultSet row, int maxPrefetch) throws SQLException {
    int count = 0;

    // declare variables to optimize stack allocation
    KiWiTriple triple;
    long id;

    List<KiWiTriple> result = new ArrayList<>();
    Map<Long, Long[]> tripleIds = new HashMap<>();
    Set<Long> nodeIds = new HashSet<>();
    while (count < maxPrefetch && row.next()) {
        count++;

        if (row.isClosed()) {
            throw new ResultInterruptedException("retrieving results has been interrupted");
        }

        // columns: id,subject,predicate,object,context,deleted,inferred,creator,createdAt,deletedAt
        //          1 ,2      ,3        ,4     ,5      ,6      ,7       ,8      ,9        ,10

        id = row.getLong(1);

        triple = tripleCache.get(id);

        // lookup element in cache first, so we can avoid reconstructing it if it is already there
        if (triple != null) {
            result.add(triple);
        } else {

            triple = new KiWiTriple();
            triple.setId(id);

            // collect node ids for batch retrieval
            nodeIds.add(row.getLong(2));
            nodeIds.add(row.getLong(3));
            nodeIds.add(row.getLong(4));

            if (row.getLong(5) != 0) {
                nodeIds.add(row.getLong(5));
            }

            if (row.getLong(8) != 0) {
                nodeIds.add(row.getLong(8));
            }

            // remember which node ids were relevant for the triple
            tripleIds.put(id, new Long[] { row.getLong(2), row.getLong(3), row.getLong(4), row.getLong(5),
                    row.getLong(8) });

            triple.setDeleted(row.getBoolean(6));
            triple.setInferred(row.getBoolean(7));
            triple.setCreated(new Date(row.getTimestamp(9).getTime()));
            try {
                if (row.getDate(10) != null) {
                    triple.setDeletedAt(new Date(row.getTimestamp(10).getTime()));
                }
            } catch (SQLException ex) {
                // work around a MySQL problem with null dates
                // (see http://stackoverflow.com/questions/782823/handling-datetime-values-0000-00-00-000000-in-jdbc)
            }

            result.add(triple);
        }
    }

    KiWiNode[] nodes = loadNodesByIds(Longs.toArray(nodeIds));
    Map<Long, KiWiNode> nodeMap = new HashMap<>();
    for (int i = 0; i < nodes.length; i++) {
        nodeMap.put(nodes[i].getId(), nodes[i]);
    }

    for (KiWiTriple t : result) {
        if (tripleIds.containsKey(t.getId())) {
            // need to set subject, predicate, object, context and creator
            Long[] ids = tripleIds.get(t.getId());
            t.setSubject((KiWiResource) nodeMap.get(ids[0]));
            t.setPredicate((KiWiUriResource) nodeMap.get(ids[1]));
            t.setObject(nodeMap.get(ids[2]));

            if (ids[3] != 0) {
                t.setContext((KiWiResource) nodeMap.get(ids[3]));
            }

            if (ids[4] != 0) {
                t.setCreator((KiWiResource) nodeMap.get(ids[4]));
            }

        }

        cacheTriple(t);
    }

    return result;
}
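
Here isClosed() is polled inside the fetch loop: if another thread closes the ResultSet to abort the query, the loop fails fast with a dedicated exception instead of dying on an arbitrary column read. The cancellation pattern in isolation, as a sketch (process() stands in for the per-row work):

while (rs.next()) {
    // another thread may close the ResultSet to signal cancellation;
    // isClosed() remains safe to call even after that happens
    if (rs.isClosed()) {
        throw new SQLException("retrieving results has been interrupted");
    }
    process(rs);
}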

From source file: com.smict.person.data.DoctorData.java

public List<DoctorModel> getBranchMgr(int docId) {

    String SQL = "SELECT    branch_mgr_rel_doctor.price,branch.branch_name,doctor.first_name_th, "
            + "doctor.last_name_th,pre_name.pre_name_th, branch_mgr_rel_doctor.branch_id " + "FROM   branch "
            + "INNER JOIN branch_mgr_rel_doctor ON branch.branch_id = branch_mgr_rel_doctor.branch_id "
            + "INNER JOIN doctor ON doctor.doctor_id = branch_mgr_rel_doctor.doctor_id "
            + "INNER JOIN pre_name ON doctor.pre_name_id = pre_name.pre_name_id "
            + "WHERE branch_mgr_rel_doctor.doctor_id = " + docId;
    try {
        conn = agent.getConnectMYSql();
        Stmt = conn.createStatement();
        ResultSet rs = Stmt.executeQuery(SQL);

        List<DoctorModel> doctorList = new ArrayList<DoctorModel>();
        while (rs.next()) {
            DoctorModel docModel = new DoctorModel();
            docModel.setPrice(rs.getInt("price"));
            docModel.setBranchStandID(rs.getString("branch_id"));
            docModel.setBranchName(rs.getString("branch_name"));
            docModel.setFirst_name_th(rs.getString("first_name_th"));
            docModel.setLast_name_th(rs.getString("last_name_th"));
            docModel.setPre_name_th(rs.getString("pre_name_th"));
            doctorList.add(docModel);
        }
        if (!rs.isClosed())
            rs.close();
        if (!Stmt.isClosed())
            Stmt.close();
        if (!conn.isClosed())
            conn.close();
        return doctorList;
    } catch (IOException e) {
        // log the failure; the method falls through to return null below
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }

    return null;
}
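
The isClosed() guards before each close() above are the pre-Java-7 cleanup idiom. Since Java 7, try-with-resources expresses the same cleanup declaratively and also closes the resources when an exception escapes the loop. A sketch of the same method in that style, reusing agent and DoctorModel from the class above (the WHERE clause is also parameterized):

public List<DoctorModel> getBranchMgr(int docId) throws Exception {
    String sql = "SELECT ... FROM branch ... WHERE branch_mgr_rel_doctor.doctor_id = ?";
    List<DoctorModel> doctorList = new ArrayList<DoctorModel>();
    try (Connection conn = agent.getConnectMYSql();
            PreparedStatement ps = conn.prepareStatement(sql)) {
        ps.setInt(1, docId);
        try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                DoctorModel docModel = new DoctorModel();
                docModel.setPrice(rs.getInt("price"));
                // ... remaining setters as above ...
                doctorList.add(docModel);
            }
        }
    }
    // no explicit isClosed()/close() calls: try-with-resources closes
    // rs, ps and conn in reverse order, even when an exception is thrown
    return doctorList;
}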

From source file: org.dspace.storage.rdbms.DatabaseUtils.java

public static boolean sequenceExists(Connection connection, String sequenceName) {
    boolean exists = false;
    PreparedStatement statement = null;
    ResultSet results = null;
    // Whether or not to filter query based on schema (this is DB Type specific)
    boolean schemaFilter = false;

    try {
        // Get the name of the Schema that the DSpace Database is using
        // (That way we can search the right schema)
        String schema = getSchemaName(connection);

        // Canonicalize everything to the proper case based on DB type
        schema = canonicalize(connection, schema);
        sequenceName = canonicalize(connection, sequenceName);

        // Different database types store sequence information in different tables
        String dbtype = getDbType(connection);
        String sequenceSQL = null;
        switch (dbtype) {
        case DBMS_POSTGRES:
            // Default schema in PostgreSQL is "public"
            if (schema == null) {
                schema = "public";
            }
            // PostgreSQL specific query for a sequence in a particular schema
            sequenceSQL = "SELECT COUNT(1) FROM pg_class, pg_namespace "
                    + "WHERE pg_class.relnamespace=pg_namespace.oid " + "AND pg_class.relkind='S' "
                    + "AND pg_class.relname=? " + "AND pg_namespace.nspname=?";
            // We need to filter by schema in PostgreSQL
            schemaFilter = true;
            break;
        case DBMS_ORACLE:
            // Oracle specific query for a sequence owned by our current DSpace user
            // NOTE: No need to filter by schema for Oracle, as Schema = User
            sequenceSQL = "SELECT COUNT(1) FROM user_sequences WHERE sequence_name=?";
            break;
        case DBMS_H2:
            // In H2, sequences are listed in the "information_schema.sequences" table
            // SEE: http://www.h2database.com/html/grammar.html#information_schema
            sequenceSQL = "SELECT COUNT(1) " + "FROM INFORMATION_SCHEMA.SEQUENCES " + "WHERE SEQUENCE_NAME = ?";
            break;
        default:
            throw new SQLException("DBMS " + dbtype + " is unsupported.");
        }

        // If we have a SQL query to run for the sequence, then run it
        if (sequenceSQL != null) {
            // Run the query, passing it our parameters
            statement = connection.prepareStatement(sequenceSQL);
            statement.setString(1, sequenceName);
            if (schemaFilter) {
                statement.setString(2, schema);
            }
            results = statement.executeQuery();

            // If results are non-zero, then this sequence exists!
            if (results != null && results.next() && results.getInt(1) > 0) {
                exists = true;
            }
        }
    } catch (SQLException e) {
        log.error("Error attempting to determine if sequence " + sequenceName + " exists", e);
    } finally {
        try {
            // Ensure statement gets closed
            if (statement != null && !statement.isClosed())
                statement.close();
            // Ensure ResultSet gets closed
            if (results != null && !results.isClosed())
                results.close();
        } catch (SQLException e) {
            // ignore it
        }
    }

    return exists;
}
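
The finally block above is the canonical form of the pattern this page documents: null-check, then isClosed(), then close(), swallowing any SQLException thrown during cleanup. When the pattern recurs, it is often extracted into a small helper; a hypothetical sketch (closeQuietly is not part of DSpace):

private static void closeQuietly(ResultSet results, Statement statement) {
    try {
        // skip resources that are already closed
        if (results != null && !results.isClosed()) {
            results.close();
        }
    } catch (SQLException e) {
        // nothing useful to do during cleanup
    }
    try {
        if (statement != null && !statement.isClosed()) {
            statement.close();
        }
    } catch (SQLException e) {
        // ignore, as above
    }
}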

From source file: com.smict.person.data.DoctorData.java

public List<DoctorModel> getBranchStandard(int docId) {

    String SQL = "SELECT    branch_standard_rel_doctor.price,branch.branch_name,doctor.first_name_th, "
            + "doctor.last_name_th,pre_name.pre_name_th, branch_standard_rel_doctor.branch_id,branch_standard_rel_doctor.income_type, "
            + "branch_standard_rel_doctor.startdate_time,branch_standard_rel_doctor.finish_datetime, "
            + "branch_standard_rel_doctor.work_hour,branch_standard_rel_doctor.late_min,branch_standard_rel_doctor.early_min "
            + "FROM   branch "
            + "INNER JOIN branch_standard_rel_doctor ON branch.branch_id = branch_standard_rel_doctor.branch_id "
            + "INNER JOIN doctor ON doctor.doctor_id = branch_standard_rel_doctor.doctor_id "
            + "INNER JOIN pre_name ON doctor.pre_name_id = pre_name.pre_name_id "
            + "WHERE branch_standard_rel_doctor.doctor_id = " + docId;
    try {
        conn = agent.getConnectMYSql();
        Stmt = conn.createStatement();
        ResultSet rs = Stmt.executeQuery(SQL);

        List<DoctorModel> doctorList = new ArrayList<DoctorModel>();
        while (rs.next()) {
            DoctorModel docModel = new DoctorModel();
            docModel.setPrice(rs.getInt("price"));
            docModel.setBranchStandID(rs.getString("branch_id"));
            docModel.setBranchName(rs.getString("branch_name"));
            docModel.setFirst_name_th(rs.getString("first_name_th"));
            docModel.setLast_name_th(rs.getString("last_name_th"));
            docModel.setPre_name_th(rs.getString("pre_name_th"));
            docModel.setIncome_type(rs.getInt("income_type"));
            docModel.setWork_hour(rs.getInt("work_hour"));
            docModel.setLate_min(rs.getInt("late_min"));
            docModel.setEarly_min(rs.getInt("early_min"));
            docModel.setStart_datetime(rs.getString("startdate_time"));
            docModel.setFinish_datetime(rs.getString("finish_datetime"));
            doctorList.add(docModel);
        }
        if (!rs.isClosed())
            rs.close();
        if (!Stmt.isClosed())
            Stmt.close();
        if (!conn.isClosed())
            conn.close();
        return doctorList;
    } catch (IOException e) {
        // log the failure; the method falls through to return null below
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }

    return null;
}

From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java

/**
 * Gets the list of logged messages for the paxosID. The static method
 * PaxosLogger.rollForward(.) can be directly invoked to replay these
 * messages without explicitly invoking this method.
 *
 * @param paxosID
 * @param fieldConstraints
 * @return A list of logged messages for {@code paxosID} meeting
 *         {@code fieldConstraints}.
 */
private synchronized ArrayList<PaxosPacket> getLoggedMessages(String paxosID, String fieldConstraints) {
    long t = System.currentTimeMillis();
    if (ENABLE_JOURNALING && LOG_INDEX_FREQUENCY > 0)
        this.syncLogMessagesIndex(paxosID);

    ArrayList<PaxosPacket> messages = new ArrayList<PaxosPacket>();
    if (DISABLE_GET_LOGGED_MESSAGES)
        return messages;

    PreparedStatement pstmt = null;
    ResultSet messagesRS = null;
    Connection conn = null;
    try {
        conn = this.getDefaultConn();
        pstmt = this.getPreparedStatement(conn, getMTable(), paxosID,
                "packet_type, message" + (ENABLE_JOURNALING ? ", logfile, foffset, length" : ""),
                fieldConstraints);
        messagesRS = pstmt.executeQuery();

        assert (!messagesRS.isClosed());
        while (messagesRS.next()) {
            assert (!ENABLE_JOURNALING || messagesRS.getString("logfile") != null);

            String logMsgStr = null;
            byte[] logMsgBytes = null;
            try {
                logMsgBytes = (!ENABLE_JOURNALING
                        ? (!getLogMessageBlobOption() ? messagesRS.getString("message").getBytes(CHARSET)
                                : lobToBytes(messagesRS.getBlob("message")))

                        : this.getJournaledMessage(messagesRS.getString("logfile"),
                                messagesRS.getLong("foffset"), messagesRS.getInt("length"), null));
                logMsgStr = new String(logMsgBytes, CHARSET);
            } catch (SQLException | IOException e) {
                /* It is possible that a journal file gets garbage collected
                 * while getJournaledMessage is trying to get logged
                 * messages from it, so IOExceptions here are not fatal. */
                log.severe(this + ":" + e.getClass().getSimpleName() + " while getting logged messages for "
                        + paxosID + ":" + messagesRS.getString("packet_type") + ":"
                        + messagesRS.getString("logfile") + ":" + messagesRS.getLong("foffset") + ":"
                        + messagesRS.getInt("length"));
                e.printStackTrace();
            }
            if (logMsgBytes == null)
                continue;

            PaxosPacket packet = this.getPacketizer() != null ? getPacketizer().stringToPaxosPacket(logMsgBytes)
                    : PaxosPacket.getPaxosPacket(logMsgStr);
            if (packet == null) {
                log.severe(this + " retrieved null packet from logMsgStr");
                continue;
            }
            assert (packet == null || !(packet instanceof AcceptPacket)
                    || ((AcceptPacket) packet).hasRequestValue()) : packet;
            // sanity check for DB-journal consistency
            assert (packet == null || packet.getType().getInt() == messagesRS.getInt("packet_type"));
            messages.add(packet);
        }
    } catch (SQLException | JSONException e) {
        log.severe(e.getClass().getSimpleName() + " while getting slot for " + paxosID);
        e.printStackTrace();
    } finally {
        cleanup(pstmt, messagesRS);
        cleanup(conn);
    }
    if (ENABLE_INSTRUMENTATION && Util.oneIn(Integer.MAX_VALUE))
        DelayProfiler.updateDelay("getLoggedMessages", t);
    return messages;
}
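
In this logger, isClosed() serves as a sanity assertion rather than a cleanup guard: immediately after executeQuery(), the cursor is expected to be open. The pattern in isolation (a sketch):

ResultSet rs = pstmt.executeQuery();
// executeQuery() must return an open cursor; a closed one here
// would indicate a driver or connection-pooling bug
assert !rs.isClosed();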

From source file: edu.umass.cs.gigapaxos.SQLPaxosLogger.java

private synchronized ArrayList<String> getIndexedLogfiles(String table) {

    PreparedStatement pstmt = null;
    ResultSet messagesRS = null;
    Connection conn = null;
    ArrayList<String> logfiles = new ArrayList<String>();
    String cmd = "select distinct " + (table.equals(getMTable()) ? "logfile" : "min_logfile") + " from "
            + table;
    try {
        // long t = System.currentTimeMillis();
        conn = this.getDefaultConn();
        pstmt = conn.prepareStatement(cmd);
        messagesRS = pstmt.executeQuery();

        assert (!messagesRS.isClosed());
        while (messagesRS.next())
            logfiles.add(messagesRS.getString(1));
        assert (!logfiles.isEmpty()) : this + " found no minLogfile with query \"" + cmd + "\"";

        // DelayProfiler.updateDelay("get_indexed_logfiles", t);
    } catch (SQLException e) {
        log.severe(e.getClass().getSimpleName() + " while getting logfile names");
        e.printStackTrace();
    } finally {
        cleanup(pstmt, messagesRS);
        cleanup(conn);
    }
    return logfiles;
}

From source file: org.zaproxy.zap.extension.callgraph.CallGraphFrame.java

/**
 * Sets up the graph by retrieving the nodes and edges from the history table in the database.
 *
 * @param urlPattern the pattern that URLs must match to be included in the graph
 * @throws SQLException
 */
private void setupGraph(Pattern urlPattern) throws SQLException {
    Connection conn = null;
    Statement st = null;
    ResultSet rs = null;
    Map<String, String> schemaAuthorityToColor = new HashMap<String, String>();
    // use some web safe colours. Currently, there are 24 colours.
    String[] colors = { "#FFFF00", "#FFCC00", "#FF9900", "#FF6600", "#FF3300", "#CCFF00", "#CCCC00", "#CC9900",
            "#CC6600", "#99FF00", "#999900", "#996600", "#CCFFCC", "#CCCCCC", "#99CCCC", "#9999CC", "#9966CC",
            "#66FFCC", "#6699CC", "#6666CC", "#33FFCC", "#33CCCC", "#3399CC", "#00FFCC" };
    int colorsUsed = 0;
    try {
        // get a new connection to the database to query it, since the existing database classes
        // do not cater for
        // ad-hoc queries on the table
        /*
         * TODO Add-ons should NOT make their own connections to the db any more - the db layer is pluggable
         * so could be implemented in a completely different way
         * TODO: how? There is currently no API to do this.
         */
        // Note: the db is a singleton instance, so do *not* close it!!
        Database db = Model.getSingleton().getDb();
        if (!(db instanceof ParosDatabase)) {
            throw new InvalidParameterException(db.getClass().getCanonicalName());
        }

        conn = ((ParosDatabaseServer) db.getDatabaseServer()).getNewConnection();

        // we begin adding stuff to the graph, so begin a "transaction" on it.
        // we will close this after we add all the vertices and edges to the graph
        graph.getModel().beginUpdate();

        // prepare to add the vertices to the graph
        // this must include all URLs referenced as vertices, even if those URLs did not feature
        // in the history table in their own right

        // include entries of type 1 (proxied), 2 (spidered), 10 (Ajax spidered) from the
        // history
        st = conn.createStatement();
        rs = st.executeQuery(
                "select distinct URI from HISTORY where histtype in (1,2,10) union distinct select distinct  RIGHT(REGEXP_SUBSTRING (REQHEADER, 'Referer:.+') , LENGTH(REGEXP_SUBSTRING (REQHEADER, 'Referer:.+'))-LENGTH('Referer: ')) from HISTORY where REQHEADER like '%Referer%' and histtype in (1,2,10) order by 1");
        while (rs.next()) {
            String url = rs.getString(1);

            // remove urls that do not match the pattern specified (all sites / one site)
            Matcher urlmatcher = urlPattern.matcher(url);
            if (urlmatcher.find()) {
                // addVertex(url , url);
                try {
                    URI uri = new URI(url, false);
                    String schemaAuthority = uri.getScheme() + "://" + uri.getAuthority();
                    String path = uri.getPathQuery();
                    if (path == null)
                        path = "/";
                    String color = schemaAuthorityToColor.get(schemaAuthority);
                    if (color == null) {
                        // not found yet, so assign this scheme and authority a color.
                        if (colorsUsed >= colors.length) {
                            throw new Exception("Too many scheme/authority combinations. We need more colours");
                        }
                        color = colors[colorsUsed++];
                        schemaAuthorityToColor.put(schemaAuthority, color);
                    }
                    addVertex(path, url, "fillColor=" + color);
                } catch (Exception e) {
                    log.error("Error graphing node for URL " + url, e);
                }
            } else {
                if (log.isDebugEnabled())
                    log.debug("URL " + url + " does not match the specified pattern " + urlPattern
                            + ", so not adding it as a vertex");
            }
        }
        // close the resultset and statement
        rs.close();
        st.close();

        // set up the edges in the graph
        st = conn.createStatement();
        rs = st.executeQuery(
                "select distinct RIGHT(REGEXP_SUBSTRING (REQHEADER, 'Referer:.+') , LENGTH(REGEXP_SUBSTRING (REQHEADER, 'Referer:.+'))-LENGTH('Referer: ')), URI from HISTORY where REQHEADER like '%Referer%' and histtype in (1,2,10) order by 2");

        mxGraphModel graphmodel = (mxGraphModel) graph.getModel();
        while (rs.next()) {
            String predecessor = rs.getString(1);
            String url = rs.getString(2);

            // now trim back all urls from the base url
            // Matcher predecessorurlmatcher = urlpattern.matcher(predecessor);
            // if (predecessorurlmatcher.find()) {
            //   predecessor =  predecessorurlmatcher.group(1);
            //   }
            // Matcher urlmatcher = urlpattern.matcher(url);
            // if (urlmatcher.find()) {
            //   url =  urlmatcher.group(1);
            //   }

            // remove urls that do not match the pattern specified (all sites / one site)
            Matcher urlmatcher1 = urlPattern.matcher(predecessor);
            if (!urlmatcher1.find()) {
                if (log.isDebugEnabled())
                    log.debug("Predecessor URL " + predecessor + " does not match the specified pattern "
                            + urlPattern + ", so not adding it as a vertex");
                continue; // to the next iteration
            }
            Matcher urlmatcher2 = urlPattern.matcher(url);
            if (!urlmatcher2.find()) {
                if (log.isDebugEnabled())
                    log.debug("URL " + url + " does not match the specified pattern " + urlPattern
                            + ", so not adding it as a vertex");
                continue; // to the next iteration
            }

            // check that we have added the url as a vertex in its own right.. definitely should
            // have happened..
            mxCell predecessorVertex = (mxCell) graphmodel.getCell(predecessor);
            mxCell postdecessorVertex = (mxCell) graphmodel.getCell(url);
            if (predecessorVertex == null || postdecessorVertex == null) {
                log.warn("Could not find graph node for " + predecessor + " or for " + url + ". Ignoring it.");
                continue;
            }
            // add the edge (ie, add the dependency between 2 URLs)
            graph.insertEdge(parent, predecessorVertex.getId() + "-->" + postdecessorVertex.getId(), null,
                    predecessorVertex, postdecessorVertex);
        }

        // once all the vertices and edges are drawn, look for root nodes (nodes with no
        // incoming edges)
        // we will display the full URL for these, rather than just the path, to aid viewing the
        // graph
        Object[] vertices = graph.getChildVertices(graph.getDefaultParent());
        for (Object vertex : vertices) {
            Object[] incomingEdgesForVertex = graph.getIncomingEdges(vertex);
            if (incomingEdgesForVertex == null || incomingEdgesForVertex.length == 0) {
                // it's a root node. Set its value (displayed label) to the same as its id
                // (the full URL)
                mxCell vertextCasted = (mxCell) vertex;
                vertextCasted.setValue(vertextCasted.getId());

                // now sort out the text metrics for the vertex, since the size of the displayed
                // text has been changed
                Dimension textsize = this.getTextDimension((String) vertextCasted.getValue(), this.fontmetrics);
                mxGeometry cellGeometry = vertextCasted.getGeometry();
                cellGeometry.setHeight(textsize.getHeight());
                cellGeometry.setWidth(textsize.getWidth());
                vertextCasted.setGeometry(cellGeometry);
            }
        }
    } catch (SQLException e) {
        log.error("Error trying to setup the graph", e);
        throw e;
    } finally {

        if (rs != null && !rs.isClosed())
            rs.close();
        if (st != null && !st.isClosed())
            st.close();
        if (conn != null && !conn.isClosed())
            conn.close();
        // mark the "transaction" on the graph as complete
        graph.getModel().endUpdate();
    }
}

From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java

private void generateRecords(Offset startingOffset, PreparedStatement selectChanges) {
    // When this is called the first time, LogMiner was started either from an SCN or from a start date, so we just keep
    // track of the start date etc.
    LOG.info("Attempting to generate records");
    boolean error;
    StringBuilder query = new StringBuilder();
    BigDecimal lastCommitSCN = new BigDecimal(startingOffset.scn);
    int sequenceNumber = startingOffset.sequence;
    LocalDateTime startTime = adjustStartTime(startingOffset.timestamp);
    String lastTxnId = startingOffset.txnId;
    LocalDateTime endTime = getEndTimeForStartTime(startTime);
    ResultSet resultSet = null;
    while (!getContext().isStopped()) {
        error = false;
        generationStarted = true;
        try {
            recordQueue.put(new RecordOffset(dummyRecord,
                    new Offset(version, startTime, lastCommitSCN.toPlainString(), sequenceNumber, lastTxnId)));
            selectChanges = getSelectChangesStatement();
            if (!useLocalBuffering) {
                selectChanges.setBigDecimal(1, lastCommitSCN);
                selectChanges.setInt(2, sequenceNumber);
                selectChanges.setBigDecimal(3, lastCommitSCN);
                if (shouldTrackDDL) {
                    selectChanges.setBigDecimal(4, lastCommitSCN);
                }
            }
            selectChanges.setFetchSize(configBean.jdbcFetchSize);
            resultSet = selectChanges.executeQuery();
            while (resultSet.next() && !getContext().isStopped()) {
                String queryFragment = resultSet.getString(5);
                BigDecimal scnDecimal = resultSet.getBigDecimal(1);
                String scn = scnDecimal.toPlainString();
                String xidUsn = String.valueOf(resultSet.getLong(10));
                String xidSlt = String.valueOf(resultSet.getString(11));
                String xidSqn = String.valueOf(resultSet.getString(12));
                String xid = xidUsn + "." + xidSlt + "." + xidSqn;
                // Query Fragment is not null -> we need to process
                // Query Fragment is null AND the query string buffered from previous rows due to CSF == 0 is null,
                // nothing to do, go to next row
                // Query Fragment is null, but there is previously buffered data in the query, go ahead and process.
                if (queryFragment != null) {
                    query.append(queryFragment);
                } else if (queryFragment == null && query.length() == 0) {
                    LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                    continue;
                }

                // CSF is 1 if the query is incomplete, so read the next row before parsing
                // CSF being 0 means query is complete, generate the record
                if (resultSet.getInt(9) == 0) {
                    if (query.length() == 0) {
                        LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                        continue;
                    }
                    String queryString = query.toString();
                    query.setLength(0);
                    String username = resultSet.getString(2);
                    short op = resultSet.getShort(3);
                    String timestamp = resultSet.getString(4);
                    LocalDateTime tsDate = Timestamp.valueOf(timestamp).toLocalDateTime();
                    delay.getValue().put("delay", getDelay(tsDate));
                    String table = resultSet.getString(6);
                    BigDecimal commitSCN = resultSet.getBigDecimal(7);
                    int seq = resultSet.getInt(8);

                    String rsId = resultSet.getString(13);
                    Object ssn = resultSet.getObject(14);
                    String schema = String.valueOf(resultSet.getString(15));
                    int rollback = resultSet.getInt(16);
                    String rowId = resultSet.getString(17);
                    SchemaAndTable schemaAndTable = new SchemaAndTable(schema, table);
                    TransactionIdKey key = new TransactionIdKey(xid);
                    bufferedRecordsLock.lock();
                    try {
                        if (useLocalBuffering && bufferedRecords.containsKey(key) && bufferedRecords.get(key)
                                .contains(new RecordSequence(null, null, 0, 0, rsId, ssn, null))) {
                            continue;
                        }
                    } finally {
                        bufferedRecordsLock.unlock();
                    }
                    Offset offset = null;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Commit SCN = {}, SCN = {}, Operation = {}, Txn Id = {}, Timestamp = {}, Row Id = {}, Redo SQL = {}",
                                commitSCN, scn, op, xid, tsDate, rowId, queryString);
                    }

                    if (op != DDL_CODE && op != COMMIT_CODE && op != ROLLBACK_CODE) {
                        if (!useLocalBuffering) {
                            offset = new Offset(version, tsDate, commitSCN.toPlainString(), seq, xid);
                        }
                        Map<String, String> attributes = new HashMap<>();
                        attributes.put(SCN, scn);
                        attributes.put(USER, username);
                        attributes.put(TIMESTAMP_HEADER, timestamp);
                        attributes.put(TABLE, table);
                        attributes.put(SEQ, String.valueOf(seq));
                        attributes.put(XID, xid);
                        attributes.put(RS_ID, rsId);
                        attributes.put(SSN, ssn.toString());
                        attributes.put(SCHEMA, schema);
                        attributes.put(ROLLBACK, String.valueOf(rollback));
                        attributes.put(ROWID_KEY, rowId);
                        if (!useLocalBuffering || getContext().isPreview()) {
                            if (commitSCN.compareTo(lastCommitSCN) < 0
                                    || (commitSCN.compareTo(lastCommitSCN) == 0 && seq < sequenceNumber)) {
                                continue;
                            }
                            lastCommitSCN = commitSCN;
                            sequenceNumber = seq;
                            if (configBean.keepOriginalQuery) {
                                attributes.put(QUERY_KEY, queryString);
                            }
                            try {
                                Record record = generateRecord(queryString, attributes, op);
                                if (record != null && record.getEscapedFieldPaths().size() > 0) {
                                    recordQueue.put(new RecordOffset(record, offset));
                                }
                            } catch (UnparseableSQLException ex) {
                                LOG.error("Parsing failed", ex);
                                unparseable.offer(queryString);
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records = bufferedRecords.computeIfAbsent(key, x -> {
                                    x.setTxnStartTime(tsDate);
                                    return createTransactionBuffer(key.txnId);
                                });

                                int nextSeq = records.isEmpty() ? 1 : records.tail().seq + 1;
                                RecordSequence node = new RecordSequence(attributes, queryString, nextSeq, op,
                                        rsId, ssn, tsDate);
                                records.add(node);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else if (!getContext().isPreview() && useLocalBuffering
                            && (op == COMMIT_CODE || op == ROLLBACK_CODE)) {
                        // this commit was previously processed, or it is a rollback, so we don't care
                        if (op == ROLLBACK_CODE || scnDecimal.compareTo(lastCommitSCN) < 0) {
                            bufferedRecordsLock.lock();
                            try {
                                bufferedRecords.remove(key);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records = bufferedRecords.getOrDefault(key,
                                        EMPTY_LINKED_HASHSET);
                                if (lastCommitSCN.equals(scnDecimal) && xid.equals(lastTxnId)) {
                                    removeProcessedRecords(records, sequenceNumber);
                                }
                                int bufferedRecordsToBeRemoved = records.size();
                                LOG.debug(FOUND_RECORDS_IN_TRANSACTION, bufferedRecordsToBeRemoved, xid);
                                lastCommitSCN = scnDecimal;
                                lastTxnId = xid;
                                sequenceNumber = addRecordsToQueue(tsDate, scn, xid);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else {
                        offset = new Offset(version, tsDate, scn, 0, xid);
                        boolean sendSchema = false;
                        // Commit/rollback in Preview will also end up here, so don't really do any of the following in preview
                        // Don't bother with DDL events here.
                        if (!getContext().isPreview()) {
                            // Event is sent on every DDL, but schema is not always sent.
                            // Schema sending logic:
                            // CREATE/ALTER: Schema is sent if the schema after the ALTER is newer than the cached schema
                            // (which we would have sent as an event earlier, at the last alter)
                            // DROP/TRUNCATE: Schema is not sent, since they don't change schema.
                            DDL_EVENT type = getDdlType(queryString);
                            if (type == DDL_EVENT.ALTER || type == DDL_EVENT.CREATE) {
                                sendSchema = refreshSchema(scnDecimal, new SchemaAndTable(schema, table));
                            }
                            recordQueue.put(new RecordOffset(createEventRecord(type, queryString,
                                    schemaAndTable, offset.toString(), sendSchema, timestamp), offset));
                        }
                    }
                    query.setLength(0);
                }
            }
        } catch (SQLException ex) {
            error = true;
            // force a restart from the same timestamp.
            if (ex.getErrorCode() == MISSING_LOG_FILE) {
                LOG.warn("SQL Exception while retrieving records", ex);
                addToStageExceptionsQueue(new StageException(JDBC_86, ex));
            } else if (ex.getErrorCode() == QUERY_TIMEOUT) {
                LOG.warn("LogMiner select query timed out");
            } else if (ex.getErrorCode() == LOGMINER_START_MUST_BE_CALLED) {
                LOG.warn("Last LogMiner session did not start successfully. Will retry", ex);
            } else if (ex.getErrorCode() == RESULTSET_CLOSED_AS_LOGMINER_SESSION_CLOSED) {
                LOG.warn("SQL Exception while retrieving records", ex);
            } else {
                LOG.error("Error while reading data", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            }
        } catch (StageException e) {
            LOG.error("Error while reading data", e);
            error = true;
            addToStageExceptionsQueue(e);
        } catch (InterruptedException ex) {
            LOG.error("Interrupted while waiting to add data");
            Thread.currentThread().interrupt();
        } catch (Exception ex) {
            LOG.error("Error while reading data", ex);
            error = true;
            addToStageExceptionsQueue(new StageException(JDBC_52, ex));
        } finally {
            // If an incomplete batch is seen, it means we are going to move the window forward
            // Ending this session and starting a new one helps reduce PGA memory usage.
            try {
                if (resultSet != null && !resultSet.isClosed()) {
                    resultSet.close();
                }
                if (selectChanges != null && !selectChanges.isClosed()) {
                    selectChanges.close();
                }
            } catch (SQLException ex) {
                LOG.warn("Error while attempting to close SQL statements", ex);
            }
            try {
                endLogMnr.execute();
            } catch (SQLException ex) {
                LOG.warn("Error while trying to close logminer session", ex);
            }
            try {
                if (error) {
                    resetConnectionsQuietly();
                } else {
                    discardOldUncommitted(startTime);
                    startTime = adjustStartTime(endTime);
                    endTime = getEndTimeForStartTime(startTime);
                }
                startLogMinerUsingGivenDates(startTime.format(dateTimeColumnHandler.dateFormatter),
                        endTime.format(dateTimeColumnHandler.dateFormatter));
            } catch (SQLException ex) {
                LOG.error("Error while attempting to start LogMiner", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            } catch (StageException ex) {
                LOG.error("Error while attempting to start logminer for redo log dictionary", ex);
                addToStageExceptionsQueue(ex);
            }
        }
    }
}

From source file: com.github.woonsan.jdbc.jcr.impl.JcrJdbcResultSetTest.java

@Test
public void testUnsupportedOperations() throws Exception {
    Statement statement = getConnection().createStatement();
    ResultSet rs = statement.executeQuery(SQL_EMPS);

    try {
        rs.getWarnings();
        fail();
    } catch (UnsupportedOperationException ignore) {
    }

    try {
        rs.clearWarnings();
        fail();
    } catch (UnsupportedOperationException ignore) {
    }

    try {
        rs.getCursorName();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject("ename");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.isLast();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.beforeFirst();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.afterLast();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.first();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.last();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.absolute(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.relative(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.previous();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.moveToCurrentRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNull(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNull("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBoolean(1, true);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBoolean("col1", true);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateByte(1, (byte) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateByte("col1", (byte) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateShort(1, (short) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateShort("col1", (short) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateInt(1, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateInt("col1", 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateLong(1, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateLong("col1", (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateFloat(1, (float) 0.1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateFloat("col1", (float) 0.1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateDouble(1, 0.1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateDouble("col1", 0.1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBigDecimal(1, new BigDecimal("100000000"));
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBigDecimal("col1", new BigDecimal("100000000"));
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateString(1, "Unknown");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateString("col1", "Unknown");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBytes(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBytes("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateDate(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateDate("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateTime(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateTime("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateTimestamp(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateTimestamp("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateObject(1, null, 1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateObject("col1", null, 1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateObject(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateObject("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.insertRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.deleteRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.refreshRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.cancelRowUpdates();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.moveToInsertRow();
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject(1, (Map<String, Class<?>>) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject("col1", (Map<String, Class<?>>) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getRef(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getRef("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getBlob(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getBlob("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getClob(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getClob("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getURL(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getURL("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateRef(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateRef("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob(1, (Blob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob("col1", (Blob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob(1, (Clob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob("col1", (Clob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateArray(1, (Array) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateArray("col1", (Array) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getRowId(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getRowId("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateRowId(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateRowId("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNString(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNString("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob(1, (NClob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob("col1", (NClob) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNClob(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNClob("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getSQLXML(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getSQLXML("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateSQLXML(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateSQLXML("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNString(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNString("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNCharacterStream(1);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getNCharacterStream("col1");
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null, (long) 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob(1, null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob("col1", null, 0);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNCharacterStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateAsciiStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBinaryStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream(1, null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateCharacterStream("col1", null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob(1, (InputStream) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateBlob("col1", (InputStream) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob(1, (Reader) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateClob("col1", (Reader) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob(1, (Reader) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.updateNClob("col1", (Reader) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject(1, (Class<?>) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    try {
        rs.getObject("col1", (Class<?>) null);
        fail();
    } catch (SQLFeatureNotSupportedException ignore) {
    }

    rs.close();
    assertTrue(rs.isClosed());

    statement.close();
    assertTrue(statement.isClosed());
}
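
The closing assertions confirm the JDBC contract: after close(), isClosed() returns true and remains legal to call, whereas most other methods on a closed ResultSet throw SQLException. A minimal check of that contract against any driver (a sketch):

rs.close();
assertTrue(rs.isClosed()); // isClosed() stays callable after close()
try {
    rs.next();             // other calls on a closed ResultSet must fail
    fail();
} catch (SQLException expected) {
}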