Example usage for java.sql ResultSet TYPE_SCROLL_INSENSITIVE

Introduction

On this page you can find example usages of java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, collected from open-source projects.

Prototype

int TYPE_SCROLL_INSENSITIVE

Document

The constant indicating the type for a ResultSet object that is scrollable but generally not sensitive to changes to the data that underlies the ResultSet.
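
In practice the constant is passed as the resultSetType argument to Connection.createStatement or Connection.prepareStatement, usually together with a concurrency constant such as ResultSet.CONCUR_READ_ONLY, as the examples below show. Here is a minimal, self-contained sketch; it assumes an in-memory H2 database on the classpath, but any driver that supports scrollable cursors works the same way:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ScrollInsensitiveExample {
    public static void main(String[] args) throws Exception {
        // The H2 in-memory URL is just for illustration.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                     ResultSet.CONCUR_READ_ONLY);
             ResultSet rs = stmt.executeQuery("SELECT X FROM SYSTEM_RANGE(1, 5)")) {
            // A scroll-insensitive cursor may move in any direction and does not
            // see changes made to the underlying data while it is open.
            rs.last();                      // jump to the last row
            int rowCount = rs.getRow();     // row count without a separate COUNT(*)
            rs.beforeFirst();               // rewind before iterating normally
            while (rs.next()) {
                System.out.println(rs.getInt(1) + " of " + rowCount);
            }
        }
    }
}

Calling last(), getRow(), and beforeFirst() this way is the most common reason the examples below request a scrollable cursor; with the default TYPE_FORWARD_ONLY those calls would throw a SQLException.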

Usage

From source file:pub.platform.db.DatabaseConnection.java
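
A convenience wrapper that prepares a scrollable, read-only PreparedStatement, returning null and recording the exception if preparation fails.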

/**
 * Gets the preparedStatement attribute of the DatabaseConnection object.
 *
 * @param str
 *          the SQL string to prepare
 * @return a scrollable, read-only PreparedStatement, or null if a SQLException occurred
 */
public PreparedStatement getPreparedStatement(String str) {
    try {

        PreparedStatement st = connect.prepareStatement(str, ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_READ_ONLY);
        return st;
    } catch (SQLException ex) {
        errorException = ex;
        return null;
    }
}

From source file:edu.ku.brc.specify.conversion.MSULichensFixer.java
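
A data-conversion routine that uses scroll-insensitive, read-only statements twice: first to read column types and sizes from the ResultSet metadata, then to walk the old taxon records and copy each row into the new schema.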

private void convertTaxonRecords() {
    IdMapperMgr.getInstance().setDBs(oldDBConn, newDBConn);

    txMapper = IdMapperMgr.getInstance().addTableMapper("taxonname", "TaxonNameID", false);
    txTypMapper = IdMapperMgr.getInstance().addTableMapper("TaxonomyType", "TaxonomyTypeID", false);
    txUnitTypMapper = IdMapperMgr.getInstance().addTableMapper("TaxonomicUnitType", "TaxonomicUnitTypeID",
            false);
    mappers = new IdMapperIFace[] { txMapper, txMapper, txTypMapper, txMapper, txUnitTypMapper };

    newToOldColMap.put("TaxonID", "TaxonNameID");
    newToOldColMap.put("ParentID", "ParentTaxonNameID");
    newToOldColMap.put("TaxonTreeDefID", "TaxonomyTypeID");
    newToOldColMap.put("TaxonTreeDefItemID", "TaxonomicUnitTypeID");
    newToOldColMap.put("Name", "TaxonName");
    newToOldColMap.put("FullName", "FullTaxonName");
    newToOldColMap.put("IsAccepted", "Accepted");

    oldToNewColMap.put("TaxonNameID", "TaxonID");
    oldToNewColMap.put("ParentTaxonNameID", "ParentID");
    oldToNewColMap.put("TaxonomyTypeID", "TaxonTreeDefID");
    oldToNewColMap.put("TaxonomicUnitTypeID", "TaxonTreeDefItemID");
    oldToNewColMap.put("TaxonName", "Name");
    oldToNewColMap.put("FullTaxonName", "FullName");
    oldToNewColMap.put("Accepted", "IsAccepted");

    BasicSQLUtils.setDBConnection(newDBConn);

    StringBuilder newSB = new StringBuilder();
    StringBuilder vl = new StringBuilder();
    for (int i = 0; i < cols.length; i++) {
        fieldToColHash.put(cols[i], i + 1);
        colToFieldHash.put(i + 1, cols[i]);

        if (newSB.length() > 0)
            newSB.append(", ");
        newSB.append(cols[i]);

        if (vl.length() > 0)
            vl.append(',');
        vl.append('?');
    }

    StringBuilder oldSB = new StringBuilder();
    for (int i = 0; i < oldCols.length; i++) {
        oldFieldToColHash.put(oldCols[i], i + 1);
        if (oldSB.length() > 0)
            oldSB.append(", ");
        oldSB.append("ttx.");
        oldSB.append(oldCols[i]);
    }

    rankIdOldDBInx = oldFieldToColHash.get("RankID");

    String sqlStr = String.format("SELECT %s FROM taxon ", newSB.toString());
    log.debug(sqlStr);

    String fromClause = " FROM taxonname ttx LEFT JOIN msu_lichens.taxonname_TaxonNameID ON OldID = ttx.TaxonNameID LEFT JOIN msu_lichens_6.taxon AS ntx ON NewID = ntx.TaxonID WHERE ntx.TaxonID IS NULL";
    String sql = String.format("SELECT %s %s", oldSB.toString(), fromClause);
    log.debug(sql);

    String cntSQL = String.format("SELECT COUNT(*) %s", fromClause);
    log.debug(cntSQL);

    int txCnt = BasicSQLUtils.getCountAsInt(oldDBConn, cntSQL);
    if (frame != null) {
        frame.setProcess(0, txCnt);
    }

    log.debug(txCnt);

    String pStr = String.format("INSERT INTO taxon (%s) VALUES (%s)", newSB.toString(), vl.toString());
    log.debug(pStr);

    try {
        stmtTx = newDBConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        ResultSet rs1 = stmtTx.executeQuery(sqlStr);
        ResultSetMetaData rsmd1 = rs1.getMetaData();
        colTypes = new int[rsmd1.getColumnCount()];
        colSizes = new int[rsmd1.getColumnCount()];
        for (int i = 0; i < colTypes.length; i++) {
            colTypes[i] = rsmd1.getColumnType(i + 1);
            colSizes[i] = rsmd1.getPrecision(i + 1);
        }
        rs1.close();
        stmtTx.close();

        missingParentTaxonCount = 0;
        lastEditedByInx = oldFieldToColHash.get("LastEditedBy");
        modifiedByAgentInx = fieldToColHash.get("ModifiedByAgentID");
        stmtTx = oldDBConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        pStmtTx = newDBConn.prepareStatement(pStr);

        int cnt = 0;
        ResultSet rs = stmtTx.executeQuery(sql);
        ResultSetMetaData rsmd = rs.getMetaData();
        while (rs.next()) {
            processRow(rs, rsmd, null);

            cnt++;
            if (cnt % 1000 == 0) {
                log.debug(cnt);
                if (frame != null) {
                    frame.setProcess(cnt);
                }
            }
        }
        rs.close();

        if (frame != null) {
            frame.setProcess(txCnt, txCnt);
        }

        String msg = String.format("Stranded Taxon (no parent): %d", missingParentTaxonCount);
        tblWriter.log(msg);
        log.debug(msg);

        if (missingParentTaxonCount > 0) {
            if (frame != null)
                frame.setDesc("Renumbering the tree nodes, this may take a while...");

            HashSet<Integer> ttdHash = new HashSet<Integer>();
            for (CollectionInfo colInfo : CollectionInfo.getFilteredCollectionInfoList()) {
                if (!ttdHash.contains(colInfo.getTaxonTreeDef().getId())) {
                    DataProviderSessionIFace session = null;
                    try {
                        session = DataProviderFactory.getInstance().createSession();

                        TaxonTreeDef taxonTreeDef = colInfo.getTaxonTreeDef();
                        taxonTreeDef = (TaxonTreeDef) session
                                .getData("FROM TaxonTreeDef WHERE id = " + taxonTreeDef.getId());

                        sql = "SELECT TaxonID FROM taxon WHERE RankID = 0 AND TaxonTreeDefID = "
                                + taxonTreeDef.getId();
                        log.debug(sql);
                        Integer txRootId = BasicSQLUtils.getCount(sql);
                        Taxon txRoot = (Taxon) session.getData("FROM Taxon WHERE id = " + txRootId);

                        NodeNumberer<Taxon, TaxonTreeDef, TaxonTreeDefItem> nodeNumberer = new NodeNumberer<Taxon, TaxonTreeDef, TaxonTreeDefItem>(
                                txRoot.getDefinition());
                        nodeNumberer.doInBackground();

                    } catch (Exception ex) {
                        //session.rollback();
                        ex.printStackTrace();

                    } finally {
                        if (session != null) {
                            session.close();
                        }
                    }
                    ttdHash.add(colInfo.getTaxonTreeDef().getId());
                }
            }
            if (frame != null)
                frame.setDesc("Renumbering done.");
        }
        missingParentTaxonCount = 0;

    } catch (SQLException ex) {
        ex.printStackTrace();

    } finally {
        try {
            stmtTx.close();
            pStmtTx.close();
        } catch (Exception ex) {
        }
    }

    System.out.println("Done.");
}

From source file:org.apache.bigtop.itest.hive.TestJdbc.java
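
A Hive JDBC test that creates a scrollable, read-only Statement, switches the connection's schema, and iterates DatabaseMetaData result sets for schemas, tables, columns, and functions.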

@Test
public void setSchema() throws SQLException {
    try (Statement stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {

        final String dbName = "bigtop_jdbc_test_db";

        final String tableName = "bigtop_jdbc_test_table";
        stmt.execute("drop table if exists " + tableName);

        stmt.execute("drop database if exists " + dbName + " cascade");
        stmt.execute("create database " + dbName);

        conn.setSchema(dbName);

        DatabaseMetaData md = conn.getMetaData();

        ResultSet rs = md.getSchemas(null, dbName);

        while (rs.next()) {
            String schemaName = rs.getString(2);
            LOG.debug("Schema name is " + schemaName);
        }

        stmt.execute("create table " + tableName + " (i int, s varchar(32))");

        rs = md.getTables(null, dbName, tableName, null);
        while (rs.next()) {
            String tName = rs.getString(3);
            LOG.debug("Schema name is " + tName);
        }

        rs = md.getColumns(null, dbName, tableName, "i");
        while (rs.next()) {
            String colName = rs.getString(4);
            LOG.debug("Schema name is " + colName);
        }

        rs = md.getFunctions(null, dbName, "foo");
        while (rs.next()) {
            String funcName = rs.getString(3);
            LOG.debug("Schema name is " + funcName);
        }
    }
}

From source file:edu.ku.brc.specify.conversion.IdTableMapper.java
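
This mapper relies on the scrollable cursor to call rs.last() and size the progress bar, then rewinds with rs.first() before inserting the old-to-new ID pairs.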

/**
 * Map all the old IDs to new IDs.
 * @param sqlArg the string to use to fill the map
 * @param numRecords the expected number of records, or null to read the count from the map table
 */
public void mapAllIdsNoIncrement(final String sqlArg, final Integer numRecords) {
    log.debug("mapAllIdsNoIncrement with sql: " + sqlArg);
    this.sql = sqlArg;

    int mappingCount = numRecords != null ? numRecords : getMapCount(mapTableName);
    wasEmpty = mappingCount == 0;

    if (doDelete || mappingCount == 0) {
        BasicSQLUtils.deleteAllRecordsFromTable(oldConn, mapTableName, BasicSQLUtils.myDestinationServerType);
        if (frame != null) {
            String dMsg = "Mapping " + mapTableName;
            frame.setDesc(dMsg);
            log.debug(dMsg);
        }

        try {
            log.debug("Executing: " + sql);
            PreparedStatement pStmt = oldConn.prepareStatement("INSERT INTO " + mapTableName + " VALUES (?,?)");
            Statement stmtOld = oldConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_READ_ONLY);
            ResultSet rs = stmtOld.executeQuery(sql);

            if (rs.last()) {
                if (frame != null) {
                    frame.setProcess(0, rs.getRow());
                }
            }

            int cnt = 0;
            if (rs.first()) {
                do {
                    int oldIndex = rs.getInt(1);
                    int toIndex = rs.getInt(2);
                    pStmt.setInt(1, oldIndex); // Old Index
                    pStmt.setInt(2, toIndex); // New Index
                    if (pStmt.executeUpdate() != 1) {
                        String msg = String.format("Error writing to Map table[%s] old: %d  new: %d",
                                mapTableName, oldIndex, toIndex);
                        log.error(msg);
                        throw new RuntimeException(msg);
                    }

                    if (frame != null) {
                        if (cnt % 1000 == 0) {
                            frame.setProcess(cnt);
                        }

                    } else {
                        if (cnt % 2000 == 0) {
                            log.debug("Mapped " + cnt + " records from " + tableName);
                        }
                    }

                    cnt++;

                } while (rs.next());

                log.info("Mapped " + cnt + " records from " + tableName);

            } else {
                log.info("No records to map in " + tableName);
            }
            rs.close();
            stmtOld.close();
            pStmt.close();

        } catch (SQLException ex) {
            ex.printStackTrace();
            edu.ku.brc.af.core.UsageTracker.incrSQLUsageCount();
            edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(IdTableMapper.class, ex);
            log.error(ex);
            throw new RuntimeException(ex);
        }
    } else {
        log.debug("Skipping the build of mapper: " + mapTableName);
    }

    if (frame != null) {
        frame.setProcess(0, 0);
    }
}

From source file:com.taobao.datax.plugins.writer.mysqlwriter.MysqlWriter.java
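
After a load finishes, this writer runs SHOW WARNINGS through a scroll-insensitive statement, collects per-row error messages, and decides between success and failure based on a configured error limit.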

@Override
public int finish() {
    Statement stmt = null;
    try {
        StringBuilder sb = new StringBuilder();

        String PATTERN = "row \\d+";
        Pattern p = Pattern.compile(PATTERN);
        Set<String> rowCounter = new HashSet<String>();

        stmt = this.connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        ResultSet rs;

        sb.setLength(0);
        sb.append('\n');

        int warnCnts = 0;
        rs = stmt.executeQuery("SHOW WARNINGS;");
        while (rs.next()) {
            if (warnCnts++ < 32) {
                sb.append(rs.getString(1)).append(" ").append(rs.getInt(2)).append(" ").append(rs.getString(3))
                        .append("\n");
            }

            Matcher matcher = p.matcher(rs.getString(3));
            if (matcher.find()) {
                rowCounter.add(matcher.group());
            }
        }

        if (!StringUtils.isBlank(sb.toString())) {

            if (rowCounter.size() > 32) {
                sb.append("More error messages hidden ...");
            }
            this.logger.warn(sb);

            if (this.limit >= 1 && rowCounter.size() >= this.limit) {
                this.logger.error(String.format("%d rows data failed in loading.", rowCounter.size()));
                return PluginStatus.FAILURE.value();
            } else if (this.limit > 0 && this.limit < 1 && this.lineCounter > 0) {
                double rate = (double) rowCounter.size() / (double) this.lineCounter;
                if (rate >= this.limit) {
                    this.logger.error(String.format("%.1f%% data failed in loading.", rate * 100));
                    return PluginStatus.FAILURE.value();
                }
            } else {
                this.logger.warn(
                        String.format("MysqlWriter found %d rows data format error .", rowCounter.size()));
                // this.getMonitor().setFailedLines(rowCounter.size());
            }
        }
    } catch (SQLException e) {
    } finally {
        try {
            if (stmt != null)
                stmt.close();
            if (null != this.connection)
                this.connection.close();
        } catch (Exception e) {
        }
    }
    return PluginStatus.SUCCESS.value();
}

From source file:ca.on.gov.jus.icon.common.iconcodetables.IconCodeTablesManager.java
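
Here the scrollable cursor is used to count rows with resultSet.last() and getRow(), then rewound with beforeFirst() before the code-table rows are read.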

private IconCodeTable getICONCodesTableList() {
    IconCodeTable iconCodesTableList = null;
    String selectSql = null;
    Connection oracleConnection = ReportsConnectionManager.getPooledOracleConnection();
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;

    if (null != oracleConnection) {
        selectSql = "" + "SELECT " + "    ICONADMIN.ICON_TABLES_CODE.CODE, "
                + "    ICONADMIN.ICON_TABLES_CODE.DESCRIPTION, " + "    ICONADMIN.ICON_TABLES_CODE.TABLE_PASS  "
                + "FROM  " + "    ICONADMIN.ICON_TABLES_CODE " + "ORDER BY  "
                + "    ICONADMIN.ICON_TABLES_CODE.DESCRIPTION ASC ";

        try {

            preparedStatement = oracleConnection.prepareStatement(selectSql, ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_READ_ONLY);
            resultSet = preparedStatement.executeQuery();

            resultSet.last();
            int resultSetCount = resultSet.getRow();
            //logger.info("resultSetCount: " + resultSetCount);
            resultSet.beforeFirst();

            if (resultSetCount > 0) {
                iconCodesTableList = new IconCodeTable("ICON_CodeTablesList", "ICON Codes Table List");
            }

            while (resultSet.next()) {
                IconCodeTable iconCodeTable = new IconCodeTable(resultSet.getString("CODE"),
                        resultSet.getString("DESCRIPTION"));
                iconCodeTable.setCodeTablePass(resultSet.getString("TABLE_PASS"));

                //Null it so that it cannot be used that way
                iconCodeTable.setCodeTableValues(null);

                iconCodesTableList.getCodeTableValues().put(resultSet.getString("CODE"), iconCodeTable);
            }

        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            try {
                //clean up
                if (null != resultSet) {
                    resultSet.close();
                    resultSet = null;
                }
                if (null != preparedStatement) {
                    preparedStatement.close();
                    preparedStatement = null;
                }
                //Should never close the pooled connection
                //               if(null != oracleConnection){ 
                //                  oracleConnection.close();
                //                  oracleConnection = null;
                //               }
            } catch (SQLException e1) {
            }
        }
    }

    return iconCodesTableList;
}

From source file:com.itemanalysis.jmetrik.graph.irt.IrtPlotAnalysis.java
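
This analysis step reads item responses through a scrollable, read-only statement and turns each row into an ItemResponseVector, coding omitted responses as -1.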

private void summarizeResponseData() throws SQLException {
    this.firePropertyChange("progress-ind-on", null, null);

    Statement stmt = null;
    ResultSet rs = null;

    //IRT observed score distribution
    NormalDistributionApproximation latentDist = new NormalDistributionApproximation(min, max, points);
    irtDist = new IrtObservedScoreDistribution(itemResponseModels, latentDist);
    irtDist.compute();
    nscores = irtDist.getNumberOfScores();
    eapScore = new double[nscores];
    for (int i = 0; i < nscores; i++) {
        eapScore[i] = irtDist.getEAP(i);
    }

    //Summarize item response vectors
    try {
        int nrow = dao.getRowCount(conn, responseTableName);
        responseVector = new ItemResponseVector[nrow];

        VariableTableName variableTableName = new VariableTableName(responseTableName.toString());
        ArrayList<VariableAttributes> variableAttributes = dao.getSelectedVariables(conn, variableTableName,
                variables);

        //Query the db. Variables include the selected items and the grouping variable, if one is available.
        Table sqlTable = new Table(responseTableName.getNameForDatabase());
        SelectQuery select = new SelectQuery();
        for (VariableAttributes v : variableAttributes) {
            select.addColumn(sqlTable, v.getName().nameForDatabase());
        }
        stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        rs = stmt.executeQuery(select.toString());

        int i = 0;
        int c = 0;
        int ncol = itemResponseModels.length;
        byte[] rv = null;
        Object response = null;
        ItemResponseVector iVec = null;
        while (rs.next()) {
            c = 0;
            rv = new byte[ncol];

            for (VariableAttributes v : variableAttributes) {
                response = rs.getObject(v.getName().nameForDatabase());
                if ((response == null || response.equals("") || response.equals("NA"))) {
                    rv[c] = -1;//code for omitted responses
                } else {
                    rv[c] = (byte) v.getItemScoring().computeItemScore(response);
                }
                c++;
            }
            iVec = new ItemResponseVector(rv, 1.0);
            responseVector[i] = iVec;
            i++;
        } //end data summary

    } catch (SQLException ex) {
        throw (ex);
    } finally {
        if (rs != null)
            rs.close();
        if (stmt != null)
            stmt.close();
    }
}

From source file:org.kawanfw.sql.jdbc.ResultSetHttp.java
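
A client-side ResultSet implementation backed by a local file, which therefore reports its type as TYPE_SCROLL_INSENSITIVE.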

/**
 * Constructor used when building the ResultSet from a getMetaData function.
 * Allows getMetaData() to be called directly on the ResultSet without a new
 * call to the server.
 * 
 * @param connectionHttp
 *            The Http Connection
 * @param metaDataMethodName
 *            The method name that produced the result set
 * @param rsAndMetaDataFile
 *            the result file that contains both the ResultSet and its
 *            MetaData
 * @param metaDataMethodParams
 *            The parameters of the method
 */
public ResultSetHttp(ConnectionHttp connectionHttp, String metaDataMethodName, File rsAndMetaDataFile,
        Object... metaDataMethodParams) throws SQLException {

    if (connectionHttp == null) {
        String message = Tag.PRODUCT_PRODUCT_FAIL + "connectionHttp can not be null!";
        throw new SQLException(message, new IOException(message));
    }

    if (rsAndMetaDataFile == null) {
        String message = Tag.PRODUCT_PRODUCT_FAIL + "Result Set as String can not be null!";
        throw new SQLException(message, new IOException(message));
    }

    this.connectionHttp = connectionHttp;
    this.MetaDataMethodName = metaDataMethodName;
    this.MetaDataMethodParams = metaDataMethodParams;

    resultSetType = ResultSet.TYPE_SCROLL_INSENSITIVE;

    // Split the result file into two files: 1) ResultSet itself 2)
    // ResultSet.getMetaData

    ResultSetFileSplitter resultSetFileSplitter = new ResultSetFileSplitter(rsAndMetaDataFile);

    File rsFile = resultSetFileSplitter.getResultSetFile();
    rsMetaDataFileTypeFunction = resultSetFileSplitter.getMetaDataFile();

    debug("rsAndMetaDataFile: " + rsAndMetaDataFile);
    debug("rsFile           : " + rsMetaDataFileTypeFunction);
    debug("rsMetaDataFile   : " + rsMetaDataFileTypeFunction);

    // Build the column position map:
    ColPositionBuilder colPositionBuilder = new ColPositionBuilder(rsFile, connectionHttp);
    colPosition = colPositionBuilder.getColPosition();

    // Build the List of Strings
    list = new FileBackedListRs<List<String>>(rsFile, connectionHttp);

}

From source file:no.polaric.aprsdb.MyDBSession.java
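
A lookup that prepares a scrollable, read-only query for the most recent position report of an APRS item at or before a given time.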

/**
 * Get an APRS item at a given point in time.
 */
public AprsPoint getItem(String src, java.util.Date at) throws java.sql.SQLException {
    _log.debug("MyDbSession", "getItem:  " + src + ", " + df.format(at));
    PreparedStatement stmt = getCon().prepareStatement(
            " SELECT * FROM \"PosReport\"" + " WHERE src=? AND time <= ?" + " ORDER BY time DESC LIMIT 1",
            ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    stmt.setString(1, src);
    stmt.setTimestamp(2, new Timestamp(at.getTime()));
    ResultSet rs = stmt.executeQuery();

    String name[] = src.split("@", 2);
    AprsPoint x = null;
    if (name.length > 1) {
        Station owner = _api.getDB().getStation(name[1], null);
        x = new AprsObject(owner, name[0]);
    } else
        x = new Station(src);

    if (rs.next())
        x.update(
                rs.getDate("time"), new AprsHandler.PosData(getRef(rs, "position"), rs.getInt("course"),
                        rs.getInt("speed"), rs.getString("symbol").charAt(0), rs.getString("symtab").charAt(0)),
                null, null);
    return x;
}

From source file:edu.ku.brc.specify.conversion.StratToGTP.java
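
A conversion routine that streams the old stratigraphy records through a scroll-insensitive, read-only statement and builds GeologicTimePeriod tree nodes from them.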

/**
 * @throws SQLException
 */
public void convertStratToGTP() throws SQLException {
    Statement stmt = null;
    ResultSet rs = null;

    try {
        // get a Hibernate session for saving the new records
        Session localSession = HibernateUtil.getCurrentSession();
        HibernateUtil.beginTransaction();

        int count = BasicSQLUtils.getCountAsInt(oldDBConn, "SELECT COUNT(*) FROM stratigraphy");
        if (count < 1)
            return;

        if (hasFrame) {
            setProcess(0, count);
        }

        IdTableMapper gtpIdMapper = IdMapperMgr.getInstance().addTableMapper("geologictimeperiod",
                "GeologicTimePeriodID");

        Hashtable<Integer, Integer> ceToNewStratIdHash = new Hashtable<Integer, Integer>();

        IdMapperIFace ceMapper = IdMapperMgr.getInstance().get("collectingevent", "CollectingEventID");

        // get all of the old records
        //  Future GTP                           Period        Epoch       EML        Age    EML(age)    Text1   Text2     Remarks
        String sql = "SELECT s.StratigraphyID, s.SuperGroup, s.Group, s.Formation, s.Member, s.Bed,    s.Text1, s.Text2,  s.Remarks FROM stratigraphy s ORDER BY s.StratigraphyID";

        stmt = oldDBConn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
        rs = stmt.executeQuery(sql);

        int counter = 0;
        // for each old record, convert the record
        while (rs.next()) {
            if (counter % 500 == 0) {
                if (hasFrame) {
                    setProcess(counter);

                } else {
                    log.info("Converted " + counter + " Stratigraphy records");
                }
            }

            // grab the important data fields from the old record
            int oldStratId = rs.getInt(1);
            String period = rs.getString(2);
            String epoch = rs.getString(3);
            String eml = rs.getString(4);
            String age = rs.getString(5);
            String emlAge = rs.getString(6);

            String text1 = rs.getString(7);
            String text2 = rs.getString(8);
            String remarks = rs.getString(9);

            if (StringUtils.isNotEmpty(text2) && text2.length() > 128) {
                remarks += "; " + text2;
                text2 = text2.substring(0, 128);
            }

            if (StringUtils.isNotEmpty(eml)) {
                if (StringUtils.isNotEmpty(epoch)) {
                    epoch += ' ' + eml;

                } else {
                    epoch = eml;
                }
            }

            if (StringUtils.isEmpty(epoch)) {
                epoch = "(Empty)";
            }

            // create a new Geography object from the old data
            GeologicTimePeriod newStrat = convertOldStratRecord(localSession, eraNode, text1, text2, remarks,
                    period, epoch, eml, age, emlAge);

            counter++;

            // Map Old GeologicTimePeriod ID to the new Tree Id
            gtpIdMapper.put(oldStratId, newStrat.getGeologicTimePeriodId());

            // Convert Old CEId to new CEId, then map the new CEId -> new StratId
            Integer ceId = ceMapper.get(oldStratId);
            if (ceId != null) {
                ceToNewStratIdHash.put(ceId, newStrat.getGeologicTimePeriodId());
            } else {
                String msg = String.format("No CE mapping for Old StratId %d, when they are a one-to-one.",
                        oldStratId);
                tblWriter.logError(msg);
                log.error(msg);
            }
        }
        stmt.close();

        if (hasFrame) {
            setProcess(counter);

        } else {
            log.info("Converted " + counter + " Stratigraphy records");
        }

        TreeHelper.fixFullnameForNodeAndDescendants(eraNode);
        eraNode.setNodeNumber(1);
        fixNodeNumbersFromRoot(eraNode);
        rs.close();

        HibernateUtil.commitTransaction();
        log.info("Converted " + counter + " Stratigraphy records");

    } catch (Exception ex) {
        ex.printStackTrace();
    }

    // Now in this Step we Add the PaleoContext to the Collecting Events

}