Example usage for java.sql DatabaseMetaData getPrimaryKeys

List of usage examples for java.sql DatabaseMetaData getPrimaryKeys

Introduction

On this page you can find example usages of java.sql DatabaseMetaData getPrimaryKeys.

Prototype

ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException;

Source Link

Document

Retrieves a description of the given table's primary key columns.

Usage

From source file:org.apache.bigtop.itest.hive.TestJdbc.java

/**
 * Test simple DatabaseMetaData calls.  getColumns is tested elsewhere, as we need to call
 * that on a valid table.  Same with getFunctions.
 *
 * @throws SQLException/*from  w  w  w. j a  v  a2  s  .  c o  m*/
 */
@Test
public void databaseMetaDataCalls() throws SQLException {
    DatabaseMetaData md = conn.getMetaData();

    boolean boolrc = md.allTablesAreSelectable();
    LOG.debug("All tables are selectable? " + boolrc);

    String strrc = md.getCatalogSeparator();
    LOG.debug("Catalog separator " + strrc);

    strrc = md.getCatalogTerm();
    LOG.debug("Catalog term " + strrc);

    ResultSet rs = md.getCatalogs();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found catalog " + strrc);
    }

    Connection c = md.getConnection();

    int intrc = md.getDatabaseMajorVersion();
    LOG.debug("DB major version is " + intrc);

    intrc = md.getDatabaseMinorVersion();
    LOG.debug("DB minor version is " + intrc);

    strrc = md.getDatabaseProductName();
    LOG.debug("DB product name is " + strrc);

    strrc = md.getDatabaseProductVersion();
    LOG.debug("DB product version is " + strrc);

    intrc = md.getDefaultTransactionIsolation();
    LOG.debug("Default transaction isolation is " + intrc);

    intrc = md.getDriverMajorVersion();
    LOG.debug("Driver major version is " + intrc);

    intrc = md.getDriverMinorVersion();
    LOG.debug("Driver minor version is " + intrc);

    strrc = md.getDriverName();
    LOG.debug("Driver name is " + strrc);

    strrc = md.getDriverVersion();
    LOG.debug("Driver version is " + strrc);

    strrc = md.getExtraNameCharacters();
    LOG.debug("Extra name characters is " + strrc);

    strrc = md.getIdentifierQuoteString();
    LOG.debug("Identifier quote string is " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getImportedKeys("a", "b", "d");

    // In Hive 1.2 this always returns an empty RS
    rs = md.getIndexInfo("a", "b", "d", true, true);

    intrc = md.getJDBCMajorVersion();
    LOG.debug("JDBC major version is " + intrc);

    intrc = md.getJDBCMinorVersion();
    LOG.debug("JDBC minor version is " + intrc);

    intrc = md.getMaxColumnNameLength();
    LOG.debug("Maximum column name length is " + intrc);

    strrc = md.getNumericFunctions();
    LOG.debug("Numeric functions are " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getPrimaryKeys("a", "b", "d");

    // In Hive 1.2 this always returns an empty RS
    rs = md.getProcedureColumns("a", "b", "d", "e");

    strrc = md.getProcedureTerm();
    LOG.debug("Procedures are called " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getProcedures("a", "b", "d");

    strrc = md.getSchemaTerm();
    LOG.debug("Schemas are called " + strrc);

    rs = md.getSchemas();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found schema " + strrc);
    }

    strrc = md.getSearchStringEscape();
    LOG.debug("Search string escape is " + strrc);

    strrc = md.getStringFunctions();
    LOG.debug("String functions are " + strrc);

    strrc = md.getSystemFunctions();
    LOG.debug("System functions are " + strrc);

    rs = md.getTableTypes();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found table type " + strrc);
    }

    strrc = md.getTimeDateFunctions();
    LOG.debug("Time/date functions are " + strrc);

    rs = md.getTypeInfo();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found type " + strrc);
    }

    // In Hive 1.2 this always returns an empty RS
    rs = md.getUDTs("a", "b", "d", null);

    boolrc = md.supportsAlterTableWithAddColumn();
    LOG.debug("Supports alter table with add column? " + boolrc);

    boolrc = md.supportsAlterTableWithDropColumn();
    LOG.debug("Supports alter table with drop column? " + boolrc);

    boolrc = md.supportsBatchUpdates();
    LOG.debug("Supports batch updates? " + boolrc);

    boolrc = md.supportsCatalogsInDataManipulation();
    LOG.debug("Supports catalogs in data manipulation? " + boolrc);

    boolrc = md.supportsCatalogsInIndexDefinitions();
    LOG.debug("Supports catalogs in index definition? " + boolrc);

    boolrc = md.supportsCatalogsInPrivilegeDefinitions();
    LOG.debug("Supports catalogs in privilege definition? " + boolrc);

    boolrc = md.supportsCatalogsInProcedureCalls();
    LOG.debug("Supports catalogs in procedure calls? " + boolrc);

    boolrc = md.supportsCatalogsInTableDefinitions();
    LOG.debug("Supports catalogs in table definition? " + boolrc);

    boolrc = md.supportsColumnAliasing();
    LOG.debug("Supports column aliasing? " + boolrc);

    boolrc = md.supportsFullOuterJoins();
    LOG.debug("Supports full outer joins? " + boolrc);

    boolrc = md.supportsGroupBy();
    LOG.debug("Supports group by? " + boolrc);

    boolrc = md.supportsLimitedOuterJoins();
    LOG.debug("Supports limited outer joins? " + boolrc);

    boolrc = md.supportsMultipleResultSets();
    LOG.debug("Supports limited outer joins? " + boolrc);

    boolrc = md.supportsNonNullableColumns();
    LOG.debug("Supports non-nullable columns? " + boolrc);

    boolrc = md.supportsOuterJoins();
    LOG.debug("Supports outer joins? " + boolrc);

    boolrc = md.supportsPositionedDelete();
    LOG.debug("Supports positioned delete? " + boolrc);

    boolrc = md.supportsPositionedUpdate();
    LOG.debug("Supports positioned update? " + boolrc);

    boolrc = md.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
    LOG.debug("Supports result set holdability? " + boolrc);

    boolrc = md.supportsResultSetType(ResultSet.HOLD_CURSORS_OVER_COMMIT);
    LOG.debug("Supports result set type? " + boolrc);

    boolrc = md.supportsSavepoints();
    LOG.debug("Supports savepoints? " + boolrc);

    boolrc = md.supportsSchemasInDataManipulation();
    LOG.debug("Supports schemas in data manipulation? " + boolrc);

    boolrc = md.supportsSchemasInIndexDefinitions();
    LOG.debug("Supports schemas in index definitions? " + boolrc);

    boolrc = md.supportsSchemasInPrivilegeDefinitions();
    LOG.debug("Supports schemas in privilege definitions? " + boolrc);

    boolrc = md.supportsSchemasInProcedureCalls();
    LOG.debug("Supports schemas in procedure calls? " + boolrc);

    boolrc = md.supportsSchemasInTableDefinitions();
    LOG.debug("Supports schemas in table definitions? " + boolrc);

    boolrc = md.supportsSelectForUpdate();
    LOG.debug("Supports select for update? " + boolrc);

    boolrc = md.supportsStoredProcedures();
    LOG.debug("Supports stored procedures? " + boolrc);

    boolrc = md.supportsTransactions();
    LOG.debug("Supports transactions? " + boolrc);

    boolrc = md.supportsUnion();
    LOG.debug("Supports union? " + boolrc);

    boolrc = md.supportsUnionAll();
    LOG.debug("Supports union all? " + boolrc);

}

From source file:org.kuali.test.ui.components.sqlquerypanel.DatabasePanel.java

/**
 * Populates the given tree node with the columns of the table it represents.
 *
 * <p>Column metadata is cached per "schema.table" key in {@code tableColumnData}:
 * on a cache hit the cached {@code ColumnData} entries are cloned into the node;
 * on a miss the columns are read via {@link DatabaseMetaData#getColumns}, primary-key
 * ordinals are overlaid via {@link DatabaseMetaData#getPrimaryKeys} (best effort),
 * the columns are sorted, added to the node, and stored in the cache.</p>
 *
 * @param dbconn database connection configuration (used for display-name filtering)
 * @param dmd    live JDBC metadata for the target database
 * @param node   tree node whose user object is the {@code TableData} to populate
 * @throws Exception if metadata access or ColumnData cloning fails
 */
private void loadTableColumns(DatabaseConnection dbconn, DatabaseMetaData dmd, SqlQueryNode node)
        throws Exception {
    ResultSet res = null; // closed in the finally block below

    try {
        TableData td = (TableData) node.getUserObject();

        String colkey = (td.getSchema() + "." + td.getName());

        List<ColumnData> cols = tableColumnData.get(colkey);

        if (cols != null) {
            // Cache hit: clone each cached column so per-node state stays independent.
            for (ColumnData cd : cols) {
                ColumnData cdata = (ColumnData) cd.clone();
                node.add(new SqlQueryNode(getMainframe().getConfiguration(), cdata));
                td.getColumns().add(cdata);
            }
        } else {
            // Cache miss: read column metadata directly from the database.
            res = dmd.getColumns(null, td.getSchema(), td.getName(), null);

            while (res.next()) {
                // JDBC getColumns() ordinals: 4=COLUMN_NAME, 5=DATA_TYPE, 7=COLUMN_SIZE, 9=DECIMAL_DIGITS
                String cname = res.getString(4);
                int dataType = res.getInt(5);
                int width = res.getInt(7);
                int decimalDigits = res.getInt(9);

                String displayName = getColumnDisplayName(td.getName(), cname,
                        dbconn.getConfiguredTablesOnly());

                // A blank display name means the column is filtered out of the UI.
                if (StringUtils.isNotBlank(displayName)) {
                    ColumnData cd = new ColumnData(td.getSchema(), cname, displayName);
                    cd.setDataType(dataType);
                    cd.setDecimalDigits(decimalDigits);
                    cd.setWidth(width);
                    td.getColumns().add(cd);
                }
            }

            // Index the collected columns by name for the primary-key overlay below.
            HashMap<String, ColumnData> map = new HashMap<String, ColumnData>();
            for (ColumnData cd : td.getColumns()) {
                map.put(cd.getName(), cd);
            }

            res.close();
            try {
                // Best effort: tag columns with their primary-key sequence number.
                // getPrimaryKeys() ordinals: 4=COLUMN_NAME, 5=KEY_SEQ
                res = dmd.getPrimaryKeys(null, td.getSchema(), td.getName());
                while (res.next()) {
                    String cname = res.getString(4);
                    int seq = res.getInt(5);

                    ColumnData cd = map.get(cname);

                    if (cd != null) {
                        cd.setPrimaryKeyIndex(seq);
                    }
                }
            }

            // Some drivers do not support getPrimaryKeys(); treat that as non-fatal.
            catch (SQLException ex) {
                LOG.warn("error obtaining primary keys for table " + td.getName());
            }

            Collections.sort(td.getColumns());

            for (ColumnData cd : td.getColumns()) {
                node.add(new SqlQueryNode(getMainframe().getConfiguration(), cd));
            }

            // Store the fully-built column list so later calls hit the cache branch.
            tableColumnData.put(colkey, td.getColumns());
        }
    }

    finally {
        Utils.closeDatabaseResources(null, null, res);
    }
}

From source file:org.alfresco.util.schemacomp.ExportDb.java

/**
 * Reads the tables, columns, primary keys, indexes and foreign keys of a database
 * schema via JDBC metadata and records them in the {@code schema} model object.
 *
 * @param dbmd         live JDBC metadata for the target database
 * @param schemaName   schema to inspect (passed to the metadata calls as-is)
 * @param prefixFilter table-name pattern restricting which tables are read
 * @throws SQLException             if any metadata call fails
 * @throws IllegalArgumentException propagated from model construction
 * @throws IllegalAccessException   propagated from model construction
 */
private void extractSchema(DatabaseMetaData dbmd, String schemaName, String prefixFilter)
        throws SQLException, IllegalArgumentException, IllegalAccessException {
    if (log.isDebugEnabled()) {
        log.debug("Retrieving tables: schemaName=[" + schemaName + "], prefixFilter=[" + prefixFilter + "]");
    }

    final ResultSet tables = dbmd.getTables(null, schemaName, prefixFilter,
            new String[] { "TABLE", "VIEW", "SEQUENCE" });

    while (tables.next()) {
        final String tableName = tables.getString("TABLE_NAME");

        if (log.isDebugEnabled()) {
            log.debug("Examining table tableName=[" + tableName + "]");
        }

        // Oracle hack: ignore tables in the recycle bin
        // ALF-14129 fix, check whether schema already contains object with provided name
        if (tableName.startsWith("BIN$") || schema.containsByName(tableName)) {
            continue;
        }

        if (tables.getString("TABLE_TYPE").equals("SEQUENCE")) {
            Sequence sequence = new Sequence(tableName);
            schema.add(sequence);
            continue;
        }

        Table table = new Table(tableName);
        schema.add(table);

        // Table columns
        final ResultSet columns = dbmd.getColumns(null, tables.getString("TABLE_SCHEM"), tableName, "%");
        while (columns.next()) {
            String columnName = columns.getString("COLUMN_NAME");
            Column column = new Column(columnName);

            String dbType = columns.getString("TYPE_NAME");
            int colSize = columns.getInt("COLUMN_SIZE");
            int scale = columns.getInt("DECIMAL_DIGITS");
            int jdbcType = columns.getInt("DATA_TYPE");
            String type = generateType(dbType, colSize, scale, jdbcType);
            column.setType(type);

            String nullableString = columns.getString("IS_NULLABLE");
            column.setNullable(parseBoolean(nullableString));

            column.setOrder(columns.getInt("ORDINAL_POSITION"));

            try {
                String autoIncString = columns.getString("IS_AUTOINCREMENT");
                column.setAutoIncrement(parseBoolean(autoIncString));
            } catch (SQLException jtdsDoesNOtHAveIsUatoincrement) {
                // jTDS (and some other drivers) lack IS_AUTOINCREMENT; fall back to the type name.
                column.setAutoIncrement((dbType.endsWith("identity")));
            }

            column.setParent(table);
            table.getColumns().add(column);
        }
        columns.close();

        // Primary key
        final ResultSet primarykeycols = dbmd.getPrimaryKeys(null, tables.getString("TABLE_SCHEM"), tableName);

        PrimaryKey pk = null;

        while (primarykeycols.next()) {
            if (pk == null) {
                String pkName = primarykeycols.getString("PK_NAME");
                pk = new PrimaryKey(pkName);
            }
            String columnName = primarykeycols.getString("COLUMN_NAME");
            pk.getColumnNames().add(columnName);

            int columnOrder = primarykeycols.getInt("KEY_SEQ");
            pk.getColumnOrders().add(columnOrder);
        }
        primarykeycols.close();

        // If this table has a primary key, add it.
        if (pk != null) {
            pk.setParent(table);
            table.setPrimaryKey(pk);
        }

        // Indexes
        final ResultSet indexes = dbmd.getIndexInfo(null, tables.getString("TABLE_SCHEM"), tableName, false,
                true);
        String lastIndexName = "";

        Index index = null;

        while (indexes.next()) {
            final String indexName = indexes.getString("INDEX_NAME");
            if (indexName == null) {
                // Oracle seems to have some dummy index entries
                continue;
            }
            // Skip the index corresponding to the PK if it is mentioned.
            // Guard against tables with no primary key: getPrimaryKey() is null then,
            // and the unguarded call previously threw a NullPointerException.
            else if (table.getPrimaryKey() != null && indexName.equals(table.getPrimaryKey().getName())) {
                continue;
            }

            if (!indexName.equals(lastIndexName)) {
                index = new Index(indexName);
                index.setUnique(!indexes.getBoolean("NON_UNIQUE"));
                index.setParent(table);
                table.getIndexes().add(index);
                lastIndexName = indexName;
            }
            if (index != null) {
                String columnName = indexes.getString("COLUMN_NAME");
                index.getColumnNames().add(columnName);
            }
        }
        indexes.close();

        final ResultSet foreignkeys = dbmd.getImportedKeys(null, tables.getString("TABLE_SCHEM"), tableName);
        String lastKeyName = "";

        ForeignKey fk = null;

        while (foreignkeys.next()) {
            // NOTE(review): assumes FK_NAME is never null; some drivers can return
            // unnamed foreign keys — confirm against the supported databases.
            final String keyName = foreignkeys.getString("FK_NAME");
            if (!keyName.equals(lastKeyName)) {
                fk = new ForeignKey(keyName);
                fk.setParent(table);
                table.getForeignKeys().add(fk);
                lastKeyName = keyName;
            }
            if (fk != null) {
                fk.setLocalColumn(foreignkeys.getString("FKCOLUMN_NAME"));
                fk.setTargetTable(foreignkeys.getString("PKTABLE_NAME"));
                fk.setTargetColumn(foreignkeys.getString("PKCOLUMN_NAME"));
            }
        }
        foreignkeys.close();
    }
    tables.close();
}

From source file:it.unibas.spicy.persistence.relational.DAORelational.java

/**
 * Copies the instance data of every loadable table in the given schema into the
 * corresponding MIPMap source/target schema in the internal Postgres database.
 *
 * <p>Rows are read in deterministic order (via a temporary view ordered by the
 * table's primary key, when one exists) and inserted in batches of
 * {@code BATCH_SIZE} rows per INSERT statement.</p>
 *
 * @param accessConfiguration connection settings for the database being loaded
 * @param dataSource          proxy that receives the "instances loaded" annotation
 * @param dataDescription     filter describing which tables/fragments to load
 * @param dataSourceDB        factory used to open and close both connections
 * @param source              true to load into the source schema, false for the target
 * @throws DAOException, SQLException if connecting, reading or inserting fails
 */
public void loadInstance(int scenarioNo, AccessConfiguration accessConfiguration, IDataSourceProxy dataSource,
        DBFragmentDescription dataDescription, IConnectionFactory dataSourceDB, boolean source)
        throws DAOException, SQLException {
    Connection connection = dataSourceDB.getConnection(accessConfiguration);
    DatabaseMetaData databaseMetaData = null;
    String catalog = null;
    String schemaName = accessConfiguration.getSchemaName();
    Connection connectionPostgres = null;
    this.dataDescription = dataDescription;

    AccessConfiguration accessConfigurationPostgres = new AccessConfiguration();
    accessConfigurationPostgres.setDriver(SpicyEngineConstants.ACCESS_CONFIGURATION_DRIVER);
    accessConfigurationPostgres
            .setUri(SpicyEngineConstants.ACCESS_CONFIGURATION_URI + SpicyEngineConstants.MAPPING_TASK_DB_NAME);
    accessConfigurationPostgres.setLogin(SpicyEngineConstants.ACCESS_CONFIGURATION_LOGIN);
    accessConfigurationPostgres.setPassword(SpicyEngineConstants.ACCESS_CONFIGURATION_PASS);
    connectionPostgres = dataSourceDB.getConnection(accessConfigurationPostgres);
    Statement statement = null;
    Statement statementPostgres = null;
    try {
        databaseMetaData = connection.getMetaData();
        catalog = connection.getCatalog();
        String[] tableTypes = new String[] { "TABLE" };
        ResultSet tableResultSet = databaseMetaData.getTables(catalog, schemaName, null, tableTypes);
        statement = connection.createStatement();
        statementPostgres = connectionPostgres.createStatement();
        while (tableResultSet.next()) {
            String tableName = tableResultSet.getString("TABLE_NAME");
            if (!this.dataDescription.checkLoadTable(tableName)) {
                continue;
            }
            String tablePath = tableName;
            if (!schemaName.equals("")) {
                tablePath = schemaName + ".\"" + tableName + "\"";
            }
            String newTablePath = tableName;
            if (source) {
                newTablePath = SpicyEngineConstants.SOURCE_SCHEMA_NAME + scenarioNo + ".\"" + tableName + "\"";
            } else {
                newTablePath = SpicyEngineConstants.TARGET_SCHEMA_NAME + scenarioNo + ".\"" + tableName + "\"";
            }
            // SECURITY NOTE(review): SQL below is built by string concatenation from
            // table names and column values. Values are quote-escaped, but identifiers
            // are not — acceptable only because names come from database metadata.
            ResultSet countResult = statement
                    .executeQuery("SELECT COUNT(*) AS instancesCount FROM " + tablePath + ";");
            int instancesCount = 1;
            while (countResult.next()) {
                instancesCount = countResult.getInt("instancesCount");
            }
            countResult.close();

            // Build a comma-separated list of the table's primary-key columns so rows
            // can be read back in a stable order.
            StringBuilder pKListBuilder = new StringBuilder();
            ResultSet pKList = databaseMetaData.getPrimaryKeys(null, null, tableName);
            while (pKList.next()) {
                if (pKListBuilder.length() > 0) {
                    pKListBuilder.append(",");
                }
                pKListBuilder.append(pKList.getString("COLUMN_NAME"));
            }
            pKList.close();
            // Fixed: the original compared Strings with != "" (reference identity),
            // which only worked by interning accident; use isEmpty() instead.
            String pKListString = pKListBuilder.toString();

            int inCount = 0;
            String viewName = tableName + "_MIPMapView";
            String orderByClause = "";
            if (!pKListString.isEmpty())
                orderByClause = " ORDER BY " + pKListString;
            statement.executeUpdate("DROP VIEW IF EXISTS " + viewName + ";");
            statement.executeUpdate(
                    "CREATE VIEW " + viewName + " AS SELECT * FROM " + tablePath + orderByClause + ";");
            for (int i = 0; i <= ((instancesCount - 1) / BATCH_SIZE); i++) {
                ResultSet instancesSet = statement.executeQuery("SELECT * FROM " + viewName + " LIMIT "
                        + BATCH_SIZE + " OFFSET " + (BATCH_SIZE * i) + ";");
                ResultSetMetaData rsmd = instancesSet.getMetaData();
                int columnsNumber = rsmd.getColumnCount();
                StringBuilder sqlInsertStmnt = new StringBuilder();
                while (instancesSet.next()) {
                    StringBuilder rowValues = new StringBuilder("(");
                    for (int j = 1; j <= columnsNumber; j++) {
                        if (j > 1) {
                            rowValues.append(",");
                        }
                        String columnValue = instancesSet.getString(j);
                        if (columnValue == null) {
                            rowValues.append(" null");
                        } else if (isTextColumn(rsmd.getColumnTypeName(j))) {
                            // Escape embedded single quotes for the SQL literal.
                            rowValues.append("'").append(columnValue.replaceAll("'", "''")).append("'");
                        } else {
                            rowValues.append(columnValue);
                        }
                    }
                    rowValues.append(")");
                    if (sqlInsertStmnt.length() > 0) {
                        sqlInsertStmnt.append(",");
                    }
                    sqlInsertStmnt.append(rowValues);
                }
                instancesSet.close();
                if (sqlInsertStmnt.length() > 0) {
                    inCount += statementPostgres
                            .executeUpdate("insert into " + newTablePath + " values " + sqlInsertStmnt + ";");
                }
            }
            statement.executeUpdate("DROP VIEW IF EXISTS " + viewName + ";");
        }
        tableResultSet.close();
        dataSource.addAnnotation(SpicyEngineConstants.LOADED_INSTANCES_FLAG, true);
    } finally {
        // Close statements first (which also closes their open ResultSets),
        // then release the connections through the factory.
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException ignored) {
                // best effort on cleanup
            }
        }
        if (statementPostgres != null) {
            try {
                statementPostgres.close();
            } catch (SQLException ignored) {
                // best effort on cleanup
            }
        }
        if (connection != null)
            dataSourceDB.close(connection);
        if (connectionPostgres != null)
            dataSourceDB.close(connectionPostgres);
    }
}

From source file:org.pentaho.aggdes.model.mondrian.validate.HierarchyPkValidator.java

/**
 * Validates that every hierarchy table referenced by the cube's dimensions has a
 * primary key in the database.
 *
 * <p>Each distinct relation is checked at most once (tracked in
 * {@code checkedRelations}). Mondrian views are not yet supported and are skipped.
 * Any SQLException aborts validation with a fatal message.</p>
 *
 * @param schema Mondrian schema containing the dimensions
 * @param cube   cube whose hierarchies are validated
 * @param conn   live connection used to read JDBC metadata
 * @return validation messages (OK/ERROR per relation, or a single fatal entry)
 */
public List<ValidationMessage> validateCube(Schema schema, Cube cube, Connection conn) {

    List<ValidationMessage> messages = new ArrayList<ValidationMessage>();

    Map<String, Boolean> checkedRelations = new HashMap<String, Boolean>();

    // ~ Get DatabaseMetaData ==========================================================================================
    DatabaseMetaData meta = null;
    try {
        meta = conn.getMetaData();
    } catch (SQLException e) {
        if (logger.isErrorEnabled()) {
            logger.error("an exception occurred", e); //$NON-NLS-1$
        }
        return fatal(e, messages);
    }

    if (logger.isDebugEnabled()) {
        logger.debug("processing cube \"" + cube.name + "\""); //$NON-NLS-1$ //$NON-NLS-2$
    }

    // TODO: include validation support for mondrian views
    if (!(cube.fact instanceof Table)) {
        if (logger.isDebugEnabled()) {
            logger.debug("cube \"" + cube.name + "\" contains unsupported fact type, " + cube.fact); //$NON-NLS-1$ //$NON-NLS-2$
        }
        return messages;
    }

    String schemaName = ((Table) cube.fact).schema;

    // ~ Check: Primary key on hierarchies==============================================================================
    for (CubeDimension dim : cube.dimensions) {
        for (Hierarchy hierarchy : dim.getDimension(schema).hierarchies) {
            // if primaryKey then use that; otherwise use key at lowest level
            String primaryKey;
            if (null != hierarchy.primaryKey) {
                primaryKey = hierarchy.primaryKey;
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("skipping primary key check as hierarchy table and fact table are the same");
                }
                // table is cube table--already checked its primary key
                break;
            }

            // if primaryKeyTable then use that; elseif use nested table element; else use fact table
            String primaryKeyTable;
            if (null != hierarchy.primaryKeyTable) {
                primaryKeyTable = hierarchy.primaryKeyTable;
            } else if (null != hierarchy.relation) {
                // TODO mlowery again assuming a table; seems bad
                primaryKeyTable = ((Table) hierarchy.relation).name;
            } else {
                primaryKeyTable = ((Table) cube.fact).name;
            }

            if (checkedRelations.containsKey(primaryKeyTable)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("already checked that primary key exists on relation \"" //$NON-NLS-1$
                            + (null == schemaName ? "" : schemaName + ".") //$NON-NLS-1$ //$NON-NLS-2$
                            + primaryKeyTable + "\"; skipping"); //$NON-NLS-1$
                }
                continue;
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("checking that primary key exists on relation \"" //$NON-NLS-1$
                            + (null == schemaName ? "" : schemaName + ".") //$NON-NLS-1$ //$NON-NLS-2$
                            + primaryKeyTable + "\""); //$NON-NLS-1$
                }
            }

            // A table has a primary key iff getPrimaryKeys returns at least one row.
            boolean pkHierarchyFound = false;
            ResultSet rs2 = null;
            try {
                rs2 = meta.getPrimaryKeys(null, schemaName, primaryKeyTable);
                pkHierarchyFound = rs2.next();
            } catch (SQLException e) {
                if (logger.isErrorEnabled()) {
                    logger.error("an exception occurred", e); //$NON-NLS-1$
                }
                return fatal(e, messages);
            } finally {
                // Fixed resource leak: the ResultSet was never closed before,
                // leaking one open cursor per checked relation.
                if (rs2 != null) {
                    try {
                        rs2.close();
                    } catch (SQLException ignored) {
                        // best effort on cleanup
                    }
                }
            }
            if (!pkHierarchyFound) {
                append(messages, ERROR, "ERROR_HIERARCHY_TABLE_PK_CHECK", primaryKeyTable); //$NON-NLS-1$
            } else {
                append(messages, OK, "OK_HIERARCHY_TABLE_PK_CHECK", primaryKeyTable); //$NON-NLS-1$
            }
            checkedRelations.put(primaryKeyTable, true);
        }
    }

    return messages;
}

From source file:com.netspective.axiom.policy.AnsiDatabasePolicy.java

/**
 * Reverse-engineers the tables of a database catalog/schema into an Axiom XML
 * schema document written to the given writer.
 *
 * <p>Primary-key and foreign-key lookups are best effort: drivers that do not
 * support them are tolerated and the corresponding attributes are simply omitted.</p>
 *
 * @param writer        destination for the generated XML (not closed by this method)
 * @param conn          live connection used to read JDBC metadata
 * @param catalog       catalog to reverse engineer (also used in the schema name)
 * @param schemaPattern schema pattern passed to the metadata calls
 * @throws IOException  if writing to the writer fails
 * @throws SQLException if a required metadata call fails
 */
public void reverseEngineer(Writer writer, Connection conn, String catalog, String schemaPattern)
        throws IOException, SQLException {
    Map dataTypesMap = prepareJdbcTypeInfoMap();
    DatabaseMetaData dbmd = conn.getMetaData();
    TextUtils textUtils = TextUtils.getInstance();

    writer.write("<?xml version=\"1.0\"?>\n\n");
    writer.write("<!-- Reverse engineered by Axiom\n");
    writer.write("     driver: " + dbmd.getDriverName() + "\n");
    writer.write("     driver-version: " + dbmd.getDriverVersion() + "\n");
    writer.write("     product: " + dbmd.getDatabaseProductName() + "\n");
    writer.write("     product-version: " + dbmd.getDatabaseProductVersion() + "\n");

    writer.write("     available catalogs:");
    ResultSet rs = null;
    try {
        rs = dbmd.getCatalogs();
        while (rs.next()) {
            writer.write(" " + rs.getObject(1).toString());
        }
    } finally {
        if (rs != null)
            rs.close();
    }

    writer.write("\n");

    writer.write("     available schemas:");
    try {
        rs = dbmd.getSchemas();
        while (rs.next()) {
            writer.write(" " + rs.getObject(1).toString());
        }
    } finally {
        if (rs != null)
            rs.close();
    }
    writer.write("\n");
    writer.write("-->\n\n");

    writer.write("<component xmlns:xdm=\"http://www.netspective.org/Framework/Commons/XMLDataModel\">\n");
    writer.write("    <xdm:include resource=\"com/netspective/axiom/conf/axiom.xml\"/>\n");
    writer.write("    <schema name=\"" + catalog + "." + schemaPattern + "\">\n");

    // Collect driver type info, indexed both by type name and by JDBC type code.
    Map dbmdTypeInfoByName = new HashMap();
    Map dbmdTypeInfoByJdbcType = new HashMap();
    ResultSet typesRS = null;
    try {
        typesRS = dbmd.getTypeInfo();
        while (typesRS.next()) {
            int colCount = typesRS.getMetaData().getColumnCount();
            Object[] typeInfo = new Object[colCount];
            for (int i = 1; i <= colCount; i++)
                typeInfo[i - 1] = typesRS.getObject(i);
            dbmdTypeInfoByName.put(typesRS.getString(1), typeInfo);
            dbmdTypeInfoByJdbcType.put(new Integer(typesRS.getInt(2)), typeInfo);
        }
    } finally {
        if (typesRS != null)
            typesRS.close();
    }

    ResultSet tables = null;
    try {
        tables = dbmd.getTables(catalog, schemaPattern, null, new String[] { "TABLE" });
        while (tables.next()) {
            String tableNameOrig = tables.getString(3);
            String tableName = textUtils.fixupTableNameCase(tableNameOrig);

            writer.write("        <table name=\"" + tableName + "\">\n");

            // Primary-key columns, keyed by column name (getPrimaryKeys: 4=COLUMN_NAME, 5=KEY_SEQ).
            Map primaryKeys = new HashMap();
            ResultSet pkRS = null;
            try {
                pkRS = dbmd.getPrimaryKeys(null, null, tableNameOrig);
                while (pkRS.next()) {
                    primaryKeys.put(pkRS.getString(4), pkRS.getString(5));
                }

            } catch (Exception e) {
                // driver may not support this function
            } finally {
                if (pkRS != null)
                    pkRS.close();
            }

            // Foreign keys: local column name -> "pktable.pkcolumn" lookup reference.
            Map fKeys = new HashMap();
            ResultSet fkRS = null;
            try {
                fkRS = dbmd.getImportedKeys(null, null, tableNameOrig);
                while (fkRS.next()) {
                    fKeys.put(fkRS.getString(8), textUtils.fixupTableNameCase(fkRS.getString(3)) + "."
                            + fkRS.getString(4).toLowerCase());
                }
            } catch (Exception e) {
                // driver may not support this function
            } finally {
                if (fkRS != null)
                    fkRS.close();
            }

            // we keep track of processed columns so we don't duplicate them in the XML
            Set processedColsMap = new HashSet();
            ResultSet columns = null;
            try {
                columns = dbmd.getColumns(null, null, tableNameOrig, null);
                while (columns.next()) {
                    String columnNameOrig = columns.getString(4);
                    if (processedColsMap.contains(columnNameOrig))
                        continue;
                    processedColsMap.add(columnNameOrig);

                    String columnName = columnNameOrig.toLowerCase();

                    // NOTE(review): attribute values (default, descr) are written
                    // without XML escaping; values containing quotes or '<' would
                    // produce malformed XML — confirm whether inputs can contain them.
                    writer.write("            <column name=\"" + columnName + "\"");
                    try {
                        if (fKeys.containsKey(columnNameOrig))
                            writer.write(" lookup-ref=\"" + fKeys.get(columnNameOrig) + "\"");
                        else {
                            short jdbcType = columns.getShort(5);
                            String dataType = (String) dataTypesMap.get(new Integer(jdbcType));
                            if (dataType == null)
                                dataType = Short.toString(jdbcType);
                            writer.write(" type=\"" + dataType + "\"");
                        }

                        if (primaryKeys.containsKey(columnNameOrig))
                            writer.write(" primary-key=\"yes\"");

                        if (columns.getString(18).equals("NO"))
                            writer.write(" required=\"yes\"");

                        String defaultValue = columns.getString(13);
                        if (defaultValue != null)
                            writer.write(" default=\"" + defaultValue + "\"");

                        String remarks = columns.getString(12);
                        if (remarks != null)
                            writer.write(" descr=\"" + remarks + "\"");

                    } catch (Exception e) {
                        // best effort: emit the column element even if optional metadata fails
                    }

                    writer.write("/>\n");
                }
            } finally {
                if (columns != null)
                    columns.close();
            }

            writer.write("        </table>\n");
        }
    } finally {
        // Fixed: guard against NPE when getTables() itself threw before assignment;
        // the unguarded close() would then mask the original exception with an NPE.
        if (tables != null)
            tables.close();
    }

    writer.write("    </schema>\n");
    writer.write("</component>");
}

From source file:org.lockss.db.DbMigrator.java

/**
 * Extracts the metadata of the tables of a database schema.
 * /*from  w  ww  .  j  ava 2 s .c  o m*/
 * @param conn
 *          A Connection with the database connection to be used.
 * @param schema
 *          A String with the database schema.
 * 
 * @return a Map<String, DbTable> with the metadata of the tables.
 * @throws DbMigratorException
 *           if there are problems extracting the metadata.
 */
private Map<String, DbTable> extractDbMetadata(Connection conn, String schema) throws DbMigratorException {
    final String DEBUG_HEADER = "populateDbMetadata(): ";
    if (log.isDebug2())
        log.debug2(DEBUG_HEADER + "schema = " + schema);

    if (conn == null) {
        throw new DbMigratorException("Null connection");
    }

    Map<String, DbTable> tableMap = new HashMap<String, DbTable>();

    ResultSet tableResultSet = null;
    String tableName = null;
    ResultSet columnResultSet = null;
    ResultSet pkResultSet = null;
    ResultSet fkResultSet = null;

    try {
        DatabaseMetaData metadata = DbManagerSql.getMetadata(conn);

        // Get the database schema table data.
        tableResultSet = DbManagerSql.getStandardTables(conn, null, schema, null);

        // Loop through all the schema tables.
        while (tableResultSet.next()) {
            tableName = tableResultSet.getString("TABLE_NAME");
            log.debug2(DEBUG_HEADER + "TABLE_NAME = " + tableName);

            String tableType = tableResultSet.getString("TABLE_TYPE");
            log.debug2(DEBUG_HEADER + "TABLE_TYPE = " + tableType);
            log.debug2(DEBUG_HEADER + "");

            // Check that this is not a view, etc.
            if ("TABLE".equals(tableType)) {
                // Yes: Get the table column metadata.
                DbTable table = new DbTable(tableName.toLowerCase());
                DbRow row = new DbRow(tableName.toLowerCase());
                table.setRow(row);
                List<DbColumn> columns = row.getColumns();
                columnResultSet = metadata.getColumns(null, schema, tableName, null);

                // Loop through each table column.
                while (columnResultSet.next()) {
                    String columnName = columnResultSet.getString("COLUMN_NAME").toLowerCase();
                    log.debug2(DEBUG_HEADER + "columnName = '" + columnName + "'.");

                    int columnType = columnResultSet.getInt("DATA_TYPE");
                    log.debug2(DEBUG_HEADER + "columnType = '" + columnType + "'.");

                    int position = columnResultSet.getInt("ORDINAL_POSITION");
                    log.debug2(DEBUG_HEADER + "position = '" + position + "'.");

                    DbColumn column = new DbColumn(columnName, columnType, position);
                    columns.add(column);
                }

                // Remember any primary key the table may have.
                pkResultSet = metadata.getPrimaryKeys(null, schema, tableName);

                if (pkResultSet.next()) {
                    String pkColumnName = pkResultSet.getString("COLUMN_NAME").toLowerCase();
                    log.debug2(DEBUG_HEADER + "pkColumnName = '" + pkColumnName + "'.");

                    for (DbColumn column : columns) {
                        if (pkColumnName.equals(column.getName())) {
                            column.setPk(true);
                            break;
                        }
                    }
                }

                // Remember any foreign keys the table may have.
                fkResultSet = metadata.getImportedKeys(null, schema, tableName);

                while (fkResultSet.next()) {
                    String fkColumnName = fkResultSet.getString("FKCOLUMN_NAME").toLowerCase();
                    log.debug2(DEBUG_HEADER + "fkColumnName = '" + fkColumnName + "'.");

                    String fkTableName = fkResultSet.getString("PKTABLE_NAME").toLowerCase();
                    log.debug2(DEBUG_HEADER + "fkTableName = '" + fkTableName + "'.");

                    for (DbColumn column : columns) {
                        if (fkColumnName.equals(column.getName())) {
                            column.setFkTable(fkTableName);
                            break;
                        }
                    }
                }

                // Sort the columns by their ordinal position.
                Collections.sort(columns);

                if (log.isDebug3()) {
                    for (DbColumn column : columns) {
                        log.debug3(DEBUG_HEADER + "column = '" + column + "'.");
                    }
                }

                // Add  the table to the result.
                tableMap.put(tableName.toLowerCase(), table);
            }
        }
    } catch (SQLException sqle) {
        String message = "Cannot populate DB metadata.";
        log.error(message);
        log.error("TABLE_NAME = " + tableName);
        throw new DbMigratorException(message, sqle);
    } catch (RuntimeException re) {
        String message = "Cannot populate DB metadata.";
        log.error(message);
        log.error("TABLE_NAME = " + tableName);
        throw new DbMigratorException(message, re);
    } finally {
        DbManagerSql.safeCloseResultSet(fkResultSet);
        DbManagerSql.safeCloseResultSet(pkResultSet);
        DbManagerSql.safeCloseResultSet(columnResultSet);
        DbManagerSql.safeCloseResultSet(tableResultSet);

        try {
            DbManagerSql.rollback(conn, log);
        } catch (SQLException sqle) {
            throw new DbMigratorException(sqle);
        } catch (RuntimeException re) {
            throw new DbMigratorException(re);
        }
    }

    if (log.isDebug2())
        log.debug2(DEBUG_HEADER + "tableMap.size() = " + tableMap.size());
    return tableMap;
}

From source file:org.apache.openjpa.jdbc.sql.DBDictionary.java

/**
 * Reflect on the schema to find primary keys for the given table pattern.
 *
 * @param meta database metadata to query
 * @param catalog catalog identifier, may be null
 * @param schemaName schema identifier, may be null
 * @param tableName table identifier; when null and the driver does not
 *        support a null table for getPrimaryKeys, the lookup is skipped
 * @param conn connection used for any pre-metadata preparation
 * @return the primary keys found, or null when the lookup is skipped
 * @throws SQLException if the metadata query fails
 */
protected PrimaryKey[] getPrimaryKeysFromGetPrimaryKeys(DatabaseMetaData meta, DBIdentifier catalog,
        DBIdentifier schemaName, DBIdentifier tableName, Connection conn) throws SQLException {
    if (tableName == null && !supportsNullTableForGetPrimaryKeys)
        return null;

    beforeMetadataOperation(conn);
    ResultSet pks = null;
    try {
        pks = meta.getPrimaryKeys(getCatalogNameForMetadata(catalog), getSchemaNameForMetadata(schemaName),
                getTableNameForMetadata(tableName));

        // Parameterized collection replaces the raw List/ArrayList the
        // original used; same behavior, no unchecked cast on toArray.
        List<PrimaryKey> pkList = new ArrayList<PrimaryKey>();
        while (pks != null && pks.next())
            pkList.add(newPrimaryKey(pks));
        return pkList.toArray(new PrimaryKey[pkList.size()]);
    } finally {
        if (pks != null) {
            try {
                pks.close();
            } catch (Exception e) {
                // Best-effort close; a failure here must not mask the result.
            }
        }
    }
}

From source file:org.kawanfw.test.api.client.DatabaseMetaDataTest.java

/**
 * Exercises DatabaseMetaData calls on the given connection: displays
 * general driver and database information, then asserts capabilities
 * (transaction isolation, batch updates, generated keys, savepoints)
 * and verifies primary-key and table metadata for the test schema.
 *
 * @param connection the (possibly remote AceQL) connection under test
 * @throws Exception if any JDBC call or assertion fails
 */
public void test(Connection connection) throws Exception {
    MessageDisplayer.initClassDisplay(this.getClass().getSimpleName());

    DatabaseMetaData databaseMetaData = connection.getMetaData();

    // Test that getMetaData() will return value from cache
    databaseMetaData = connection.getMetaData();

    if (connection instanceof RemoteConnection) {
        MessageDisplayer.display("Java Version : " + System.getProperty("java.version"));
        MessageDisplayer.display("AceQL Version: " + ((RemoteConnection) connection).getVersion());
        MessageDisplayer.display("AceQL Url    : " + ((RemoteConnection) connection).getUrl());
        MessageDisplayer.display("");
    }

    if (connection instanceof RemoteConnection) {
        MessageDisplayer.display("((RemoteConnection)connection).clone();");
        Connection connection2 = ((RemoteConnection) connection).clone();
        @SuppressWarnings("unused")
        DatabaseMetaData databaseMetaData2 = connection2.getMetaData();
        connection2.close();
    }

    MessageDisplayer.display("General info (no Assert done):");

    MessageDisplayer.display("connection.getCatalog()                     : " + connection.getCatalog());

    MessageDisplayer.display(
            "databaseMetaData.getDatabaseProductName()   : " + databaseMetaData.getDatabaseProductName());
    MessageDisplayer.display(
            "databaseMetaData.getDatabaseProductVersion(): " + databaseMetaData.getDatabaseProductVersion());
    MessageDisplayer.display(
            "databaseMetaData.getDatabaseMajorVersion()  : " + databaseMetaData.getDatabaseMajorVersion());
    MessageDisplayer.display(
            "databaseMetaData.getDatabaseMinorVersion()  : " + databaseMetaData.getDatabaseMinorVersion());
    MessageDisplayer.display(
            "databaseMetaData.allProceduresAreCallable() : " + databaseMetaData.allProceduresAreCallable());
    MessageDisplayer.display("");

    MessageDisplayer.display("databaseMetaData.getCatalogTerm(): " + databaseMetaData.getCatalogTerm());

    try {

        MessageDisplayer.display(
                "databaseMetaData.supportsStoredProcedures(): " + databaseMetaData.supportsStoredProcedures());

        MessageDisplayer.display("databaseMetaData.supportsStoredFunctionsUsingCallSyntax(): "
                + databaseMetaData.supportsStoredFunctionsUsingCallSyntax());

    } catch (Throwable e) {
        // Some drivers do not implement these optional calls; display and go on.
        MessageDisplayer.display(e.toString());
    }

    MessageDisplayer.display("connection.getAutoCommit(): " + connection.getAutoCommit());

    MessageDisplayer.display("databaseMetaData.getDefaultTransactionIsolation()    : "
            + databaseMetaData.getDefaultTransactionIsolation());

    MessageDisplayer
            .display("databaseMetaData.supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED): "
                    + databaseMetaData
                            .supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_UNCOMMITTED));

    MessageDisplayer.display("databaseMetaData.supportsTransactionIsolationLevel(TRANSACTION_READ_COMMITTED): "
            + databaseMetaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED));

    MessageDisplayer.display("databaseMetaData.supportsTransactionIsolationLevel(TRANSACTION_REPEATABLE_READ): "
            + databaseMetaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ));

    MessageDisplayer.display("databaseMetaData.supportsTransactionIsolationLevel(TRANSACTION_SERIALIZABLE): "
            + databaseMetaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE));

    MessageDisplayer
            .display("databaseMetaData.supportsBatchUpdates()    : " + databaseMetaData.supportsBatchUpdates());
    MessageDisplayer
            .display("databaseMetaData.supportsSavepoints()      : " + databaseMetaData.supportsSavepoints());
    MessageDisplayer.display(
            "databaseMetaData.supportsGetGeneratedKeys(): " + databaseMetaData.supportsGetGeneratedKeys());

    // A single SqlUtil instance replaces the many redundant
    // "new SqlUtil(connection)" constructions the original created for
    // every dialect check below.
    SqlUtil sqlUtil = new SqlUtil(connection);

    if (!sqlUtil.isTeradata() && !sqlUtil.isInformix()) {
        Assert.assertEquals(true,
                databaseMetaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED));
    }

    Assert.assertEquals("databaseMetaData.supportsBatchUpdates()", true,
            databaseMetaData.supportsBatchUpdates());

    if (!sqlUtil.isSQLAnywhere() && !sqlUtil.isAccess()) {
        Assert.assertEquals("databaseMetaData.supportsGetGeneratedKeys()", true,
                databaseMetaData.supportsGetGeneratedKeys());
    }
    // Informix does not support savepoints
    if (!sqlUtil.isInformix() && !sqlUtil.isTeradata() && !sqlUtil.isAccess()) {
        Assert.assertEquals(true, databaseMetaData.supportsSavepoints());
    }

    MessageDisplayer.display("");

    String catalog = null;
    String schema = null;
    String table = "customer";

    // Table name must be uppercase for Oracle & DB2, lowercase for MySQL
    // and PostgreSQL
    if (sqlUtil.isOracle() || sqlUtil.isHSQLDB() || sqlUtil.isDB2()) {
        table = table.toUpperCase();
    }

    ResultSet rs = null;

    if (!sqlUtil.isAccess()) {

        rs = databaseMetaData.getPrimaryKeys(catalog, schema, table);

        printResultSet(rs);

        boolean rsNext = false;

        while (rs.next()) {
            rsNext = true;
            String keyColumnName = rs.getString("COLUMN_NAME");
            MessageDisplayer.display("Primary Key is: " + keyColumnName + " for Table: " + table);
            Assert.assertEquals("customer_id", keyColumnName.toLowerCase());
        }

        // H2 may legitimately report no primary-key row here; every other
        // dialect is required to return at least one.
        if (!sqlUtil.isH2()) {
            Assert.assertEquals(true, rsNext);
        }

        rs.close();
    }

    String[] types = { "TABLE", "VIEW" };
    rs = databaseMetaData.getTables(null, null, null, types);

    Set<String> tablesSet = new HashSet<String>();

    Set<String> ourTables = new HashSet<String>();
    ourTables.add("banned_usernames");
    ourTables.add("customer");
    ourTables.add("customer_auto");
    ourTables.add("orderlog");
    ourTables.add("user_login");

    MessageDisplayer.display("");
    while (rs.next()) {
        table = rs.getString("TABLE_NAME");

        if (ourTables.contains(table.toLowerCase())) {
            MessageDisplayer.display("Table: " + table);
        }

        tablesSet.add(table.toLowerCase());
    }

    testTable("banned_usernames", tablesSet);
    testTable("customer", tablesSet);
    testTable("orderlog", tablesSet);
    testTable("user_login", tablesSet);

    rs.close();
}

From source file:com.amazon.carbonado.repo.jdbc.JDBCStorableIntrospector.java

/**
 * Uses the given database connection to query database metadata. This is
 * used to bind storables to tables, and properties to columns. Other
 * checks are performed to ensure that storable type matches well with the
 * definition in the database.
 *
 * @param mainInfo storable metadata to bind against the database
 * @param con open connection whose metadata is queried
 * @param searchCatalog catalog to search, or null for any
 * @param searchSchema schema to search, or null for any
 * @param resolver schema resolver; NOTE(review): not referenced anywhere in
 *        this method body — confirm it is intentionally unused here
 * @param primaryKeyCheckDisabled when true, primary key mismatches are
 *        logged as warnings instead of causing a MismatchException
 * @return the bound JDBC storable info
 * @throws SQLException if a metadata query fails
 * @throws SupportException if a property type cannot be supported
 */
private static <S extends Storable> JDBCStorableInfo<S> examine(StorableInfo<S> mainInfo, Connection con,
        final String searchCatalog, final String searchSchema, SchemaResolver resolver,
        boolean primaryKeyCheckDisabled) throws SQLException, SupportException {
    final DatabaseMetaData meta = con.getMetaData();

    final String databaseProductName = meta.getDatabaseProductName();
    final String userName = meta.getUserName();

    // Table name candidates: explicit aliases if declared on the storable,
    // otherwise aliases generated from the type's simple name.
    String[] tableAliases;
    if (mainInfo.getAliasCount() > 0) {
        tableAliases = mainInfo.getAliases();
    } else {
        String name = mainInfo.getStorableType().getSimpleName();
        tableAliases = generateAliases(name);
    }

    // Try to find matching table from aliases.
    String catalog = null, schema = null, tableName = null, tableType = null;
    // NOTE(review): this "findName" label is never targeted by a break; the
    // plain break below exits the for loop directly.
    findName: {
        // The call to getTables may return several matching tables. This
        // map defines the "best" table type we'd like to use. The higher
        // the number the better.
        Map<String, Integer> fitnessMap = new HashMap<String, Integer>();
        fitnessMap.put("LOCAL TEMPORARY", 1);
        fitnessMap.put("GLOBAL TEMPORARY", 2);
        fitnessMap.put("VIEW", 3);
        fitnessMap.put("SYSTEM TABLE", 4);
        fitnessMap.put("TABLE", 5);
        fitnessMap.put("ALIAS", 6);
        fitnessMap.put("SYNONYM", 7);

        for (int i = 0; i < tableAliases.length; i++) {
            ResultSet rs = meta.getTables(searchCatalog, searchSchema, tableAliases[i], null);
            try {
                int bestFitness = 0;
                while (rs.next()) {
                    String type = rs.getString("TABLE_TYPE");
                    Integer fitness = fitnessMap.get(type);
                    if (fitness != null) {
                        String rsSchema = rs.getString("TABLE_SCHEM");

                        if (searchSchema == null) {
                            if (userName != null && userName.equalsIgnoreCase(rsSchema)) {
                                // Favor entities whose schema name matches
                                // the user name.
                                fitness += 7;
                            }
                        }

                        if (fitness > bestFitness) {
                            bestFitness = fitness;
                            catalog = rs.getString("TABLE_CAT");
                            schema = rsSchema;
                            tableName = rs.getString("TABLE_NAME");
                            tableType = type;
                        }
                    }
                }
            } finally {
                rs.close();
            }

            if (tableName != null) {
                // Found a match, so stop checking aliases.
                break;
            }
        }
    }

    // No table found: fatal unless the storable is marked independent.
    if (tableName == null && !mainInfo.isIndependent()) {
        StringBuilder buf = new StringBuilder();
        buf.append("Unable to find matching table name for type \"");
        buf.append(mainInfo.getStorableType().getName());
        buf.append("\" by looking for ");
        appendToSentence(buf, tableAliases);
        buf.append(" with catalog " + searchCatalog + " and schema " + searchSchema);
        throw new MismatchException(buf.toString());
    }

    String qualifiedTableName = tableName;
    String resolvedTableName = tableName;

    // Oracle specific stuff...
    // TODO: Migrate this to OracleSupportStrategy.
    if (tableName != null && databaseProductName.toUpperCase().contains("ORACLE")) {
        if ("TABLE".equals(tableType) && searchSchema != null) {
            // Qualified table name references the schema. Used by SQL statements.
            qualifiedTableName = searchSchema + '.' + tableName;
        } else if ("SYNONYM".equals(tableType)) {
            // Try to get the real schema. This call is Oracle specific, however.
            // Note: on success this rewrites both schema and resolvedTableName.
            String select = "SELECT TABLE_OWNER,TABLE_NAME " + "FROM ALL_SYNONYMS "
                    + "WHERE OWNER=? AND SYNONYM_NAME=?";
            PreparedStatement ps = con.prepareStatement(select);
            ps.setString(1, schema); // in Oracle, schema is the owner
            ps.setString(2, tableName);
            try {
                ResultSet rs = ps.executeQuery();
                try {
                    if (rs.next()) {
                        schema = rs.getString("TABLE_OWNER");
                        resolvedTableName = rs.getString("TABLE_NAME");
                    }
                } finally {
                    rs.close();
                }
            } finally {
                ps.close();
            }
        }
    }

    // Gather information on all columns such that metadata only needs to
    // be retrieved once.
    Map<String, ColumnInfo> columnMap = new TreeMap<String, ColumnInfo>(String.CASE_INSENSITIVE_ORDER);

    if (resolvedTableName != null) {
        ResultSet rs = meta.getColumns(catalog, schema, resolvedTableName, null);
        // Large fetch size cuts round trips when the table has many columns.
        rs.setFetchSize(1000);
        try {
            while (rs.next()) {
                ColumnInfo info = new ColumnInfo(rs);
                columnMap.put(info.columnName, info);
            }
        } finally {
            rs.close();
        }
    }

    // Make sure that all properties have a corresponding column.
    Map<String, ? extends StorableProperty<S>> mainProperties = mainInfo.getAllProperties();
    Map<String, String> columnToProperty = new HashMap<String, String>();
    Map<String, JDBCStorableProperty<S>> jProperties = new LinkedHashMap<String, JDBCStorableProperty<S>>(
            mainProperties.size());

    // Error messages are accumulated so that ALL mismatches are reported
    // together rather than failing on the first one.
    ArrayList<String> errorMessages = new ArrayList<String>();

    for (StorableProperty<S> mainProperty : mainProperties.values()) {
        // Derived/join properties (or a missing table) never bind to a column.
        if (mainProperty.isDerived() || mainProperty.isJoin() || tableName == null) {
            jProperties.put(mainProperty.getName(), new JProperty<S>(mainProperty, primaryKeyCheckDisabled));
            continue;
        }

        String[] columnAliases;
        if (mainProperty.getAliasCount() > 0) {
            columnAliases = mainProperty.getAliases();
        } else {
            columnAliases = generateAliases(mainProperty.getName());
        }

        JDBCStorableProperty<S> jProperty = null;
        boolean addedError = false;

        // This second "findName" label IS used: broken out of on both the
        // incompatible-type and the successful-bind paths.
        findName: for (int i = 0; i < columnAliases.length; i++) {
            ColumnInfo columnInfo = columnMap.get(columnAliases[i]);
            if (columnInfo != null) {
                AccessInfo accessInfo = getAccessInfo(mainProperty, columnInfo.dataType,
                        columnInfo.dataTypeName, columnInfo.columnSize, columnInfo.decimalDigits);

                if (accessInfo == null) {
                    TypeDesc propertyType = TypeDesc.forClass(mainProperty.getType());
                    String message = "Property \"" + mainProperty.getName() + "\" has type \""
                            + propertyType.getFullName() + "\" which is incompatible with database type \""
                            + columnInfo.dataTypeName + '"';

                    if (columnInfo.decimalDigits > 0) {
                        message += " (decimal digits = " + columnInfo.decimalDigits + ')';
                    }

                    errorMessages.add(message);
                    addedError = true;
                    break findName;
                }

                // Nullability of the column must agree with the property's
                // Nullable annotation (independent properties are exempt).
                if (columnInfo.nullable) {
                    if (!mainProperty.isNullable() && !mainProperty.isIndependent()) {
                        errorMessages.add(
                                "Property \"" + mainProperty.getName() + "\" must have a Nullable annotation");
                    }
                } else {
                    if (mainProperty.isNullable() && !mainProperty.isIndependent()) {
                        errorMessages.add("Property \"" + mainProperty.getName()
                                + "\" must not have a Nullable annotation");
                    }
                }

                boolean autoIncrement = mainProperty.isAutomatic();
                if (autoIncrement) {
                    // Need to execute a little query to check if column is
                    // auto-increment or not. This information is not available in
                    // the regular database metadata prior to jdk1.6.

                    PreparedStatement ps = con.prepareStatement(
                            "SELECT " + columnInfo.columnName + " FROM " + tableName + " WHERE 1=0");

                    try {
                        ResultSet rs = ps.executeQuery();
                        try {
                            autoIncrement = rs.getMetaData().isAutoIncrement(1);
                        } finally {
                            rs.close();
                        }
                    } finally {
                        ps.close();
                    }
                }

                jProperty = new JProperty<S>(mainProperty, columnInfo, autoIncrement, primaryKeyCheckDisabled,
                        accessInfo.mResultSetGet, accessInfo.mPreparedStatementSet, accessInfo.getAdapter());

                break findName;
            }
        }

        if (jProperty != null) {
            jProperties.put(mainProperty.getName(), jProperty);
            columnToProperty.put(jProperty.getColumnName(), jProperty.getName());
        } else {
            if (mainProperty.isIndependent()) {
                jProperties.put(mainProperty.getName(),
                        new JProperty<S>(mainProperty, primaryKeyCheckDisabled));
            } else if (!addedError) {
                StringBuilder buf = new StringBuilder();
                buf.append("Unable to find matching database column for property \"");
                buf.append(mainProperty.getName());
                buf.append("\" by looking for ");
                appendToSentence(buf, columnAliases);
                errorMessages.add(buf.toString());
            }
        }
    }

    if (errorMessages.size() > 0) {
        throw new MismatchException(mainInfo.getStorableType(), errorMessages);
    }

    // Now verify that primary or alternate keys match.

    if (resolvedTableName != null)
        checkPrimaryKey: {
            ResultSet rs;
            try {
                rs = meta.getPrimaryKeys(catalog, schema, resolvedTableName);
            } catch (SQLException e) {
                // Deliberate fall-through: some drivers/entities cannot
                // report primary keys; log and skip the check entirely.
                getLog().info("Unable to get primary keys for table \"" + resolvedTableName + "\" with catalog "
                        + catalog + " and schema " + schema + ": " + e);
                break checkPrimaryKey;
            }

            List<String> pkProps = new ArrayList<String>();

            try {
                while (rs.next()) {
                    String columnName = rs.getString("COLUMN_NAME");
                    String propertyName = columnToProperty.get(columnName);

                    if (propertyName == null) {
                        errorMessages
                                .add("Column \"" + columnName + "\" must be part of primary or alternate key");
                        continue;
                    }

                    pkProps.add(propertyName);
                }
            } finally {
                rs.close();
            }

            if (errorMessages.size() > 0) {
                // Skip any extra checks.
                break checkPrimaryKey;
            }

            if (pkProps.size() == 0) {
                // If no primary keys are reported, don't even bother checking.
                // There's no consistent way to get primary keys, and entities
                // like views and synonyms don't usually report primary keys.
                // A primary key might even be logically defined as a unique
                // constraint.
                break checkPrimaryKey;
            }

            if (matchesKey(pkProps, mainInfo.getPrimaryKey())) {
                // Good. Primary key in database is same as in Storable.
                break checkPrimaryKey;
            }

            // Check if Storable has an alternate key which matches the
            // database's primary key.
            boolean foundAnyAltKey = false;
            for (StorableKey<S> altKey : mainInfo.getAlternateKeys()) {
                if (matchesKey(pkProps, altKey)) {
                    // Okay. Primary key in database matches a Storable
                    // alternate key.
                    foundAnyAltKey = true;

                    // Also check that declared primary key is a strict subset
                    // of the alternate key. If not, keep checking alt keys.

                    if (matchesSubKey(pkProps, mainInfo.getPrimaryKey())) {
                        break checkPrimaryKey;
                    }
                }
            }

            if (foundAnyAltKey) {
                errorMessages.add("Actual primary key matches a declared alternate key, "
                        + "but declared primary key must be a strict subset. "
                        + mainInfo.getPrimaryKey().getProperties() + " is not a subset of " + pkProps);
            } else {
                errorMessages.add("Actual primary key does not match any "
                        + "declared primary or alternate key: " + pkProps);
            }
        }

    if (errorMessages.size() > 0) {
        if (primaryKeyCheckDisabled) {
            // Primary-key mismatches downgraded to warnings by request.
            for (String errorMessage : errorMessages) {
                getLog().warn("Suppressed error: " + errorMessage);
            }
            errorMessages.clear();
        } else {
            throw new MismatchException(mainInfo.getStorableType(), errorMessages);
        }
    }

    // IndexInfo is empty, as querying for it tends to cause a table analyze to run.
    IndexInfo[] indexInfo = new IndexInfo[0];

    // Quote the table names if required so generated SQL references them safely.
    if (needsQuotes(tableName)) {
        String quote = meta.getIdentifierQuoteString();
        if (quote != null && !quote.equals(" ")) {
            tableName = quote + tableName + quote;
            qualifiedTableName = quote + qualifiedTableName + quote;
        }
    }

    return new JInfo<S>(mainInfo, catalog, schema, tableName, qualifiedTableName, indexInfo, jProperties);
}