List of usage examples for java.sql DatabaseMetaData getIndexInfo
ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException;
From source file:org.alfresco.util.schemacomp.ExportDb.java
private void extractSchema(DatabaseMetaData dbmd, String schemaName, String prefixFilter) throws SQLException, IllegalArgumentException, IllegalAccessException { if (log.isDebugEnabled()) { log.debug("Retrieving tables: schemaName=[" + schemaName + "], prefixFilter=[" + prefixFilter + "]"); }/*www . j av a 2 s . c o m*/ final ResultSet tables = dbmd.getTables(null, schemaName, prefixFilter, new String[] { "TABLE", "VIEW", "SEQUENCE" }); while (tables.next()) { final String tableName = tables.getString("TABLE_NAME"); if (log.isDebugEnabled()) { log.debug("Examining table tableName=[" + tableName + "]"); } // Oracle hack: ignore tables in the recycle bin // ALF-14129 fix, check whether schema already contains object with provided name if (tableName.startsWith("BIN$") || schema.containsByName(tableName)) { continue; } if (tables.getString("TABLE_TYPE").equals("SEQUENCE")) { Sequence sequence = new Sequence(tableName); schema.add(sequence); continue; } Table table = new Table(tableName); schema.add(table); // Table columns final ResultSet columns = dbmd.getColumns(null, tables.getString("TABLE_SCHEM"), tableName, "%"); while (columns.next()) { String columnName = columns.getString("COLUMN_NAME"); Column column = new Column(columnName); String dbType = columns.getString("TYPE_NAME"); int colSize = columns.getInt("COLUMN_SIZE"); int scale = columns.getInt("DECIMAL_DIGITS"); int jdbcType = columns.getInt("DATA_TYPE"); String type = generateType(dbType, colSize, scale, jdbcType); column.setType(type); String nullableString = columns.getString("IS_NULLABLE"); column.setNullable(parseBoolean(nullableString)); column.setOrder(columns.getInt("ORDINAL_POSITION")); try { String autoIncString = columns.getString("IS_AUTOINCREMENT"); column.setAutoIncrement(parseBoolean(autoIncString)); } catch (SQLException jtdsDoesNOtHAveIsUatoincrement) { column.setAutoIncrement((dbType.endsWith("identity"))); } column.setParent(table); table.getColumns().add(column); } columns.close(); // 
Primary key final ResultSet primarykeycols = dbmd.getPrimaryKeys(null, tables.getString("TABLE_SCHEM"), tableName); PrimaryKey pk = null; while (primarykeycols.next()) { if (pk == null) { String pkName = primarykeycols.getString("PK_NAME"); pk = new PrimaryKey(pkName); } String columnName = primarykeycols.getString("COLUMN_NAME"); pk.getColumnNames().add(columnName); int columnOrder = primarykeycols.getInt("KEY_SEQ"); pk.getColumnOrders().add(columnOrder); } primarykeycols.close(); // If this table has a primary key, add it. if (pk != null) { pk.setParent(table); table.setPrimaryKey(pk); } // Indexes final ResultSet indexes = dbmd.getIndexInfo(null, tables.getString("TABLE_SCHEM"), tableName, false, true); String lastIndexName = ""; Index index = null; while (indexes.next()) { final String indexName = indexes.getString("INDEX_NAME"); if (indexName == null) { // Oracle seems to have some dummy index entries continue; } // Skip the index corresponding to the PK if it is mentioned else if (indexName.equals(table.getPrimaryKey().getName())) { continue; } if (!indexName.equals(lastIndexName)) { index = new Index(indexName); index.setUnique(!indexes.getBoolean("NON_UNIQUE")); index.setParent(table); table.getIndexes().add(index); lastIndexName = indexName; } if (index != null) { String columnName = indexes.getString("COLUMN_NAME"); index.getColumnNames().add(columnName); } } indexes.close(); final ResultSet foreignkeys = dbmd.getImportedKeys(null, tables.getString("TABLE_SCHEM"), tableName); String lastKeyName = ""; ForeignKey fk = null; while (foreignkeys.next()) { final String keyName = foreignkeys.getString("FK_NAME"); if (!keyName.equals(lastKeyName)) { fk = new ForeignKey(keyName); fk.setParent(table); table.getForeignKeys().add(fk); lastKeyName = keyName; } if (fk != null) { fk.setLocalColumn(foreignkeys.getString("FKCOLUMN_NAME")); fk.setTargetTable(foreignkeys.getString("PKTABLE_NAME")); fk.setTargetColumn(foreignkeys.getString("PKCOLUMN_NAME")); } } 
foreignkeys.close(); } tables.close(); }
From source file:org.apache.bigtop.itest.hive.TestJdbc.java
/** * Test simple DatabaseMetaData calls. getColumns is tested elsewhere, as we need to call * that on a valid table. Same with getFunctions. * * @throws SQLException//www.j av a 2s. com */ @Test public void databaseMetaDataCalls() throws SQLException { DatabaseMetaData md = conn.getMetaData(); boolean boolrc = md.allTablesAreSelectable(); LOG.debug("All tables are selectable? " + boolrc); String strrc = md.getCatalogSeparator(); LOG.debug("Catalog separator " + strrc); strrc = md.getCatalogTerm(); LOG.debug("Catalog term " + strrc); ResultSet rs = md.getCatalogs(); while (rs.next()) { strrc = rs.getString(1); LOG.debug("Found catalog " + strrc); } Connection c = md.getConnection(); int intrc = md.getDatabaseMajorVersion(); LOG.debug("DB major version is " + intrc); intrc = md.getDatabaseMinorVersion(); LOG.debug("DB minor version is " + intrc); strrc = md.getDatabaseProductName(); LOG.debug("DB product name is " + strrc); strrc = md.getDatabaseProductVersion(); LOG.debug("DB product version is " + strrc); intrc = md.getDefaultTransactionIsolation(); LOG.debug("Default transaction isolation is " + intrc); intrc = md.getDriverMajorVersion(); LOG.debug("Driver major version is " + intrc); intrc = md.getDriverMinorVersion(); LOG.debug("Driver minor version is " + intrc); strrc = md.getDriverName(); LOG.debug("Driver name is " + strrc); strrc = md.getDriverVersion(); LOG.debug("Driver version is " + strrc); strrc = md.getExtraNameCharacters(); LOG.debug("Extra name characters is " + strrc); strrc = md.getIdentifierQuoteString(); LOG.debug("Identifier quote string is " + strrc); // In Hive 1.2 this always returns an empty RS rs = md.getImportedKeys("a", "b", "d"); // In Hive 1.2 this always returns an empty RS rs = md.getIndexInfo("a", "b", "d", true, true); intrc = md.getJDBCMajorVersion(); LOG.debug("JDBC major version is " + intrc); intrc = md.getJDBCMinorVersion(); LOG.debug("JDBC minor version is " + intrc); intrc = md.getMaxColumnNameLength(); LOG.debug("Maximum 
column name length is " + intrc); strrc = md.getNumericFunctions(); LOG.debug("Numeric functions are " + strrc); // In Hive 1.2 this always returns an empty RS rs = md.getPrimaryKeys("a", "b", "d"); // In Hive 1.2 this always returns an empty RS rs = md.getProcedureColumns("a", "b", "d", "e"); strrc = md.getProcedureTerm(); LOG.debug("Procedures are called " + strrc); // In Hive 1.2 this always returns an empty RS rs = md.getProcedures("a", "b", "d"); strrc = md.getSchemaTerm(); LOG.debug("Schemas are called " + strrc); rs = md.getSchemas(); while (rs.next()) { strrc = rs.getString(1); LOG.debug("Found schema " + strrc); } strrc = md.getSearchStringEscape(); LOG.debug("Search string escape is " + strrc); strrc = md.getStringFunctions(); LOG.debug("String functions are " + strrc); strrc = md.getSystemFunctions(); LOG.debug("System functions are " + strrc); rs = md.getTableTypes(); while (rs.next()) { strrc = rs.getString(1); LOG.debug("Found table type " + strrc); } strrc = md.getTimeDateFunctions(); LOG.debug("Time/date functions are " + strrc); rs = md.getTypeInfo(); while (rs.next()) { strrc = rs.getString(1); LOG.debug("Found type " + strrc); } // In Hive 1.2 this always returns an empty RS rs = md.getUDTs("a", "b", "d", null); boolrc = md.supportsAlterTableWithAddColumn(); LOG.debug("Supports alter table with add column? " + boolrc); boolrc = md.supportsAlterTableWithDropColumn(); LOG.debug("Supports alter table with drop column? " + boolrc); boolrc = md.supportsBatchUpdates(); LOG.debug("Supports batch updates? " + boolrc); boolrc = md.supportsCatalogsInDataManipulation(); LOG.debug("Supports catalogs in data manipulation? " + boolrc); boolrc = md.supportsCatalogsInIndexDefinitions(); LOG.debug("Supports catalogs in index definition? " + boolrc); boolrc = md.supportsCatalogsInPrivilegeDefinitions(); LOG.debug("Supports catalogs in privilege definition? " + boolrc); boolrc = md.supportsCatalogsInProcedureCalls(); LOG.debug("Supports catalogs in procedure calls? 
" + boolrc); boolrc = md.supportsCatalogsInTableDefinitions(); LOG.debug("Supports catalogs in table definition? " + boolrc); boolrc = md.supportsColumnAliasing(); LOG.debug("Supports column aliasing? " + boolrc); boolrc = md.supportsFullOuterJoins(); LOG.debug("Supports full outer joins? " + boolrc); boolrc = md.supportsGroupBy(); LOG.debug("Supports group by? " + boolrc); boolrc = md.supportsLimitedOuterJoins(); LOG.debug("Supports limited outer joins? " + boolrc); boolrc = md.supportsMultipleResultSets(); LOG.debug("Supports limited outer joins? " + boolrc); boolrc = md.supportsNonNullableColumns(); LOG.debug("Supports non-nullable columns? " + boolrc); boolrc = md.supportsOuterJoins(); LOG.debug("Supports outer joins? " + boolrc); boolrc = md.supportsPositionedDelete(); LOG.debug("Supports positioned delete? " + boolrc); boolrc = md.supportsPositionedUpdate(); LOG.debug("Supports positioned update? " + boolrc); boolrc = md.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT); LOG.debug("Supports result set holdability? " + boolrc); boolrc = md.supportsResultSetType(ResultSet.HOLD_CURSORS_OVER_COMMIT); LOG.debug("Supports result set type? " + boolrc); boolrc = md.supportsSavepoints(); LOG.debug("Supports savepoints? " + boolrc); boolrc = md.supportsSchemasInDataManipulation(); LOG.debug("Supports schemas in data manipulation? " + boolrc); boolrc = md.supportsSchemasInIndexDefinitions(); LOG.debug("Supports schemas in index definitions? " + boolrc); boolrc = md.supportsSchemasInPrivilegeDefinitions(); LOG.debug("Supports schemas in privilege definitions? " + boolrc); boolrc = md.supportsSchemasInProcedureCalls(); LOG.debug("Supports schemas in procedure calls? " + boolrc); boolrc = md.supportsSchemasInTableDefinitions(); LOG.debug("Supports schemas in table definitions? " + boolrc); boolrc = md.supportsSelectForUpdate(); LOG.debug("Supports select for update? " + boolrc); boolrc = md.supportsStoredProcedures(); LOG.debug("Supports stored procedures? 
" + boolrc); boolrc = md.supportsTransactions(); LOG.debug("Supports transactions? " + boolrc); boolrc = md.supportsUnion(); LOG.debug("Supports union? " + boolrc); boolrc = md.supportsUnionAll(); LOG.debug("Supports union all? " + boolrc); }
From source file:org.apache.ddlutils.task.DumpMetadataTask.java
/**
 * Dumps the indexes of the indicated table.
 *
 * @param xmlWriter   The xml writer to write to
 * @param metaData    The database metadata
 * @param catalogName The catalog name
 * @param schemaName  The schema name
 * @param tableName   The table name
 */
private void dumpIndexes(PrettyPrintingXmlWriter xmlWriter, final DatabaseMetaData metaData,
        final String catalogName, final String schemaName, final String tableName) throws SQLException {
    performResultSetXmlOperation(xmlWriter, null, new ResultSetXmlOperation() {
        public ResultSet getResultSet() throws SQLException {
            return metaData.getIndexInfo(catalogName, schemaName, tableName, false, false);
        }

        public void handleRow(PrettyPrintingXmlWriter xmlWriter, ResultSet result) throws SQLException {
            Set columns = getColumnsInResultSet(result);

            xmlWriter.writeElementStart(null, "index");
            addStringAttribute(xmlWriter, "name", result, columns, "INDEX_NAME");
            addBooleanAttribute(xmlWriter, "nonUnique", result, columns, "NON_UNIQUE");
            addStringAttribute(xmlWriter, "indexCatalog", result, columns, "INDEX_QUALIFIER");
            if (columns.contains("TYPE")) {
                try {
                    // Map the JDBC index-type constant to a readable label.
                    switch (result.getShort("TYPE")) {
                    case DatabaseMetaData.tableIndexStatistic:
                        xmlWriter.writeAttribute(null, "type", "table statistics");
                        break;
                    case DatabaseMetaData.tableIndexClustered:
                        xmlWriter.writeAttribute(null, "type", "clustered");
                        break;
                    case DatabaseMetaData.tableIndexHashed:
                        xmlWriter.writeAttribute(null, "type", "hashed");
                        break;
                    case DatabaseMetaData.tableIndexOther:
                        xmlWriter.writeAttribute(null, "type", "other");
                        break;
                    default:
                        xmlWriter.writeAttribute(null, "type", "unknown");
                        break;
                    }
                } catch (SQLException ex) {
                    // Log getMessage(), not getStackTrace(): concatenating the
                    // StackTraceElement[] array only printed its identity hash.
                    log("Could not read the TYPE value for an index of table '" + tableName
                            + "' from the result set: " + ex.getMessage(), Project.MSG_ERR);
                }
            }
            addStringAttribute(xmlWriter, "column", result, columns, "COLUMN_NAME");
            addShortAttribute(xmlWriter, "sequenceNumberInIndex", result, columns, "ORDINAL_POSITION");
            if (columns.contains("ASC_OR_DESC")) {
                try {
                    String value = result.getString("ASC_OR_DESC");

                    if ("A".equalsIgnoreCase(value)) {
                        xmlWriter.writeAttribute(null, "sortOrder", "ascending");
                    } else if ("D".equalsIgnoreCase(value)) {
                        xmlWriter.writeAttribute(null, "sortOrder", "descending");
                    } else {
                        xmlWriter.writeAttribute(null, "sortOrder", "unknown");
                    }
                } catch (SQLException ex) {
                    // See above: message instead of raw stack-trace array.
                    log("Could not read the ASC_OR_DESC value for an index of table '" + tableName
                            + "' from the result set: " + ex.getMessage(), Project.MSG_ERR);
                }
            }
            addIntAttribute(xmlWriter, "cardinality", result, columns, "CARDINALITY");
            addIntAttribute(xmlWriter, "pages", result, columns, "PAGES");
            addStringAttribute(xmlWriter, "filter", result, columns, "FILTER_CONDITION");
        }

        public void handleError(SQLException ex) {
            // See above: message instead of raw stack-trace array.
            log("Could not read the indexes for table '" + tableName + "' from the result set: "
                    + ex.getMessage(), Project.MSG_ERR);
        }
    });
}
From source file:org.apache.oozie.command.SchemaCheckXCommand.java
private boolean checkIndexes(DatabaseMetaData metaData, String catalog, String table, Set<String> expectedIndexedColumns) throws SQLException { boolean problem = false; Set<String> foundIndexedColumns = new HashSet<String>(); ResultSet rs = metaData.getIndexInfo(catalog, null, table, false, true); while (rs.next()) { String colName = rs.getString("COLUMN_NAME"); if (colName != null) { foundIndexedColumns.add(colName); }//from w w w. j a v a2 s.c om } Collection missingIndexColumns = CollectionUtils.subtract(expectedIndexedColumns, foundIndexedColumns); if (!missingIndexColumns.isEmpty()) { LOG.error("Found [{0}] missing indexes for columns in table [{1}]: {2}", missingIndexColumns.size(), table, Arrays.toString(missingIndexColumns.toArray())); problem = true; } else { if (LOG.isDebugEnabled()) { LOG.debug("No missing indexes found in table [{0}]: {1}", table, Arrays.toString(expectedIndexedColumns.toArray())); } } if (!ignoreExtras) { Collection extraIndexColumns = CollectionUtils.subtract(foundIndexedColumns, expectedIndexedColumns); if (!extraIndexColumns.isEmpty()) { LOG.error("Found [{0}] extra indexes for columns in table [{1}]: {2}", extraIndexColumns.size(), table, Arrays.toString(extraIndexColumns.toArray())); problem = true; } else { LOG.debug("No extra indexes found in table [{0}]", table); } } return problem; }
From source file:org.apache.openjpa.jdbc.sql.DBDictionary.java
/** * Reflect on the schema to find indexes matching the given table pattern. *///from www .j a v a 2 s . c o m public Index[] getIndexInfo(DatabaseMetaData meta, DBIdentifier catalog, DBIdentifier schemaName, DBIdentifier tableName, boolean unique, boolean approx, Connection conn) throws SQLException { if (tableName == null && !supportsNullTableForGetIndexInfo) return null; beforeMetadataOperation(conn); ResultSet indexes = null; try { indexes = meta.getIndexInfo(getCatalogNameForMetadata(catalog), getSchemaNameForMetadata(schemaName), getTableNameForMetadata(tableName), unique, approx); List indexList = new ArrayList(); while (indexes != null && indexes.next()) indexList.add(newIndex(indexes)); return (Index[]) indexList.toArray(new Index[indexList.size()]); } finally { if (indexes != null) try { indexes.close(); } catch (Exception e) { } } }
From source file:org.apache.phoenix.mapreduce.index.IndexTool.java
/** * Checks for the validity of the index table passed to the job. * @param connection/*from w w w . ja v a2 s .c o m*/ * @param masterTable * @param indexTable * @return * @throws SQLException */ private boolean isValidIndexTable(final Connection connection, final String masterTable, final String indexTable) throws SQLException { final DatabaseMetaData dbMetaData = connection.getMetaData(); final String schemaName = SchemaUtil.getSchemaNameFromFullName(masterTable); final String tableName = SchemaUtil.getTableNameFromFullName(masterTable); ResultSet rs = null; try { rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, false); while (rs.next()) { final String indexName = rs.getString(6); if (indexTable.equalsIgnoreCase(indexName)) { return true; } } } finally { if (rs != null) { rs.close(); } } return false; }
From source file:org.apache.tajo.catalog.store.XMLCatalogSchemaManager.java
protected boolean checkExistence(Connection conn, DatabaseObjectType type, String... params) throws SQLException { boolean result = false; DatabaseMetaData metadata = null; PreparedStatement pstmt = null; BaseSchema baseSchema = catalogStore.getSchema(); if (params == null || params.length < 1) { throw new IllegalArgumentException("checkExistence function needs at least one argument."); }// w ww . j av a 2s . c o m switch (type) { case DATA: metadata = conn.getMetaData(); ResultSet data = metadata.getUDTs(null, baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty() ? baseSchema.getSchemaName().toUpperCase() : null, params[0].toUpperCase(), null); result = data.next(); CatalogUtil.closeQuietly(data); break; case FUNCTION: metadata = conn.getMetaData(); ResultSet functions = metadata.getFunctions(null, baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty() ? baseSchema.getSchemaName().toUpperCase() : null, params[0].toUpperCase()); result = functions.next(); CatalogUtil.closeQuietly(functions); break; case INDEX: if (params.length != 2) { throw new IllegalArgumentException( "Finding index object is needed two strings, table name and index name"); } pstmt = getExistQuery(conn, type); if (pstmt != null) { result = checkExistenceByQuery(pstmt, baseSchema, params); } else { metadata = conn.getMetaData(); ResultSet indexes = metadata.getIndexInfo(null, baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty() ? 
baseSchema.getSchemaName().toUpperCase() : null, params[0].toUpperCase(), false, true); while (indexes.next()) { if (indexes.getString("INDEX_NAME").equals(params[1].toUpperCase())) { result = true; break; } } CatalogUtil.closeQuietly(indexes); } break; case TABLE: pstmt = getExistQuery(conn, type); if (pstmt != null) { result = checkExistenceByQuery(pstmt, baseSchema, params); } else { metadata = conn.getMetaData(); ResultSet tables = metadata.getTables(null, baseSchema.getSchemaName() != null && !baseSchema.getSchemaName().isEmpty() ? baseSchema.getSchemaName().toUpperCase() : null, params[0].toUpperCase(), new String[] { "TABLE" }); result = tables.next(); CatalogUtil.closeQuietly(tables); } break; case DOMAIN: case OPERATOR: case RULE: case SEQUENCE: case TRIGGER: case VIEW: pstmt = getExistQuery(conn, type); if (pstmt == null) { throw new TajoInternalError( "Finding " + type + " type of database object is not supported on this database system."); } result = checkExistenceByQuery(pstmt, baseSchema, params); break; } return result; }
From source file:org.batoo.jpa.core.jdbc.adapter.JdbcTable.java
private void readIndexes(DatabaseMetaData dbMetadata) throws SQLException { ResultSet rs = null;/*from w w w . j ava2s . c o m*/ try { rs = dbMetadata.getIndexInfo(this.catalog, this.schema, this.name, false, true); while (rs.next()) { final String name = rs.getString(JdbcTable.INDEX_NAME); if (name == null) { continue; } JdbcIndex index = this.getIndex(name); if (index == null) { index = new JdbcIndex(name); this.indexes.put(name.toUpperCase(), index); } index.addColumn(rs.getString(JdbcTable.COLUMN_NAME)); } } finally { DbUtils.closeQuietly(rs); } }
From source file:org.cloudfoundry.identity.uaa.db.TestSchemaValidation.java
public void validate_index_existence(String[] tableNames, String lookupIndexName) throws Exception { Connection connection = dataSource.getConnection(); try {/* w w w.ja va 2s . c o m*/ DatabaseMetaData meta = connection.getMetaData(); boolean foundIndex = false; for (String tableName : tableNames) { ResultSet rs = meta.getIndexInfo(connection.getCatalog(), null, tableName, false, false); while ((!foundIndex) && rs.next()) { String indexName = rs.getString("INDEX_NAME"); if (lookupIndexName.equalsIgnoreCase(indexName)) { foundIndex = true; } } rs.close(); if (foundIndex) { break; } } assertTrue("I was expecting to find index " + lookupIndexName, foundIndex); } finally { connection.close(); } }
From source file:org.kuali.core.db.torque.KualiTorqueJDBCTransformTask.java
public List<TableIndex> getIndexes(DatabaseMetaData dbMeta, String tableName) throws SQLException { List<TableIndex> indexes = new ArrayList<TableIndex>(); ResultSet pkInfo = null;/*from w w w . ja va 2s .c o m*/ String pkName = null; //ArrayList<String> pkFields = new ArrayList<String>(); ResultSet indexInfo = null; try { indexInfo = dbMeta.getIndexInfo(null, dbSchema, tableName, false, true); // need to ensure that the PK is not returned as an index pkInfo = dbMeta.getPrimaryKeys(null, dbSchema, tableName); if (pkInfo.next()) { pkName = pkInfo.getString("PK_NAME"); } //Map<Integer,String> tempPk = new HashMap<Integer,String>(); //while ( pkInfo.next() ) { // tempPk.put( pkInfo.getInt( "KEY_SEQ" ), pkInfo.getString( "COLUMN_NAME" ) ); //} TableIndex currIndex = null; while (indexInfo.next()) { if (indexInfo.getString("INDEX_NAME") == null) continue; //System.out.println( "Row: " + indexInfo.getString( "INDEX_NAME" ) + "/" + indexInfo.getString( "COLUMN_NAME" ) ); if (currIndex == null || !indexInfo.getString("INDEX_NAME").equals(currIndex.name)) { currIndex = new TableIndex(); currIndex.name = indexInfo.getString("INDEX_NAME"); currIndex.unique = !indexInfo.getBoolean("NON_UNIQUE"); // if has the same name as the PK, skip adding it to the index list if (pkName == null || !pkName.equals(currIndex.name)) { indexes.add(currIndex); //System.out.println( "Added " + currIndex.name + " to index list"); } else { //System.out.println( "Skipping PK: " + currIndex.name ); } } currIndex.columns.add(indexInfo.getString("COLUMN_NAME")); } } catch (SQLException e) { log("WARN: Could not read indexes for Table " + tableName + " : " + e.getMessage(), Project.MSG_WARN); } finally { if (indexInfo != null) { indexInfo.close(); } if (pkInfo != null) { pkInfo.close(); } } return indexes; }