List of usage examples for java.sql DatabaseMetaData getColumns
ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException;
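All four arguments are filters: null means "do not use this criterion to narrow the search", "" matches entries stored without a catalog or schema, and the pattern arguments accept the SQL wildcards '%' and '_'. Before the project examples below, here is a minimal, self-contained sketch of a typical call; the in-memory H2 URL and the table name MY_TABLE are placeholders for illustration, not taken from any of the examples that follow.

// Minimal sketch: list the columns of one table via DatabaseMetaData.getColumns.
// The JDBC URL and table name are assumptions; substitute your own.
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class GetColumnsExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            DatabaseMetaData dbmd = conn.getMetaData();
            // null catalog/schema: do not filter; "%" matches any column name.
            try (ResultSet rs = dbmd.getColumns(null, null, "MY_TABLE", "%")) {
                while (rs.next()) {
                    System.out.printf("%s (%s)%n",
                            rs.getString("COLUMN_NAME"),
                            rs.getString("TYPE_NAME"));
                }
            }
        }
    }
}

Note that many of the examples below close the ResultSet manually (or not at all); try-with-resources, as in the sketch, closes it even when an exception is thrown.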
From source file:org.zenoss.zep.dao.impl.DaoUtils.java
/**
 * Returns a list of column names in the specified table.
 *
 * @param dataSource DataSource to use.
 * @param tableName Table name.
 * @return A list of column names in the table.
 * @throws MetaDataAccessException If an exception occurs.
 */
public static List<String> getColumnNames(final javax.sql.DataSource dataSource, final String tableName)
        throws MetaDataAccessException {
    final List<String> columnNames = new ArrayList<String>();
    JdbcUtils.extractDatabaseMetaData(dataSource, new DatabaseMetaDataCallback() {
        @Override
        public Object processMetaData(DatabaseMetaData dbmd) throws SQLException, MetaDataAccessException {
            ResultSet rs = dbmd.getColumns(null, null, tableName, null);
            while (rs.next()) {
                String columnName = rs.getString("COLUMN_NAME");
                columnNames.add(columnName);
            }
            rs.close();
            return null;
        }
    });
    return columnNames;
}
From source file:org.zenoss.zep.dao.impl.DaoUtils.java
/**
 * Returns a map of column names to their JDBC type in the specified table. The map is returned in the
 * order returned by the getColumns query.
 *
 * @param dataSource DataSource to use.
 * @param tableName Table name.
 * @return A map of column names to the column types in the specified table.
 * @throws MetaDataAccessException If an exception occurs.
 */
public static Map<String, Integer> getColumnNamesAndTypes(final DataSource dataSource, final String tableName)
        throws MetaDataAccessException {
    final Map<String, Integer> columnNamesToTypes = new LinkedHashMap<String, Integer>();
    JdbcUtils.extractDatabaseMetaData(dataSource, new DatabaseMetaDataCallback() {
        @Override
        public Object processMetaData(DatabaseMetaData dbmd) throws SQLException, MetaDataAccessException {
            ResultSet rs = dbmd.getColumns(null, null, tableName, null);
            while (rs.next()) {
                String columnName = rs.getString("COLUMN_NAME");
                int columnType = rs.getInt("DATA_TYPE");
                columnNamesToTypes.put(columnName, columnType);
            }
            rs.close();
            return null;
        }
    });
    return columnNamesToTypes;
}
From source file:net.firejack.platform.core.utils.db.DBUtils.java
private static List<Column> getColumns(OpenFlameDataSource dataSource, DatabaseMetaData metaData, Table table)
        throws SQLException {
    ResultSet rs;
    if (dataSource.getName() == DatabaseName.Oracle) {
        rs = metaData.getColumns("orcl", "TIMUR", table.getName(), null);
    } else {
        rs = metaData.getColumns(null, null, table.getName(), null);
    }
    List<Column> columns = new ArrayList<Column>();
    AbstractTableAnalyzer dbAnalyzer = getDBAnalyzer(dataSource);
    while (rs.next()) {
        Column column = dbAnalyzer.createColumn(rs, table);
        columns.add(column);
    }
    return columns;
}
From source file:org.apache.zeppelin.jdbc.SqlCompleter.java
/**
 * Fills two maps with the list of tables and the list of columns.
 *
 * @param catalogName name of a catalog
 * @param meta metadata from connection to database
 * @param schemaFilter a schema name pattern; must match the schema name
 *        as it is stored in the database; "" retrieves those without a schema;
 *        <code>null</code> means that the schema name should not be used to narrow
 *        the search; supports '%' and '_' symbols; for example "prod_v_%"
 * @param tables function fills this map, adding for every schema name the
 *        set of table names within the schema
 * @param columns function fills this map, adding for every table name the set
 *        of columns within the table; table name is in format schema_name.table_name
 */
private static void fillTableAndColumnNames(String catalogName, DatabaseMetaData meta, String schemaFilter,
        Map<String, Set<String>> tables, Map<String, Set<String>> columns) {
    try {
        ResultSet cols = meta.getColumns(catalogName, schemaFilter, "%", "%");
        try {
            while (cols.next()) {
                String schema = cols.getString("TABLE_SCHEM");
                if (schema == null)
                    schema = cols.getString("TABLE_CAT");
                String table = cols.getString("TABLE_NAME");
                String column = cols.getString("COLUMN_NAME");
                if (!isBlank(table)) {
                    String schemaTable = schema + "." + table;
                    if (!columns.containsKey(schemaTable))
                        columns.put(schemaTable, new HashSet<String>());
                    columns.get(schemaTable).add(column);
                    if (!tables.containsKey(schema))
                        tables.put(schema, new HashSet<String>());
                    tables.get(schema).add(table);
                }
            }
        } finally {
            cols.close();
        }
    } catch (Throwable t) {
        logger.error("Failed to retrieve the column name", t);
    }
}
From source file:org.openflexo.technologyadapter.jdbc.util.SQLHelper.java
/**
 * Updates the list of columns for the given table.
 *
 * @param table the table
 * @param columns the column list to update
 * @param factory the factory used to create the new columns if needed
 */
public static void updateColumns(final JDBCTable table, List<JDBCColumn> columns, final JDBCFactory factory)
        throws SQLException {
    JDBCConnection jdbcConn = table.getResourceData();
    // TODO: maybe resource leak, cannot use lexical scope for auto-closing
    Connection connection = jdbcConn.getConnection();

    // retrieves keys
    final Set<String> keys = getKeys(table);

    // prepare case-ignoring map to match columns
    final Map<String, JDBCColumn> sortedColumns = new HashMap<>();
    for (JDBCColumn column : columns) {
        sortedColumns.put(column.getName().toLowerCase(), column);
    }

    // query the columns to find new and removed ones
    final Set<JDBCColumn> added = new LinkedHashSet<>();
    final Set<JDBCColumn> matched = new LinkedHashSet<>();
    DatabaseMetaData metadata = connection.getMetaData();
    try (ResultSet jdbcCols = metadata.getColumns(connection.getCatalog(),
            jdbcConn.getDbType().getSchemaPattern(), sqlName(table.getName()), "%")) {
        while (jdbcCols.next()) {
            // Available result-set fields: TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME,
            // DATA_TYPE, TYPE_NAME, COLUMN_SIZE, BUFFER_LENGTH, DECIMAL_DIGITS, NUM_PREC_RADIX,
            // IS_NULLABLE, REMARKS, COLUMN_DEF, SQL_DATA_TYPE, SQL_DATETIME_SUB, CHAR_OCTET_LENGTH,
            // ORDINAL_POSITION, IS_NULLABLE, SCOPE_CATALOG, SCOPE_SCHEMA, SCOPE_TABLE,
            // SOURCE_DATA_TYPE, IS_AUTOINCREMENT, IS_GENERATEDCOLUMN
            String columnName = jdbcCols.getString("COLUMN_NAME");
            String typeName = jdbcCols.getString("TYPE_NAME");
            int columnLength = jdbcCols.getInt("COLUMN_SIZE");
            boolean isNullable = jdbcCols.getString("IS_NULLABLE").equalsIgnoreCase("YES");
            JDBCColumn column = sortedColumns.get(columnName.toLowerCase());
            if (column == null) {
                // new column, add it to the list
                column = factory.newInstance(JDBCColumn.class);
                column.init(table, keys.contains(columnName), columnName, typeName, columnLength, isNullable);
                added.add(column);
            } else {
                matched.add(column);
            }
        }
    }

    // gets columns to remove
    Set<JDBCColumn> removed = new HashSet<>();
    for (JDBCColumn column : columns) {
        if (!matched.contains(column))
            removed.add(column);
    }

    // clears the columns of the removed ones;
    // using the table's adder and remover fires notifications
    for (JDBCColumn column : removed) {
        table.removeColumn(column);
    }

    // adds new columns
    for (JDBCColumn column : added) {
        table.addColumn(column);
    }
}
From source file:org.apache.phoenix.util.CSVCommonsLoader.java
/**
 * Get list of ColumnInfos that contain Column Name and its associated
 * PDataType for an import. The supplied list of columns can be null -- if it is non-null,
 * it represents a user-supplied list of columns to be imported.
 *
 * @param conn Phoenix connection from which metadata will be read
 * @param tableName Phoenix table name whose columns are to be checked. Can include a schema name
 * @param columns user-supplied list of import columns, can be null
 * @param strict if true, an exception will be thrown if unknown columns are supplied
 */
public static List<ColumnInfo> generateColumnInfo(Connection conn, String tableName, List<String> columns,
        boolean strict) throws SQLException {
    Map<String, Integer> columnNameToTypeMap = Maps.newLinkedHashMap();
    Set<String> ambiguousColumnNames = new HashSet<String>();
    Map<String, Integer> fullColumnNameToTypeMap = Maps.newLinkedHashMap();
    DatabaseMetaData dbmd = conn.getMetaData();
    int unfoundColumnCount = 0;
    // TODO: escape wildcard characters here because we don't want that
    // behavior here
    String escapedTableName = StringUtil.escapeLike(tableName);
    String[] schemaAndTable = escapedTableName.split("\\.");
    ResultSet rs = null;
    try {
        rs = dbmd.getColumns(null, (schemaAndTable.length == 1 ? "" : schemaAndTable[0]),
                (schemaAndTable.length == 1 ? escapedTableName : schemaAndTable[1]), null);
        while (rs.next()) {
            String colName = rs.getString(QueryUtil.COLUMN_NAME_POSITION);
            String colFam = rs.getString(QueryUtil.COLUMN_FAMILY_POSITION);
            // use family qualifier, if available, otherwise, use column name
            String fullColumn = (colFam == null ? colName : String.format("%s.%s", colFam, colName));
            String sqlTypeName = rs.getString(QueryUtil.DATA_TYPE_NAME_POSITION);
            // allow for both bare and family qualified names.
            if (columnNameToTypeMap.keySet().contains(colName)) {
                ambiguousColumnNames.add(colName);
            }
            columnNameToTypeMap.put(colName, PDataType.fromSqlTypeName(sqlTypeName).getSqlType());
            fullColumnNameToTypeMap.put(fullColumn, PDataType.fromSqlTypeName(sqlTypeName).getSqlType());
        }
        if (columnNameToTypeMap.isEmpty()) {
            throw new IllegalArgumentException("Table " + tableName + " not found");
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
    List<ColumnInfo> columnInfoList = Lists.newArrayList();
    Set<String> unresolvedColumnNames = new TreeSet<String>();
    if (columns == null) {
        // use family qualified names by default, if no columns are specified.
        for (Map.Entry<String, Integer> entry : fullColumnNameToTypeMap.entrySet()) {
            columnInfoList.add(new ColumnInfo(entry.getKey(), entry.getValue()));
        }
    } else {
        // Leave "null" as indication to skip b/c it doesn't exist
        for (int i = 0; i < columns.size(); i++) {
            String columnName = columns.get(i).trim();
            Integer sqlType = null;
            if (fullColumnNameToTypeMap.containsKey(columnName)) {
                sqlType = fullColumnNameToTypeMap.get(columnName);
            } else if (columnNameToTypeMap.containsKey(columnName)) {
                if (ambiguousColumnNames.contains(columnName)) {
                    unresolvedColumnNames.add(columnName);
                }
                // fall back to bare column name.
                sqlType = columnNameToTypeMap.get(columnName);
            }
            if (unresolvedColumnNames.size() > 0) {
                StringBuilder exceptionMessage = new StringBuilder();
                boolean first = true;
                exceptionMessage.append("Unable to resolve these column names to a single column family:\n");
                for (String col : unresolvedColumnNames) {
                    if (first)
                        first = false;
                    else
                        exceptionMessage.append(",");
                    exceptionMessage.append(col);
                }
                exceptionMessage.append("\nAvailable columns with column families:\n");
                first = true;
                for (String col : fullColumnNameToTypeMap.keySet()) {
                    if (first)
                        first = false;
                    else
                        exceptionMessage.append(",");
                    exceptionMessage.append(col);
                }
                throw new SQLException(exceptionMessage.toString());
            }
            if (sqlType == null) {
                if (strict) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_NOT_FOUND)
                            .setColumnName(columnName).setTableName(tableName).build().buildException();
                }
                unfoundColumnCount++;
            } else {
                columnInfoList.add(new ColumnInfo(columnName, sqlType));
            }
        }
        if (unfoundColumnCount == columns.size()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_NOT_FOUND)
                    .setColumnName(Arrays.toString(columns.toArray(new String[0]))).setTableName(tableName)
                    .build().buildException();
        }
    }
    return columnInfoList;
}
From source file:canreg.client.dataentry.Convert.java
public static boolean convertData(canreg.client.gui.management.CanReg4MigrationInternalFrame.MigrationTask task,
        String filepath, String datafile, String regcode) {
    Connection conn;
    Statement stmt;
    ResultSet rs_hdr;
    ResultSet rs_data;
    boolean success = false;
    int totalrowcount = 0;
    int rowsImported = 0;
    String csv = filepath + Globals.FILE_SEPARATOR + regcode + ".csv";
    CSVPrinter printer;
    try {
        debugOut("Migrating data " + datafile);
        pconn = (ParadoxConnection) DriverManager
                .getConnection("jdbc:paradox:///" + filepath.replaceAll("\\\\", "/"));
        final ParadoxTable table = TableData.listTables(pconn, datafile).get(0);
        totalrowcount = table.getRowCount();

        SystemDescription sd = new SystemDescription(
                Globals.CANREG_SERVER_SYSTEM_CONFIG_FOLDER + Globals.FILE_SEPARATOR + regcode + ".xml");
        DatabaseVariablesListElement[] variableListElements;
        variableListElements = sd.getDatabaseVariableListElements();
        ArrayList<String> dbvle = new ArrayList();
        ArrayList<String> cols = new ArrayList();

        // Handle variable names that end in an underscore (added to avoid reserved words)
        // by stripping the underscore.
        for (DatabaseVariablesListElement variable : variableListElements) {
            if (variable.getShortName().endsWith("_")) {
                dbvle.add(variable.getShortName().replace("_", ""));
            } else {
                dbvle.add(variable.getShortName());
            }
        }

        conn = DriverManager.getConnection("jdbc:paradox:///" + filepath.replaceAll("\\\\", "/"));
        final DatabaseMetaData meta = conn.getMetaData();
        rs_hdr = meta.getColumns("", "", datafile, "%");

        // Comparing variables in file and database
        while (rs_hdr.next()) {
            for (String dbvar : dbvle) {
                if (rs_hdr.getString("COLUMN_NAME").equals(dbvar)
                        || rs_hdr.getString("COLUMN_NAME").replaceAll(" ", "_").equals(dbvar)) {
                    cols.add(rs_hdr.getString("COLUMN_NAME"));
                }
            }
        }

        String[] strheader = new String[cols.size()];
        String query = "SELECT ";
        for (int i = 0; i < cols.size(); i++) {
            strheader[i] = cols.get(i).toString();
            if (i == cols.size() - 1) {
                query += "\"" + strheader[i] + "\"";
            } else {
                query += "\"" + strheader[i] + "\",";
            }
        }
        query += " FROM \"" + datafile + "\"";
        CSVFormat format = CSVFormat.DEFAULT.withFirstRecordAsHeader().withHeader(strheader).withDelimiter(',');
        debugOut(query);

        printer = new CSVPrinter(new FileWriter(csv), format);
        int hdrsize = strheader.length;
        Object[] strdata = new String[hdrsize];
        stmt = conn.createStatement();
        rs_data = stmt.executeQuery(query);

        if (Globals.DEBUG) {
            Statement stmt2 = conn.createStatement();
            String q = "SELECT RecNum FROM \"" + datafile + "\"";
            ResultSet rs_all_data = stmt2.executeQuery(q);
            debugOut(rs_all_data.toString());
        }

        while (rs_data.next()) {
            for (int i = 1; i < rs_data.getMetaData().getColumnCount() + 1; i++) {
                switch (rs_data.getMetaData().getColumnType(i)) {
                case 4:
                    strdata[i - 1] = Integer.toString(rs_data.getShort(i));
                    break;
                case 12:
                    strdata[i - 1] = StringEscapeUtils.escapeCsv(rs_data.getString(i));
                    break;
                }
            }
            printer.printRecord(strdata);
            rowsImported++;
        }
        printer.flush();
        printer.close();
        success = true;
    } catch (SQLException ex) {
        Logger.getLogger(Convert.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(Convert.class.getName()).log(Level.SEVERE, null, ex);
    }
    success = success && (rowsImported == totalrowcount);
    return success;
}
From source file:org.apache.hadoop.vertica.VerticaUtil.java
public static void checkOutputSpecs(Configuration conf) throws IOException {
    VerticaConfiguration vtconfig = new VerticaConfiguration(conf);
    String writerTable = vtconfig.getOutputTableName();
    if (writerTable == null)
        throw new IOException("Vertica output requires a table name defined by "
                + VerticaConfiguration.OUTPUT_TABLE_NAME_PROP);
    String[] def = vtconfig.getOutputTableDef();
    boolean dropTable = vtconfig.getDropTable();

    String schema = null;
    String table = null;
    String[] schemaTable = writerTable.split("\\.");
    if (schemaTable.length == 2) {
        schema = schemaTable[0];
        table = schemaTable[1];
    } else
        table = schemaTable[0];

    Statement stmt = null;
    try {
        Connection conn = vtconfig.getConnection(true);
        DatabaseMetaData dbmd = conn.getMetaData();
        ResultSet rs = dbmd.getTables(null, schema, table, null);
        boolean tableExists = rs.next();

        stmt = conn.createStatement();
        if (tableExists && dropTable) {
            if (verticaVersion(conf, true) >= 305) {
                stmt = conn.createStatement();
                stmt.execute("TRUNCATE TABLE " + writerTable);
            } else {
                // for version < 3.0 drop the table if it exists;
                // if def is empty, grab the columns first to redefine the table
                if (def == null) {
                    rs = dbmd.getColumns(null, schema, table, null);
                    ArrayList<String> defs = new ArrayList<String>();
                    while (rs.next())
                        defs.add(rs.getString(4) + " " + rs.getString(5));
                    def = defs.toArray(new String[0]);
                }
                stmt = conn.createStatement();
                stmt.execute("DROP TABLE " + writerTable + " CASCADE");
                tableExists = false; // force create
            }
        }

        // create table if it doesn't exist
        if (!tableExists) {
            if (def == null)
                throw new RuntimeException(
                        "Table " + writerTable + " does not exist and no table definition provided");
            if (schema != null) {
                rs = dbmd.getSchemas(null, schema);
                if (!rs.next())
                    stmt.execute("CREATE SCHEMA " + schema);
            }
            StringBuffer tabledef = new StringBuffer("CREATE TABLE ").append(writerTable).append(" (");
            for (String column : def)
                tabledef.append(column).append(",");
            tabledef.replace(tabledef.length() - 1, tabledef.length(), ")");
            stmt.execute(tabledef.toString());
            // TODO: create segmented projections
            stmt.execute("select implement_temp_design('" + writerTable + "')");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (stmt != null)
            try {
                stmt.close();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
    }
}
From source file:org.openmrs.module.spreadsheetimport.DatabaseBackend.java
public static Map<String, String> getMapOfImportedKeyTableNameToColumnNamesForTable(String tableName)
        throws Exception {
    Map<String, String> result = new HashMap<String, String>();
    Connection conn = null;
    Exception exception = null;
    try {
        // Connect to db
        Class.forName("com.mysql.jdbc.Driver").newInstance();
        Properties p = Context.getRuntimeProperties();
        String url = p.getProperty("connection.url");
        conn = DriverManager.getConnection(url, p.getProperty("connection.username"),
                p.getProperty("connection.password"));

        // Not NULLable columns
        DatabaseMetaData dmd = conn.getMetaData();
        List<String> columnNames = new ArrayList<String>();
        ResultSet rsColumns = dmd.getColumns(null, null, tableName, "");
        while (rsColumns.next()) {
            if (!rsColumns.getString("IS_NULLABLE").equals("YES")) {
                columnNames.add(rsColumns.getString("COLUMN_NAME"));
            }
        }
        rsColumns.close();

        // Imported keys
        ResultSet rsImportedKeys = dmd.getImportedKeys(null, null, tableName);
        while (rsImportedKeys.next()) {
            String columnName = rsImportedKeys.getString("FKCOLUMN_NAME");
            if (columnNames.contains(columnName)) {
                result.put(rsImportedKeys.getString("PKTABLE_NAME"), columnName);
            }
        }
        rsImportedKeys.close();
    } catch (Exception e) {
        log.debug(e.toString());
        exception = e;
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception e) {
            }
        }
    }
    if (exception != null) {
        throw exception;
    } else {
        return result;
    }
}
From source file:org.cloudfoundry.identity.uaa.db.TableAndColumnNormalizationTest.java
@Test
public void checkColumns() throws Exception {
    Connection connection = dataSource.getConnection();
    try {
        DatabaseMetaData metaData = connection.getMetaData();
        ResultSet rs = metaData.getColumns(null, null, null, null);
        int count = 0;
        while (rs.next()) {
            String name = rs.getString("TABLE_NAME");
            String col = rs.getString("COLUMN_NAME");
            logger.info("Checking column [" + name + "." + col + "]");
            if (name != null && DatabaseInformation1_5_3.tableNames.contains(name.toLowerCase())) {
                logger.info("Validating column [" + name + "." + col + "]");
                assertTrue("Column[" + name + "." + col + "] is not lower case.",
                        col.toLowerCase().equals(col));
            }
        }
    } finally {
        try {
            connection.close();
        } catch (Exception ignore) {
        }
    }
}