List of usage examples for java.sql.Connection.getMetaData()
DatabaseMetaData getMetaData() throws SQLException;
Returns a DatabaseMetaData object that contains metadata about the database to which this Connection
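A minimal sketch of the basic call pattern, assuming an already open Connection; every accessor shown is a standard DatabaseMetaData method:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

public class MetaDataExample {
    // Print a few basic facts about the database behind an open connection.
    static void printDatabaseInfo(Connection con) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        System.out.println("Product: " + dbmd.getDatabaseProductName()
                + " " + dbmd.getDatabaseProductVersion());
        System.out.println("Driver:  " + dbmd.getDriverName()
                + " " + dbmd.getDriverVersion());
        System.out.println("URL:     " + dbmd.getURL());
        System.out.println("User:    " + dbmd.getUserName());
    }
}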
object represents a connection. From source file:com.liferay.portal.upgrade.util.Table.java
public void populateTable(String tempFileName, Connection con) throws Exception {
    PreparedStatement ps = null;
    String insertSQL = getInsertSQL();
    UnsyncBufferedReader unsyncBufferedReader = new UnsyncBufferedReader(new FileReader(tempFileName));
    String line = null;
    try {
        DatabaseMetaData databaseMetaData = con.getMetaData();
        if (!databaseMetaData.supportsBatchUpdates()) {
            if (_log.isDebugEnabled()) {
                _log.debug("Database does not support batch updates");
            }
        }
        int count = 0;
        while ((line = unsyncBufferedReader.readLine()) != null) {
            String[] values = StringUtil.split(line);
            Object[][] columns = getColumns();
            if ((values.length) != (columns.length)) {
                throw new UpgradeException("Column lengths differ between temp file and schema. "
                        + "Attempted to insert row " + line + ".");
            }
            if (count == 0) {
                ps = con.prepareStatement(insertSQL);
            }
            int[] order = getOrder();
            for (int i = 0; i < order.length; i++) {
                int pos = order[i];
                setColumn(ps, i, (Integer) columns[pos][1], values[pos]);
            }
            if (databaseMetaData.supportsBatchUpdates()) {
                ps.addBatch();
                if (count == BATCH_SIZE) {
                    populateTableRows(ps, true);
                    count = 0;
                } else {
                    count++;
                }
            } else {
                populateTableRows(ps, false);
            }
        }
        if (databaseMetaData.supportsBatchUpdates()) {
            if (count != 0) {
                populateTableRows(ps, true);
            }
        }
    } finally {
        DataAccess.cleanUp(null, ps);
        unsyncBufferedReader.close();
    }
    if (_log.isDebugEnabled()) {
        _log.debug(getTableName() + " table populated with data");
    }
}
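The metadata call doing the real work above is supportsBatchUpdates(). A stripped-down sketch of the same decision, assuming an open connection; the table and column names are hypothetical:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class BatchInsertSketch {
    // Insert rows in batches when the driver supports it, one by one otherwise.
    static void insertNames(Connection con, List<String> names) throws SQLException {
        boolean batching = con.getMetaData().supportsBatchUpdates();
        try (PreparedStatement ps = con.prepareStatement(
                "INSERT INTO example_names (name) VALUES (?)")) { // hypothetical table
            for (String name : names) {
                ps.setString(1, name);
                if (batching) {
                    ps.addBatch();
                } else {
                    ps.executeUpdate();
                }
            }
            if (batching) {
                ps.executeBatch();
            }
        }
    }
}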
From source file:com.flexive.ejb.beans.configuration.GlobalConfigurationEngineBean.java
/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public DivisionData createDivisionData(int divisionId, String dataSource, String domainRegEx) {
    String dbVendor = "unknown";
    String dbVersion = "unknown";
    String dbDriverVersion = "unknown";
    boolean available = false;
    Connection con = null;
    try {
        // lookup non-transactional datasource to avoid issues with the default JEE6 data source in Glassfish
        con = Database.getDataSource(dataSource + "NoTX").getConnection();
        DatabaseMetaData dbmd = con.getMetaData();
        dbVendor = dbmd.getDatabaseProductName();
        dbVersion = dbmd.getDatabaseProductVersion();
        dbDriverVersion = dbmd.getDriverName() + " " + dbmd.getDriverVersion();
        available = true;
    } catch (NamingException e) {
        LOG.error("Failed to get datasource " + dataSource + " (flagged inactive)");
    } catch (SQLException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Failed to get database meta information: " + e.getMessage(), e);
        }
    } finally {
        Database.closeObjects(GlobalConfigurationEngineBean.class, con, null);
    }
    return new DivisionData(divisionId, available, dataSource, domainRegEx, dbVendor, dbVersion,
            dbDriverVersion);
}
From source file:org.syncope.core.util.ImportExport.java
private List<String> sortByForeignKeys(final Connection conn, final Set<String> tableNames)
        throws SQLException, CycleInMultiParentTreeException {

    MultiParentNode<String> root = new MultiParentNode<String>(ROOT_ELEMENT);

    for (String tableName : tableNames) {
        MultiParentNode<String> node = MultiParentNodeOp.findInTree(root, tableName);
        if (node == null) {
            node = new MultiParentNode<String>(tableName);
            root.addChild(node);
        }

        ResultSet rs = null;
        try {
            rs = conn.getMetaData().getExportedKeys(conn.getCatalog(), readSchema(), tableName);
            while (rs.next()) {
                String fkTableName = rs.getString("FKTABLE_NAME");
                if (!tableName.equals(fkTableName)) {
                    MultiParentNode<String> fkNode = MultiParentNodeOp.findInTree(root, fkTableName);
                    if (fkNode == null) {
                        fkNode = new MultiParentNode<String>(fkTableName);
                        root.addChild(fkNode);
                    }
                    fkNode.addChild(node);
                    if (root.isParent(node)) {
                        root.removeChild(node);
                    }
                }
            }
        } finally {
            if (rs != null) {
                try {
                    rs.close();
                } catch (SQLException e) {
                    LOG.error("While closing tables result set", e);
                }
            }
        }
    }

    List<String> sortedTableNames = new ArrayList<String>(tableNames.size());
    MultiParentNodeOp.traverseTree(root, sortedTableNames);
    return sortedTableNames.subList(0, sortedTableNames.size() - 1);
}
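The call that drives this sort is getExportedKeys(), which lists the foreign keys in other tables that reference the given table. A minimal sketch, assuming an open connection and a table name as stored in the current catalog:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ExportedKeysSketch {
    // List the foreign key columns in other tables that reference the given table.
    static void printReferencingTables(Connection con, String tableName) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        try (ResultSet rs = dbmd.getExportedKeys(con.getCatalog(), null, tableName)) {
            while (rs.next()) {
                System.out.println(rs.getString("FKTABLE_NAME")
                        + "." + rs.getString("FKCOLUMN_NAME")
                        + " -> " + tableName + "." + rs.getString("PKCOLUMN_NAME"));
            }
        }
    }
}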
From source file:eionet.cr.dao.virtuoso.VirtuosoStagingDatabaseDAO.java
/**
 * Deletes the given databases from Virtuoso.
 *
 * @param dbNames
 *            The database names.
 * @param conn
 *            The SQL connection to use.
 * @throws SQLException
 *             When database access error happens.
 */
private void deleteDatabases(List<String> dbNames, Connection conn) throws SQLException {

    HashMap<String, HashSet<String>> dbTables = new HashMap<String, HashSet<String>>();
    String userName = conn.getMetaData().getUserName();

    // First, get the tables present in each of the given databases.
    ResultSet rs = null;
    try {
        for (String dbName : dbNames) {
            HashSet<String> tables = new HashSet<String>();
            rs = conn.getMetaData().getTables(dbName, userName, null, null);
            while (rs.next()) {
                tables.add(rs.getString("TABLE_NAME"));
            }
            if (!tables.isEmpty()) {
                dbTables.put(dbName, tables);
            }
        }
    } finally {
        SQLUtil.close(rs);
    }

    // Second, loop through the above-found tables, delete each.
    for (Entry<String, HashSet<String>> entry : dbTables.entrySet()) {
        String dbName = entry.getKey();
        HashSet<String> tables = entry.getValue();
        for (String tableName : tables) {
            SQLUtil.executeUpdate("drop table " + dbName + "." + userName + ".\"" + tableName + "\"", conn);
        }
    }
}
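This example pairs getUserName() with getTables(catalog, schemaPattern, tableNamePattern, types). A minimal sketch of listing the base tables visible in a given catalog and schema, assuming an open connection:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ListTablesSketch {
    // Print the names of all base tables in the given catalog and schema.
    static void listTables(Connection con, String catalog, String schema) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        System.out.println("Connected as: " + dbmd.getUserName());
        try (ResultSet rs = dbmd.getTables(catalog, schema, "%", new String[] { "TABLE" })) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_SCHEM") + "." + rs.getString("TABLE_NAME"));
            }
        }
    }
}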
From source file:it.doqui.index.ecmengine.business.personalization.multirepository.bootstrap.SchemaBootstrap.java
/**
 * @return Returns the number of applied patches
 */
private int countAppliedPatches(Connection connection) throws Exception {
    DatabaseMetaData dbMetadata = connection.getMetaData();

    ResultSet tableRs = dbMetadata.getTables(null, null, "%", null);
    boolean newPatchTable = false;
    boolean oldPatchTable = false;
    try {
        while (tableRs.next()) {
            String tableName = tableRs.getString("TABLE_NAME");
            if (tableName.equalsIgnoreCase("applied_patch")) {
                oldPatchTable = true;
                break;
            } else if (tableName.equalsIgnoreCase("alf_applied_patch")) {
                newPatchTable = true;
                break;
            }
        }
    } finally {
        try {
            tableRs.close();
        } catch (Throwable e) {
            e.printStackTrace();
        }
    }

    if (newPatchTable) {
        Statement stmt = connection.createStatement();
        try {
            ResultSet rs = stmt.executeQuery("select count(id) from alf_applied_patch");
            rs.next();
            int count = rs.getInt(1);
            return count;
        } finally {
            try {
                stmt.close();
            } catch (Throwable e) {
            }
        }
    } else if (oldPatchTable) {
        // found the old style table name
        Statement stmt = connection.createStatement();
        try {
            ResultSet rs = stmt.executeQuery("select count(id) from applied_patch");
            rs.next();
            int count = rs.getInt(1);
            return count;
        } finally {
            try {
                stmt.close();
            } catch (Throwable e) {
            }
        }
    } else {
        // The applied patches table is not present
        throw new NoSchemaException();
    }
}
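A narrower variant of the same existence check passes the table name itself as the pattern argument of getTables() instead of scanning every table. A minimal sketch, assuming the name is given in the case the database stores it:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class TableExistsSketch {
    // Return true if a base table with the given stored name exists in any schema.
    static boolean tableExists(Connection con, String storedTableName) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        try (ResultSet rs = dbmd.getTables(null, null, storedTableName, new String[] { "TABLE" })) {
            return rs.next();
        }
    }
}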
From source file:it.unibas.spicy.persistence.relational.DAORelational.java
@SuppressWarnings("unchecked")
public void loadInstanceSample(AccessConfiguration accessConfiguration, IDataSourceProxy dataSource,
        DBFragmentDescription dataDescription, IConnectionFactory dataSourceDB, String rootLabel,
        boolean translated) throws DAOException {
    String catalog = null;
    INode root = null;
    String schemaName = accessConfiguration.getSchemaName();
    DatabaseMetaData databaseMetaData = null;
    Connection connection = dataSourceDB.getConnection(accessConfiguration);
    try {
        databaseMetaData = connection.getMetaData();
        catalog = connection.getCatalog();
        if (catalog == null) {
            catalog = accessConfiguration.getUri();
            if (logger.isDebugEnabled())
                logger.debug("Catalog is null. Catalog name will be: " + catalog);
        }
        if (logger.isDebugEnabled())
            logger.debug("Catalog: " + catalog);
        if (rootLabel == null) {
            root = new TupleNode(DAORelationalUtility.getNode(catalog).getLabel(), getOID());
        } else {
            this.dataDescription = dataDescription;
            root = new TupleNode(rootLabel, getOID());
        }
        root.setRoot(true);
        String[] tableTypes = new String[] { "TABLE" };
        ResultSet tableResultSet = databaseMetaData.getTables(catalog, schemaName, null, tableTypes);
        while (tableResultSet.next()) {
            String tableName = tableResultSet.getString("TABLE_NAME");
            if (!this.dataDescription.checkLoadTable(tableName)) {
                continue;
            }
            SetNode setTable = new SetNode(DAORelationalUtility.getNode(tableName).getLabel(), getOID());
            // keep the number of instances as information on the Set node
            String tablePath;
            // mysql driver
            if (accessConfiguration.getDriver().equalsIgnoreCase(SpicyEngineConstants.MYSQL_DRIVER)) {
                tablePath = catalog + "." + tableName;
            } else {
                tablePath = "\"" + schemaName + "\".\"" + tableName + "\"";
            }
            Statement statement = connection.createStatement();
            ResultSet countResult = statement
                    .executeQuery("SELECT COUNT(*) AS instancesCount FROM " + tablePath + ";");
            int noOfRows = 0;
            while (countResult.next()) {
                noOfRows = countResult.getInt("instancesCount");
            }
            setTable.setFullSize(noOfRows);
            countResult.close();
            if (logger.isDebugEnabled())
                logger.debug("extracting value for table " + tableName + " ....");
            getInstanceByTable(dataSourceDB, connection, schemaName, tableName, setTable, dataSource,
                    translated);
            root.addChild(setTable);
        }
        int childrenNo = 0;
        for (INode setnode : root.getChildren()) {
            childrenNo = childrenNo + setnode.getChildren().size();
        }
        // if there are any instances
        if (childrenNo > 0) {
            // load only a sample of the instances to memory to show on MIPMap interface
            dataSource.addInstanceWithCheck(root);
        }
    } catch (SQLException | DAOException ex) {
        logger.error(ex);
        throw new DAOException(ex.getMessage());
    } finally {
        if (connection != null)
            dataSourceDB.close(connection);
    }
}
From source file:io.github.retz.db.Database.java
boolean allTableExists(Connection conn) throws SQLException {
    DatabaseMetaData meta = conn.getMetaData();

    // PostgreSQL accepts only lower case names while
    // H2DB holds such names with upper case names. WHAT IS THE HELL JDBC
    // TODO: add PostgreSQL inttest
    boolean userTableExists = tableExists(meta, "public", "users") || tableExists(meta, "PUBLIC", "USERS");
    boolean applicationTableExists = tableExists(meta, "public", "applications")
            || tableExists(meta, "PUBLIC", "APPLICATIONS");
    boolean jobTableExists = tableExists(meta, "public", "jobs") || tableExists(meta, "PUBLIC", "JOBS");
    boolean propTableExists = tableExists(meta, "public", "properties")
            || tableExists(meta, "PUBLIC", "PROPERTIES");

    if (userTableExists && applicationTableExists && jobTableExists && propTableExists) {
        return true;
    } else if (!userTableExists && !applicationTableExists && !jobTableExists && !propTableExists) {
        return false;
    } else {
        throw new RuntimeException("Database is partially ready: quitting");
    }
}
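The double lookup above works around PostgreSQL folding unquoted identifiers to lower case while H2 folds them to upper case. DatabaseMetaData can report this directly; a minimal sketch of normalizing a name before a single lookup (this helper is an assumption, not part of the example above):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

public class IdentifierCaseSketch {
    // Convert an unquoted identifier to the case in which the connected database stores it.
    static String storedForm(Connection con, String identifier) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        if (dbmd.storesUpperCaseIdentifiers()) {
            return identifier.toUpperCase();
        } else if (dbmd.storesLowerCaseIdentifiers()) {
            return identifier.toLowerCase();
        }
        return identifier; // stored as given (mixed case)
    }
}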
From source file:com.atlassian.jira.util.BugzillaImportBean.java
private static boolean tableHasColumn(final Connection conn, final String table, final String column)
        throws SQLException {
    final ResultSet rs = conn.getMetaData().getColumns(null, null, table, column);
    final boolean next = rs.next();
    rs.close();
    return next;
}
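Note that the table and column arguments of getColumns() are search patterns ('%' matches any sequence of characters, '_' a single character) and must match the names as they are stored in the database, so on databases that fold unquoted identifiers a check like this may need the same case handling shown in the previous example.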
From source file:it.unibas.spicy.persistence.relational.DAORelational.java
public IDataSourceProxy loadSchemaForWeb(int scenarioNo, AccessConfiguration accessConfiguration,
        DBFragmentDescription dataDescription, IConnectionFactory dataSourceDB, boolean source)
        throws DAOException {
    this.dataDescription = dataDescription;
    INode root = null;
    String catalog = null;
    String schemaName = accessConfiguration.getSchemaName();
    DatabaseMetaData databaseMetaData = null;
    Connection connection = dataSourceDB.getConnection(accessConfiguration);
    IDataSourceProxy dataSource = null;
    try {
        databaseMetaData = connection.getMetaData();
        catalog = connection.getCatalog();
        if (catalog == null) {
            catalog = accessConfiguration.getUri();
            if (logger.isDebugEnabled())
                logger.debug("Catalog is null. Catalog name will be: " + catalog);
        }
        root = this.createRootNode(catalog);
        String[] tableTypes = new String[] { "TABLE" };
        ResultSet tableResultSet = databaseMetaData.getTables(catalog, schemaName, null, tableTypes);
        while (tableResultSet.next()) {
            String tableName = tableResultSet.getString("TABLE_NAME");
            if (!this.dataDescription.checkLoadTable(tableName)) {
                continue;
            }
            INode setTable = new SetNode(tableName);
            setTable.addChild(getTuple(databaseMetaData, catalog, schemaName, tableName));
            setTable.setRequired(false);
            setTable.setNotNull(true);
            root.addChild(setTable);
            addNode(tableName, setTable);
        }
        dataSource = new ConstantDataSourceProxy(new DataSource(SpicyEngineConstants.TYPE_RELATIONAL, root));
        dataSource.addAnnotation(SpicyEngineConstants.ACCESS_CONFIGURATION, accessConfiguration);
        for (Map.Entry<String, String> entry : changedColumnNames.entrySet()) {
            dataSource.putChangedValue(entry.getKey(), entry.getValue());
        }
        loadPrimaryKeys(dataSource, databaseMetaData, catalog, schemaName, source, null, scenarioNo, true);
        loadForeignKeys(dataSource, databaseMetaData, catalog, schemaName, source, scenarioNo);
    } catch (Throwable ex) {
        logger.error(ex);
        throw new DAOException(ex.getMessage());
    } finally {
        if (connection != null)
            dataSourceDB.close(connection);
    }
    return dataSource;
}
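loadPrimaryKeys and loadForeignKeys here are project helpers; the underlying JDBC calls are DatabaseMetaData.getPrimaryKeys() and getImportedKeys(). A minimal sketch of reading a table's primary key columns, assuming an open connection:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class PrimaryKeysSketch {
    // Print each primary key column of a table together with its position in the key (KEY_SEQ).
    static void printPrimaryKeys(Connection con, String schema, String table) throws SQLException {
        DatabaseMetaData dbmd = con.getMetaData();
        try (ResultSet rs = dbmd.getPrimaryKeys(con.getCatalog(), schema, table)) {
            while (rs.next()) {
                System.out.println(rs.getShort("KEY_SEQ") + ": " + rs.getString("COLUMN_NAME"));
            }
        }
    }
}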
From source file:com.xpn.xwiki.internal.store.hibernate.HibernateStore.java
/**
 * Retrieve metadata about the database used (name, version, etc).
 * <p>
 * Note that the database metadata is not cached and it's retrieved at each call. If all you need is the database
 * product name you should use {@link #getDatabaseProductName()} instead, which is cached.
 * </p>
 *
 * @return the database meta data or null if an error occurred
 * @since 6.1M1
 */
public DatabaseMetaData getDatabaseMetaData() {
    DatabaseMetaData result;

    Connection connection = null;
    // Note that we need to do the cast because this is how Hibernate suggests to get the Connection Provider.
    // See http://bit.ly/QAJXlr
    ConnectionProvider connectionProvider = ((SessionFactoryImplementor) getSessionFactory())
            .getConnectionProvider();
    try {
        connection = connectionProvider.getConnection();
        result = connection.getMetaData();
    } catch (SQLException ignored) {
        result = null;
    } finally {
        if (connection != null) {
            try {
                connectionProvider.closeConnection(connection);
            } catch (SQLException ignored) {
                // Ignore
            }
        }
    }

    return result;
}