Example usage for java.sql DatabaseMetaData getDatabaseMajorVersion

List of usage examples for java.sql DatabaseMetaData getDatabaseMajorVersion

Introduction

On this page you can find usage examples for java.sql.DatabaseMetaData#getDatabaseMajorVersion.

Prototype

int getDatabaseMajorVersion() throws SQLException;

Document

Retrieves the major version number of the underlying database.
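
Before the project examples below, here is a minimal, self-contained sketch of the call; the JDBC URL, credentials, and database (a hypothetical local PostgreSQL instance) are placeholders, and the matching driver is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.SQLException;

public class DatabaseMajorVersionExample {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL and credentials; replace with those of your own database.
        try (Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost/mydb", "user", "secret")) {
            DatabaseMetaData meta = conn.getMetaData();
            // e.g. 15 for PostgreSQL 15.x, 8 for MySQL 8.x
            int major = meta.getDatabaseMajorVersion();
            int minor = meta.getDatabaseMinorVersion();
            System.out.println("Database version: " + major + "." + minor);
        }
    }
}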

Usage

From source file:annis.dao.SpringAnnisDao.java

@Override
public boolean checkDatabaseVersion() throws AnnisException {
    try (Connection conn = getJdbcTemplate().getDataSource().getConnection()) {

        DatabaseMetaData meta = conn.getMetaData();

        log.debug("database info [major: " + meta.getDatabaseMajorVersion() + " minor: "
                + meta.getDatabaseMinorVersion() + " complete: " + meta.getDatabaseProductVersion() + " name: "
                + meta.getDatabaseProductName() + "]");

        if (!"PostgreSQL".equalsIgnoreCase(meta.getDatabaseProductName())) {
            throw new AnnisException("You did provide a database connection to a "
                    + "database that is not PostgreSQL. Please note that this will " + "not work.");
        }
        if (meta.getDatabaseMajorVersion() < 9
                || (meta.getDatabaseMajorVersion() == 9 && meta.getDatabaseMinorVersion() < 1)) // we urge people to use 9.2, but 9.1 should be valid as well
        {
            throw new AnnisException("Wrong PostgreSQL version installed. Please "
                    + "install at least PostgreSQL 9.2 (current installed version is "
                    + meta.getDatabaseProductVersion() + ")");
        }

        // all checks passed: the connected PostgreSQL version is supported
        return true;
    } catch (SQLException ex) {
        log.error("could not get database version", ex);
    }

    return false;
}

From source file:net.hydromatic.optiq.test.JdbcTest.java

/**
 * Make sure that the properties look sane.
 */
@Test
public void testVersion() throws ClassNotFoundException, SQLException {
    Class.forName("net.hydromatic.optiq.jdbc.Driver");
    Connection connection = DriverManager.getConnection("jdbc:optiq:");
    OptiqConnection optiqConnection = connection.unwrap(OptiqConnection.class);
    final DatabaseMetaData metaData = optiqConnection.getMetaData();
    assertEquals("Optiq JDBC Driver", metaData.getDriverName());

    final String driverVersion = metaData.getDriverVersion();
    final int driverMajorVersion = metaData.getDriverMajorVersion();
    final int driverMinorVersion = metaData.getDriverMinorVersion();
    assertEquals(0, driverMajorVersion);
    assertEquals(4, driverMinorVersion);

    assertEquals("Optiq", metaData.getDatabaseProductName());
    final String databaseProductVersion = metaData.getDatabaseProductVersion();
    final int databaseMajorVersion = metaData.getDatabaseMajorVersion();
    assertEquals(driverMajorVersion, databaseMajorVersion);
    final int databaseMinorVersion = metaData.getDatabaseMinorVersion();
    assertEquals(driverMinorVersion, databaseMinorVersion);

    // Check how version is composed of major and minor version. Note that
    // version is stored in pom.xml; major and minor version are
    // stored in net-hydromatic-optiq-jdbc.properties.
    if (!driverVersion.endsWith("-SNAPSHOT")) {
        assertTrue(driverVersion.startsWith("0."));
        String[] split = driverVersion.split("\\.");
        assertTrue(split.length >= 2);
        assertTrue(driverVersion.startsWith(driverMajorVersion + "." + driverMinorVersion + "."));
    }
    if (!databaseProductVersion.endsWith("-SNAPSHOT")) {
        assertTrue(databaseProductVersion.startsWith("0."));
        String[] split = databaseProductVersion.split("\\.");
        assertTrue(split.length >= 2);
        assertTrue(databaseProductVersion.startsWith(databaseMajorVersion + "." + databaseMinorVersion + "."));
    }

    connection.close();
}

From source file:com.hangum.tadpole.engine.manager.TadpoleSQLManager.java

/**
 * Sets the metadata for the given DB connection.
 * 
 * @param searchKey
 * @param userDB
 * @param dbMetaData
 * @throws Exception
 */
public static void setMetaData(String searchKey, final UserDBDAO userDB, DatabaseMetaData dbMetaData)
        throws Exception {
    // Skip Tadpole's own system databases.
    if (userDB.getDBDefine() == DBDefine.TADPOLE_SYSTEM_DEFAULT
            || userDB.getDBDefine() == DBDefine.TADPOLE_SYSTEM_MYSQL_DEFAULT)
        return;

    String strIdentifierQuoteString = "";
    try {
        strIdentifierQuoteString = dbMetaData.getIdentifierQuoteString();
    } catch (Exception e) {
        // ignore exception, not support quoteString
    }

    // See https://github.com/hangum/TadpoleForDBTools/issues/412.
    TadpoleMetaData tadpoleMetaData = null;
    switch (userDB.getDBDefine()) {
    case ORACLE_DEFAULT:
    case TIBERO_DEFAULT:
        tadpoleMetaData = new TadpoleMetaData(strIdentifierQuoteString,
                TadpoleMetaData.STORES_FIELD_TYPE.LOWCASE_BLANK);
        break;
    case MSSQL_DEFAULT:
    case MSSQL_8_LE_DEFAULT:
    case MYSQL_DEFAULT:
    case MARIADB_DEFAULT:
    case SQLite_DEFAULT:
        tadpoleMetaData = new TadpoleMetaData(strIdentifierQuoteString,
                TadpoleMetaData.STORES_FIELD_TYPE.BLANK);
        break;
    case POSTGRE_DEFAULT:
    case TAJO_DEFAULT:
        tadpoleMetaData = new TadpoleMetaData(strIdentifierQuoteString,
                TadpoleMetaData.STORES_FIELD_TYPE.UPPERCASE_BLANK);
        break;
    default:
        tadpoleMetaData = new TadpoleMetaData(strIdentifierQuoteString, TadpoleMetaData.STORES_FIELD_TYPE.NONE);
    }

    //      SQLConstantFactory factory = new SQLConstantFactory();
    //      SQLConstants sqlConstants = factory.getDB(userDB);
    //      tmd.setKeywords(
    //            StringUtils.replace(
    //                  sqlConstants.keyword() + "|" + sqlConstants.function() + "|" + sqlConstants.constant() + "|" +sqlConstants.variable(),
    //                  "|",
    //                  ","
    //                  )
    //            );
    // set keyword
    if (userDB.getDBDefine() == DBDefine.SQLite_DEFAULT) {
        // not support keyword http://sqlite.org/lang_keywords.html
        tadpoleMetaData.setKeywords(StringUtils.join(SQLConstants.QUOTE_SQLITE_KEYWORDS, ","));
    } else if (userDB.getDBDefine() == DBDefine.MYSQL_DEFAULT
            || userDB.getDBDefine() == DBDefine.ORACLE_DEFAULT
            || userDB.getDBDefine() == DBDefine.TIBERO_DEFAULT) {
        String strFullKeywords = StringUtils.join(SQLConstants.QUOTE_MYSQL_KEYWORDS, ",") + ","
                + dbMetaData.getSQLKeywords();
        tadpoleMetaData.setKeywords(strFullKeywords);
    } else if (userDB.getDBDefine() == DBDefine.MONGODB_DEFAULT) {
        // not support this method
        tadpoleMetaData.setKeywords("");
    } else if (userDB.getDBDefine() == DBDefine.MSSQL_8_LE_DEFAULT
            || userDB.getDBDefine() == DBDefine.MSSQL_DEFAULT) {
        String strFullKeywords = StringUtils.join(SQLConstants.QUOTE_MSSQL_KEYWORDS, ",") + ","
                + dbMetaData.getSQLKeywords();
        tadpoleMetaData.setKeywords(strFullKeywords);
    } else {
        tadpoleMetaData.setKeywords(dbMetaData.getSQLKeywords());
    }

    tadpoleMetaData.setDbMajorVersion(dbMetaData.getDatabaseMajorVersion());
    tadpoleMetaData.setMinorVersion(dbMetaData.getDatabaseMinorVersion());
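    // Note: dbMetadata below refers to a class-level cache map in TadpoleSQLManager
    // (not shown in this excerpt), distinct from the dbMetaData parameter.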
    dbMetadata.put(searchKey, tadpoleMetaData);
}

From source file:org.acmsl.queryj.tools.handlers.DatabaseMetaDataRetrievalHandler.java

/**
 * Retrieves the database major version.
 * @param metaData the database metadata.
 * @return the database major version.
 */
protected int retrieveDatabaseMajorVersion(@NotNull final DatabaseMetaData metaData) {
    int result = -1;

    try {
        result = metaData.getDatabaseMajorVersion();
    } catch (@NotNull final SQLException sqlException) {
        @Nullable
        final Log t_Log = UniqueLogFactory.getLog(DatabaseMetaDataRetrievalHandler.class);

        if (t_Log != null) {
            t_Log.debug("Cannot retrieve database vendor's major version.", sqlException);
        }
    }

    return result;
}

From source file:org.alfresco.repo.domain.schema.DataSourceCheck.java

public void init() {
    logger.info(I18NUtil.getMessage(MSG_DB_CONNECTION, dbUrl, dbUsername));

    Connection con = null;
    try {
        con = dataSource.getConnection();
        con.setAutoCommit(true);
        DatabaseMetaData meta = con.getMetaData();
        logger.info(I18NUtil.getMessage(MSG_DB_VERSION, meta.getDatabaseProductName(),
                meta.getDatabaseProductVersion()));

        Dialect dialect = DialectFactory.buildDialect(cfg.getProperties(), meta.getDatabaseProductName(),
                meta.getDatabaseMajorVersion());

        // Check MS SQL Server specific settings
        if (dialect instanceof SQLServerDialect) {
            if (transactionIsolation != SQL_SERVER_TRANSACTION_ISOLATION) {
                throw new AlfrescoRuntimeException(ERR_WRONG_TRANSACTION_ISOLATION_SQL_SERVER,
                        new Object[] { transactionIsolation, SQL_SERVER_TRANSACTION_ISOLATION });
            }
        }
    } catch (RuntimeException re) {
        // just rethrow
        throw re;
    } catch (Exception e) {
        throw new AlfrescoRuntimeException(ERR_DB_CONNECTION, new Object[] { e.getMessage() }, e);
    } finally {
        try {
            con.close();
        } catch (Exception e) {
        }
    }
}

From source file:org.apache.bigtop.itest.hive.TestJdbc.java

/**
 * Test simple DatabaseMetaData calls.  getColumns is tested elsewhere, as we need to call
 * that on a valid table.  Same with getFunctions.
 *
 * @throws SQLException
 */
@Test
public void databaseMetaDataCalls() throws SQLException {
    DatabaseMetaData md = conn.getMetaData();

    boolean boolrc = md.allTablesAreSelectable();
    LOG.debug("All tables are selectable? " + boolrc);

    String strrc = md.getCatalogSeparator();
    LOG.debug("Catalog separator " + strrc);

    strrc = md.getCatalogTerm();
    LOG.debug("Catalog term " + strrc);

    ResultSet rs = md.getCatalogs();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found catalog " + strrc);
    }

    Connection c = md.getConnection();

    int intrc = md.getDatabaseMajorVersion();
    LOG.debug("DB major version is " + intrc);

    intrc = md.getDatabaseMinorVersion();
    LOG.debug("DB minor version is " + intrc);

    strrc = md.getDatabaseProductName();
    LOG.debug("DB product name is " + strrc);

    strrc = md.getDatabaseProductVersion();
    LOG.debug("DB product version is " + strrc);

    intrc = md.getDefaultTransactionIsolation();
    LOG.debug("Default transaction isolation is " + intrc);

    intrc = md.getDriverMajorVersion();
    LOG.debug("Driver major version is " + intrc);

    intrc = md.getDriverMinorVersion();
    LOG.debug("Driver minor version is " + intrc);

    strrc = md.getDriverName();
    LOG.debug("Driver name is " + strrc);

    strrc = md.getDriverVersion();
    LOG.debug("Driver version is " + strrc);

    strrc = md.getExtraNameCharacters();
    LOG.debug("Extra name characters is " + strrc);

    strrc = md.getIdentifierQuoteString();
    LOG.debug("Identifier quote string is " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getImportedKeys("a", "b", "d");

    // In Hive 1.2 this always returns an empty RS
    rs = md.getIndexInfo("a", "b", "d", true, true);

    intrc = md.getJDBCMajorVersion();
    LOG.debug("JDBC major version is " + intrc);

    intrc = md.getJDBCMinorVersion();
    LOG.debug("JDBC minor version is " + intrc);

    intrc = md.getMaxColumnNameLength();
    LOG.debug("Maximum column name length is " + intrc);

    strrc = md.getNumericFunctions();
    LOG.debug("Numeric functions are " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getPrimaryKeys("a", "b", "d");

    // In Hive 1.2 this always returns an empty RS
    rs = md.getProcedureColumns("a", "b", "d", "e");

    strrc = md.getProcedureTerm();
    LOG.debug("Procedures are called " + strrc);

    // In Hive 1.2 this always returns an empty RS
    rs = md.getProcedures("a", "b", "d");

    strrc = md.getSchemaTerm();
    LOG.debug("Schemas are called " + strrc);

    rs = md.getSchemas();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found schema " + strrc);
    }

    strrc = md.getSearchStringEscape();
    LOG.debug("Search string escape is " + strrc);

    strrc = md.getStringFunctions();
    LOG.debug("String functions are " + strrc);

    strrc = md.getSystemFunctions();
    LOG.debug("System functions are " + strrc);

    rs = md.getTableTypes();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found table type " + strrc);
    }

    strrc = md.getTimeDateFunctions();
    LOG.debug("Time/date functions are " + strrc);

    rs = md.getTypeInfo();
    while (rs.next()) {
        strrc = rs.getString(1);
        LOG.debug("Found type " + strrc);
    }

    // In Hive 1.2 this always returns an empty RS
    rs = md.getUDTs("a", "b", "d", null);

    boolrc = md.supportsAlterTableWithAddColumn();
    LOG.debug("Supports alter table with add column? " + boolrc);

    boolrc = md.supportsAlterTableWithDropColumn();
    LOG.debug("Supports alter table with drop column? " + boolrc);

    boolrc = md.supportsBatchUpdates();
    LOG.debug("Supports batch updates? " + boolrc);

    boolrc = md.supportsCatalogsInDataManipulation();
    LOG.debug("Supports catalogs in data manipulation? " + boolrc);

    boolrc = md.supportsCatalogsInIndexDefinitions();
    LOG.debug("Supports catalogs in index definition? " + boolrc);

    boolrc = md.supportsCatalogsInPrivilegeDefinitions();
    LOG.debug("Supports catalogs in privilege definition? " + boolrc);

    boolrc = md.supportsCatalogsInProcedureCalls();
    LOG.debug("Supports catalogs in procedure calls? " + boolrc);

    boolrc = md.supportsCatalogsInTableDefinitions();
    LOG.debug("Supports catalogs in table definition? " + boolrc);

    boolrc = md.supportsColumnAliasing();
    LOG.debug("Supports column aliasing? " + boolrc);

    boolrc = md.supportsFullOuterJoins();
    LOG.debug("Supports full outer joins? " + boolrc);

    boolrc = md.supportsGroupBy();
    LOG.debug("Supports group by? " + boolrc);

    boolrc = md.supportsLimitedOuterJoins();
    LOG.debug("Supports limited outer joins? " + boolrc);

    boolrc = md.supportsMultipleResultSets();
    LOG.debug("Supports multiple result sets? " + boolrc);

    boolrc = md.supportsNonNullableColumns();
    LOG.debug("Supports non-nullable columns? " + boolrc);

    boolrc = md.supportsOuterJoins();
    LOG.debug("Supports outer joins? " + boolrc);

    boolrc = md.supportsPositionedDelete();
    LOG.debug("Supports positioned delete? " + boolrc);

    boolrc = md.supportsPositionedUpdate();
    LOG.debug("Supports positioned update? " + boolrc);

    boolrc = md.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
    LOG.debug("Supports result set holdability? " + boolrc);

    boolrc = md.supportsResultSetType(ResultSet.TYPE_FORWARD_ONLY);
    LOG.debug("Supports result set type? " + boolrc);

    boolrc = md.supportsSavepoints();
    LOG.debug("Supports savepoints? " + boolrc);

    boolrc = md.supportsSchemasInDataManipulation();
    LOG.debug("Supports schemas in data manipulation? " + boolrc);

    boolrc = md.supportsSchemasInIndexDefinitions();
    LOG.debug("Supports schemas in index definitions? " + boolrc);

    boolrc = md.supportsSchemasInPrivilegeDefinitions();
    LOG.debug("Supports schemas in privilege definitions? " + boolrc);

    boolrc = md.supportsSchemasInProcedureCalls();
    LOG.debug("Supports schemas in procedure calls? " + boolrc);

    boolrc = md.supportsSchemasInTableDefinitions();
    LOG.debug("Supports schemas in table definitions? " + boolrc);

    boolrc = md.supportsSelectForUpdate();
    LOG.debug("Supports select for update? " + boolrc);

    boolrc = md.supportsStoredProcedures();
    LOG.debug("Supports stored procedures? " + boolrc);

    boolrc = md.supportsTransactions();
    LOG.debug("Supports transactions? " + boolrc);

    boolrc = md.supportsUnion();
    LOG.debug("Supports union? " + boolrc);

    boolrc = md.supportsUnionAll();
    LOG.debug("Supports union all? " + boolrc);

}

From source file:org.apache.ddlutils.TestSummaryCreatorTask.java

/**
 * Adds the data from the test jdbc properties file to the document.
 *
 * @param element            The element to add the relevant database properties to
 * @param jdbcPropertiesFile The path of the properties file
 */
protected void addTargetDatabaseInfo(Element element, String jdbcPropertiesFile)
        throws IOException, BuildException {
    if (jdbcPropertiesFile == null) {
        return;
    }

    Properties props = readProperties(jdbcPropertiesFile);
    Connection conn = null;
    DatabaseMetaData metaData = null;

    try {
        String dataSourceClass = props.getProperty(
                TestAgainstLiveDatabaseBase.DATASOURCE_PROPERTY_PREFIX + "class",
                BasicDataSource.class.getName());
        DataSource dataSource = (DataSource) Class.forName(dataSourceClass).newInstance();

        for (Iterator it = props.entrySet().iterator(); it.hasNext();) {
            Map.Entry entry = (Map.Entry) it.next();
            String propName = (String) entry.getKey();

            if (propName.startsWith(TestAgainstLiveDatabaseBase.DATASOURCE_PROPERTY_PREFIX)
                    && !propName.equals(TestAgainstLiveDatabaseBase.DATASOURCE_PROPERTY_PREFIX + "class")) {
                BeanUtils.setProperty(dataSource,
                        propName.substring(TestAgainstLiveDatabaseBase.DATASOURCE_PROPERTY_PREFIX.length()),
                        entry.getValue());
            }
        }

        String platformName = props.getProperty(TestAgainstLiveDatabaseBase.DDLUTILS_PLATFORM_PROPERTY);

        if (platformName == null) {
            platformName = new PlatformUtils().determineDatabaseType(dataSource);
            if (platformName == null) {
                throw new BuildException(
                        "Could not determine platform from datasource, please specify it in the jdbc.properties via the ddlutils.platform property");
            }
        }

        element.addAttribute("platform", platformName);
        element.addAttribute("dataSourceClass", dataSourceClass);

        conn = dataSource.getConnection();
        metaData = conn.getMetaData();

        try {
            element.addAttribute("dbProductName", metaData.getDatabaseProductName());
        } catch (Throwable ex) {
            // we ignore it
        }
        try {
            element.addAttribute("dbProductVersion", metaData.getDatabaseProductVersion());
        } catch (Throwable ex) {
            // we ignore it
        }
        try {
            int databaseMajorVersion = metaData.getDatabaseMajorVersion();
            int databaseMinorVersion = metaData.getDatabaseMinorVersion();

            element.addAttribute("dbVersion", databaseMajorVersion + "." + databaseMinorVersion);
        } catch (Throwable ex) {
            // we ignore it
        }
        try {
            element.addAttribute("driverName", metaData.getDriverName());
        } catch (Throwable ex) {
            // we ignore it
        }
        try {
            element.addAttribute("driverVersion", metaData.getDriverVersion());
        } catch (Throwable ex) {
            // we ignore it
        }
        try {
            int jdbcMajorVersion = metaData.getJDBCMajorVersion();
            int jdbcMinorVersion = metaData.getJDBCMinorVersion();

            element.addAttribute("jdbcVersion", jdbcMajorVersion + "." + jdbcMinorVersion);
        } catch (Throwable ex) {
            // we ignore it
        }
    } catch (Exception ex) {
        throw new BuildException(ex);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ex) {
                // we ignore it
            }
        }
    }
}

From source file:org.apache.druid.metadata.storage.postgresql.PostgreSQLConnector.java

protected boolean canUpsert(Handle handle) throws SQLException {
    if (canUpsert == null) {
        DatabaseMetaData metaData = handle.getConnection().getMetaData();
        canUpsert = metaData.getDatabaseMajorVersion() > 9
                || (metaData.getDatabaseMajorVersion() == 9 && metaData.getDatabaseMinorVersion() >= 5);
    }
    return canUpsert;
}
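
Several examples on this page gate a feature on a (major, minor) version pair, as the Druid connector above does for PostgreSQL 9.5. A generic helper sketch of that comparison (not taken from any of the quoted projects) might look like this:

// Returns true when the connected database reports at least the given major.minor version.
static boolean isAtLeastVersion(DatabaseMetaData meta, int major, int minor) throws SQLException {
    int dbMajor = meta.getDatabaseMajorVersion();
    int dbMinor = meta.getDatabaseMinorVersion();
    return dbMajor > major || (dbMajor == major && dbMinor >= minor);
}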

From source file:org.apache.hadoop.vertica.VerticaUtil.java

public static int verticaVersion(Configuration conf, boolean output) throws IOException {
    int ver = -1;
    try {
        VerticaConfiguration vtconfig = new VerticaConfiguration(conf);
        Connection conn = vtconfig.getConnection(output);
        DatabaseMetaData dbmd = conn.getMetaData();
        ver = dbmd.getDatabaseMajorVersion() * 100;
        ver += dbmd.getDatabaseMinorVersion();
    } catch (ClassNotFoundException e) {
        throw new IOException("Vertica Driver required to use Vertica Input or Output Formatters");
    } catch (SQLException e) {
        throw new IOException(e);
    }
    return ver;
}

From source file:org.apache.hive.jdbc.TestJdbcDriver2.java

@Test
public void testDatabaseMetaData() throws SQLException {
    DatabaseMetaData meta = con.getMetaData();

    assertEquals("Apache Hive", meta.getDatabaseProductName());
    assertEquals(HiveVersionInfo.getVersion(), meta.getDatabaseProductVersion());
    assertEquals(System.getProperty("hive.version"), meta.getDatabaseProductVersion());
    assertTrue("verifying hive version pattern. got " + meta.getDatabaseProductVersion(),
            Pattern.matches("\\d+\\.\\d+\\.\\d+.*", meta.getDatabaseProductVersion()));

    assertEquals(DatabaseMetaData.sqlStateSQL99, meta.getSQLStateType());
    assertFalse(meta.supportsCatalogsInTableDefinitions());
    assertTrue(meta.supportsSchemasInTableDefinitions());
    assertTrue(meta.supportsSchemasInDataManipulation());
    assertFalse(meta.supportsMultipleResultSets());
    assertFalse(meta.supportsStoredProcedures());
    assertTrue(meta.supportsAlterTableWithAddColumn());

    //-1 indicates malformed version.
    assertTrue(meta.getDatabaseMajorVersion() > -1);
    assertTrue(meta.getDatabaseMinorVersion() > -1);
}