Example usage for java.sql Connection unwrap

Introduction

This page collects usage examples for the java.sql.Connection.unwrap method.

Prototype

<T> T unwrap(java.lang.Class<T> iface) throws java.sql.SQLException;

Document

Returns an object that implements the given interface to allow access to non-standard methods, or standard methods not exposed by the proxy.
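
Before the examples below, here is a minimal, self-contained sketch of the call pattern. The Phoenix JDBC URL and the choice of PhoenixConnection as the target interface are illustrative assumptions, not taken from any example on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class UnwrapExample {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // isWrapperFor avoids the SQLException that unwrap throws when the
            // connection neither implements nor wraps the requested interface.
            if (conn.isWrapperFor(PhoenixConnection.class)) {
                PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
                // Vendor-specific methods are now accessible on the unwrapped object.
                System.out.println(pconn.getQueryServices());
            }
        }
    }
}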

Usage

From source file:org.apache.phoenix.schema.stats.BaseStatsCollectorIT.java

private void testCompactUpdatesStats(Integer statsUpdateFreq, String tableName) throws Exception {
    int nRows = 10;
    Connection conn = getConnection(statsUpdateFreq);
    PreparedStatement stmt;
    conn.createStatement()
            .execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) "
                    + (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "")
                    + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
    stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
    for (int i = 0; i < nRows; i++) {
        stmt.setString(1, Character.toString((char) ('a' + i)));
        stmt.setInt(2, i);
        stmt.setInt(3, i);
        stmt.executeUpdate();
    }
    conn.commit();

    TestUtil.doMajorCompaction(conn, physicalTableName);

    if (statsUpdateFreq != 0) {
        invalidateStats(conn, tableName);
    } else {
        // Confirm that when we have a non-zero STATS_UPDATE_FREQ_MS_ATTRIB, after we run
        // UPDATE STATISTICS, the new statistics are faulted in as expected.
        List<KeyRange> keyRanges = getAllSplits(conn, tableName);
        assertNotEquals(nRows + 1, keyRanges.size());
        // If we've set MIN_STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
        // and force the new stats to be pulled over.
        int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
        assertEquals(10, rowCount);
    }
    List<KeyRange> keyRanges = getAllSplits(conn, tableName);
    assertEquals(nRows + 1, keyRanges.size());

    int nDeletedRows = conn.createStatement()
            .executeUpdate("DELETE FROM " + tableName + " WHERE V < " + nRows / 2);
    conn.commit();
    assertEquals(5, nDeletedRows);

    Scan scan = new Scan();
    scan.setRaw(true);
    PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
    try (Table htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
        ResultScanner scanner = htable.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            System.out.println(result);
        }
    }

    TestUtil.doMajorCompaction(conn, physicalTableName);

    scan = new Scan();
    scan.setRaw(true);
    phxConn = conn.unwrap(PhoenixConnection.class);
    try (Table htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
        ResultScanner scanner = htable.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            System.out.println(result);
        }
    }

    if (statsUpdateFreq != 0) {
        invalidateStats(conn, tableName);
    } else {
        assertEquals(nRows + 1, keyRanges.size());
        // If we've set STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
        // and force us to pull over the new stats
        int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
        assertEquals(5, rowCount);
    }
    keyRanges = getAllSplits(conn, tableName);
    assertEquals(nRows / 2 + 1, keyRanges.size());
    ResultSet rs = conn.createStatement()
            .executeQuery("SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM " + "\"" + SYSTEM_CATALOG_SCHEMA + "\".\""
                    + SYSTEM_STATS_TABLE + "\"" + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
    rs.next();
    assertEquals(nRows - nDeletedRows, rs.getLong(1));
}
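
The two unwrap calls in this test exist only to drop from the JDBC layer to the underlying HBase client. Condensed to just that pattern, a hedged sketch (the table name is a placeholder):

    // Raw scans also return delete markers and all cell versions.
    PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
    Scan scan = new Scan();
    scan.setRaw(true);
    try (Table htable = phxConn.getQueryServices().getTable(Bytes.toBytes("MY_TABLE"));
         ResultScanner scanner = htable.getScanner(scan)) {
        for (Result result : scanner) {
            System.out.println(result);
        }
    }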

From source file:org.apache.phoenix.schema.stats.BaseStatsCollectorIT.java

@Test
public void testWithMultiCF() throws Exception {
    int nRows = 20;
    Connection conn = getConnection(0);
    PreparedStatement stmt;
    conn.createStatement()
            .execute("CREATE TABLE " + fullTableName
                    + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) "
                    + tableDDLOptions);
    stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?, ?, ?, ?)");
    byte[] val = new byte[250];
    for (int i = 0; i < nRows; i++) {
        stmt.setString(1, Character.toString((char) ('a' + i)) + Bytes.toString(val));
        stmt.setInt(2, i);
        stmt.setInt(3, i);
        stmt.setInt(4, i);
        stmt.setInt(5, i);
        stmt.executeUpdate();
    }
    conn.commit();
    stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, c.v, d.v) VALUES(?,?,?)");
    for (int i = 0; i < 5; i++) {
        stmt.setString(1, Character.toString((char) ('a' + 'z' + i)) + Bytes.toString(val));
        stmt.setInt(2, i);
        stmt.setInt(3, i);
        stmt.executeUpdate();
    }
    conn.commit();

    ResultSet rs;
    collectStatistics(conn, fullTableName);
    List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
    assertEquals(26, keyRanges.size());
    rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
    assertEquals("CLIENT 26-CHUNK 25 ROWS "
            + (columnEncoded ? (mutable ? "12530" : "13902")
                    : (TransactionFactory.Provider.OMID.name().equals(transactionProvider)) ? "25044" : "12420")
            + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName, QueryUtil.getExplainPlan(rs));

    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes(physicalTableName));
    assertEquals(1, regions.size());

    collectStatistics(conn, fullTableName, Long.toString(1000));
    keyRanges = getAllSplits(conn, fullTableName);
    boolean oneCellPerColFamliyStorageScheme = !mutable && columnEncoded;
    boolean hasShadowCells = TransactionFactory.Provider.OMID.name().equals(transactionProvider);
    assertEquals(oneCellPerColFamliyStorageScheme ? 13 : hasShadowCells ? 23 : 12, keyRanges.size());

    rs = conn.createStatement().executeQuery(
            "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
                    + physicalTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");

    assertTrue(rs.next());
    assertEquals("A", rs.getString(1));
    assertEquals(24, rs.getInt(2));
    assertEquals(columnEncoded ? (mutable ? 12252 : 13624) : hasShadowCells ? 24756 : 12144, rs.getInt(3));
    assertEquals(oneCellPerColFamliyStorageScheme ? 12 : hasShadowCells ? 22 : 11, rs.getInt(4));

    assertTrue(rs.next());
    assertEquals("B", rs.getString(1));
    assertEquals(oneCellPerColFamliyStorageScheme ? 24 : 20, rs.getInt(2));
    assertEquals(columnEncoded ? (mutable ? 5600 : 6972) : hasShadowCells ? 11260 : 5540, rs.getInt(3));
    assertEquals(oneCellPerColFamliyStorageScheme ? 6 : hasShadowCells ? 10 : 5, rs.getInt(4));

    assertTrue(rs.next());
    assertEquals("C", rs.getString(1));
    assertEquals(24, rs.getInt(2));
    assertEquals(columnEncoded ? (mutable ? 6724 : 6988) : hasShadowCells ? 13520 : 6652, rs.getInt(3));
    assertEquals(hasShadowCells ? 12 : 6, rs.getInt(4));

    assertTrue(rs.next());
    assertEquals("D", rs.getString(1));
    assertEquals(24, rs.getInt(2));
    assertEquals(columnEncoded ? (mutable ? 6724 : 6988) : hasShadowCells ? 13520 : 6652, rs.getInt(3));
    assertEquals(hasShadowCells ? 12 : 6, rs.getInt(4));

    assertFalse(rs.next());

    // Disable stats
    conn.createStatement().execute(
            "ALTER TABLE " + fullTableName + " SET " + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + "=0");
    collectStatistics(conn, fullTableName);
    // Assert that there are no more guideposts
    rs = conn.createStatement()
            .executeQuery("SELECT count(1) FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " WHERE "
                    + PhoenixDatabaseMetaData.PHYSICAL_NAME + "='" + physicalTableName + "' AND "
                    + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NOT NULL");
    assertTrue(rs.next());
    assertEquals(0, rs.getLong(1));
    assertFalse(rs.next());
    rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
    assertEquals("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
            QueryUtil.getExplainPlan(rs));
}

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Runs a series of semicolon-terminated SQL statements using the connection provided, returning
 * the number of SQL statements executed. Note that if the connection has specified an SCN through
 * the {@link org.apache.phoenix.util.PhoenixRuntime#CURRENT_SCN_ATTRIB} connection property, then the timestamp
 * is bumped up by one after each statement execution.
 * @param conn an open JDBC connection
 * @param reader a reader over semicolon-separated SQL statements
 * @param binds the binds for all statements
 * @return the number of SQL statements that were executed
 * @throws IOException
 * @throws SQLException
 */
public static int executeStatements(Connection conn, Reader reader, List<Object> binds)
        throws IOException, SQLException {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    // Turn auto commit to true when running scripts in case there's DML
    pconn.setAutoCommit(true);
    return pconn.executeStatements(reader, binds, System.out);
}
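
A hedged usage sketch for this method; the script file name, JDBC URL, and empty bind list are assumptions for illustration:

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Reader reader = new FileReader("statements.sql")) {
        int executed = PhoenixRuntime.executeStatements(conn, reader, Collections.emptyList());
        System.out.println(executed + " statements executed");
    }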

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Get the list of uncommitted KeyValues for the connection. Currently used to write a
 * Phoenix-compliant HFile from a map/reduce job.
 * @param conn an open JDBC connection
 * @return the list of HBase mutations for uncommitted data
 * @throws SQLException
 */
public static Iterator<Pair<byte[], List<KeyValue>>> getUncommittedDataIterator(Connection conn,
        boolean includeMutableIndexes) throws SQLException {
    final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    final Iterator<Pair<byte[], List<Mutation>>> iterator = pconn.getMutationState()
            .toMutations(includeMutableIndexes);
    return new Iterator<Pair<byte[], List<KeyValue>>>() {

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        @Override
        public Pair<byte[], List<KeyValue>> next() {
            Pair<byte[], List<Mutation>> pair = iterator.next();
            List<KeyValue> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5); // Guess-timate 5 key values per row
            for (Mutation mutation : pair.getSecond()) {
                for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
                    for (Cell keyValue : keyValueList) {
                        keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
                    }
                }
            }
            Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator());
            return new Pair<byte[], List<KeyValue>>(pair.getFirst(), keyValues);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

    };
}
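
A hedged sketch of a caller; the table name and UPSERT statement are placeholders:

    // Inspect pending mutations before commit (auto-commit must be off,
    // otherwise the mutation state is flushed immediately).
    conn.setAutoCommit(false);
    conn.createStatement().executeUpdate("UPSERT INTO T VALUES ('a', 1)");
    Iterator<Pair<byte[], List<KeyValue>>> uncommitted =
            PhoenixRuntime.getUncommittedDataIterator(conn, false);
    while (uncommitted.hasNext()) {
        Pair<byte[], List<KeyValue>> tablePair = uncommitted.next();
        System.out.println(Bytes.toString(tablePair.getFirst()) + ": "
                + tablePair.getSecond().size() + " key values");
    }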

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Returns the PTable metadata for the given table name, checking the connection's
 * metadata cache first and falling back to a server round trip if the table is not cached.
 * @param conn an open JDBC connection
 * @param name requires a pre-normalized table name or a pre-normalized schema and table name
 * @return the PTable for the given name
 * @throws SQLException
 */
public static PTable getTable(Connection conn, String name) throws SQLException {
    PTable table = null;
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    try {
        table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), name));
    } catch (TableNotFoundException e) {
        String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
        String tableName = SchemaUtil.getTableNameFromFullName(name);
        MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(schemaName, tableName);
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            throw e;
        }
        table = result.getTable();
    }
    return table;
}
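
A hedged usage sketch; the fully qualified name is a placeholder and, per the javadoc above, must already be normalized:

    PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE");
    System.out.println("PK columns: " + table.getPKColumns().size());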

From source file:org.apache.phoenix.util.PhoenixRuntime.java

private static List<PColumn> getPkColumns(PTable ptable, Connection conn, boolean forDataTable)
        throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    List<PColumn> pkColumns = ptable.getPKColumns();

    // Skip the salting column and the view index id column if present.
    // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant
    int offset = (ptable.getBucketNum() == null ? 0 : 1)
            + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0)
            + (ptable.getViewIndexId() == null ? 0 : 1);

    // get a sublist of pkColumns by skipping the offset columns.
    pkColumns = pkColumns.subList(offset, pkColumns.size());

    if (ptable.getType() == PTableType.INDEX && forDataTable) {
        // index tables have the same schema name as their parent/data tables.
        String fullDataTableName = ptable.getParentName().getString();

        // Get the corresponding columns of the data table.
        List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn);
        pkColumns = dataColumns;
    }
    return pkColumns;
}

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Method to expose the metrics associated with sending over mutations to HBase. These metrics are updated when
 * commit is called on the passed connection. Mutation metrics are accumulated for the connection till
 * {@link #resetMetrics(Connection)} is called or the connection is closed. Example usage:
 * <pre>
 * {@code
 * Map<String, Map<String, Long>> mutationWriteMetrics = null;
 * Map<String, Map<String, Long>> mutationReadMetrics = null;
 * try (Connection conn = DriverManager.getConnection(url)) {
 *    conn.createStatement().executeUpdate(dml1);
 *    ....
 *    conn.createStatement().executeUpdate(dml2);
 *    ...
 *    conn.createStatement().executeUpdate(dml3);
 *    ...
 *    conn.commit();
 *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
 *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
 *    PhoenixRuntime.resetMetrics(conn);
 * }
 * </pre>
 *  
 * @param conn
 *            connection to get the metrics for
 * @return a map of (table name) -> (map of (metric name) -> (metric value))
 * @throws SQLException
 */
public static Map<String, Map<String, Long>> getWriteMetricsForMutationsSinceLastReset(Connection conn)
        throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    return pConn.getMutationMetrics();
}

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Method to expose the read metrics associated with executing a dml statement. These metrics are updated when
 * commit is called on the passed connection. Read metrics are accumulated till {@link #resetMetrics(Connection)} is
 * called or the connection is closed. Example usage:
 * <pre>
 * {@code
 * Map<String, Map<String, Long>> mutationWriteMetrics = null;
 * Map<String, Map<String, Long>> mutationReadMetrics = null;
 * try (Connection conn = DriverManager.getConnection(url)) {
 *    conn.createStatement().executeUpdate(dml1);
 *    ....
 *    conn.createStatement().executeUpdate(dml2);
 *    ...
 *    conn.createStatement().executeUpdate(dml3);
 *    ...
 *    conn.commit();
 *    mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
 *    mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
 *    PhoenixRuntime.resetMetrics(conn);
 * }
 * </pre> 
 * @param conn
 *            connection to get the metrics for
 * @return  a map of (table name) -> (map of (metric name) -> (metric value))
 * @throws SQLException
 */
public static Map<String, Map<String, Long>> getReadMetricsForMutationsSinceLastReset(Connection conn)
        throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    return pConn.getReadMetrics();
}

From source file:org.apache.phoenix.util.PhoenixRuntime.java

/**
 * Reset the mutation and reads-for-mutations metrics collected in the connection.
 * @see #getReadMetricsForMutationsSinceLastReset(Connection)
 * @see #getWriteMetricsForMutationsSinceLastReset(Connection)
 * @param conn connection whose accumulated metrics should be cleared
 * @throws SQLException
 */
public static void resetMetrics(Connection conn) throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    pConn.clearMetrics();
}
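
Taken together with the two getters above, a hedged sketch of the metrics lifecycle on a single connection (the DML is a placeholder):

    conn.createStatement().executeUpdate("UPSERT INTO T VALUES ('a', 1)");
    conn.commit();
    // Read the metrics accumulated since the last reset...
    Map<String, Map<String, Long>> writeMetrics =
            PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
    Map<String, Map<String, Long>> readMetrics =
            PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
    // ...then clear them to start a fresh accumulation window.
    PhoenixRuntime.resetMetrics(conn);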

From source file:org.apache.phoenix.util.TestUtil.java

public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    HTableInterface htable = pconn.getQueryServices()
            .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
            new Batch.Call<MetaDataService, ClearCacheResponse>() {
                @Override
                public ClearCacheResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<ClearCacheResponse> rpcCallback = new BlockingRpcCallback<ClearCacheResponse>();
                    ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
                    instance.clearCache(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            });
}