Example usage for java.sql Connection unwrap

List of usage examples for java.sql Connection unwrap

Introduction

On this page you can find usage examples for java.sql Connection.unwrap.

Prototype

<T> T unwrap(java.lang.Class<T> iface) throws java.sql.SQLException;

Document

Returns an object that implements the given interface to allow access to non-standard methods, or standard methods not exposed by the proxy.
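
Before the driver-specific examples below, here is a minimal, self-contained sketch of the call pattern. VendorConnection and vendorSpecificMethod are hypothetical stand-ins for a real driver type such as the PhoenixConnection or HerdDBConnection used later on this page.

import java.sql.Connection;
import java.sql.SQLException;

public final class UnwrapSketch {

    // Hypothetical vendor interface; real drivers expose types such as
    // PhoenixConnection or HerdDBConnection instead.
    interface VendorConnection {
        void vendorSpecificMethod();
    }

    static void useVendorApi(Connection conn) throws SQLException {
        // unwrap throws SQLException if neither the connection nor anything
        // it wraps implements the requested interface, so probe first with
        // isWrapperFor when the wrapping is not known in advance.
        if (conn.isWrapperFor(VendorConnection.class)) {
            conn.unwrap(VendorConnection.class).vendorSpecificMethod();
        }
    }
}

Most of the examples below skip the isWrapperFor probe and unwrap directly, because each one knows which driver produced the connection and simply lets the SQLException propagate.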

Usage

From source file:herddb.cli.HerdDBCLI.java

private static void performRestore(String file, String leader, String newschema, Options options,
        final Statement statement, final Connection connection) throws SQLException, Exception {
    if (file.isEmpty()) {
        println("Please provide --file option");
        failAndPrintHelp(options);
        return;
    }
    Path inputfile = Paths.get(file).toAbsolutePath();
    if (leader.isEmpty() || newschema.isEmpty()) {
        println("options 'newleader' and 'newschema' are required");
        failAndPrintHelp(options);
        return;
    }
    List<String> nodes = new ArrayList<>();
    try (ResultSet rs = statement.executeQuery("SELECT nodeid FROM sysnodes")) {
        while (rs.next()) {
            String nodeid = rs.getString(1);
            nodes.add(nodeid);
        }
    }
    println("Restoring tablespace " + newschema + " with leader " + leader + " from file " + inputfile);
    if (!nodes.contains(leader)) {
        println("There is no node with node id '" + leader + "'");
        println("Valid nodes:");
        for (String nodeid : nodes) {
            println("* " + nodeid);
        }
        return;
    }
    try (InputStream fin = wrapStream(file, Files.newInputStream(inputfile));
            InputStream bin = new BufferedInputStream(fin, 16 * 1024 * 1024)) {
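        // Unwrap the JDBC connection to the driver's HerdDBConnection to reach
        // the native HDBConnection client needed by BackupUtils.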
        HerdDBConnection hcon = connection.unwrap(HerdDBConnection.class);
        HDBConnection hdbconnection = hcon.getConnection();
        BackupUtils.restoreTableSpace(newschema, leader, hdbconnection, bin, new ProgressListener() {
            @Override
            public void log(String actionType, String message, Map<String, Object> context) {
                println(message);
            }

        });
    }
    println("Restore finished");
}

From source file:com.frameworkset.commons.dbcp2.BasicDataSource.java

/**
 * Manually invalidates a connection, effectively requesting the pool to try
 * to close it, remove it from the pool and reclaim pool capacity.
 *
 * @throws IllegalStateException
 *             if invalidating the connection failed.
 * @since 2.1
 */
public void invalidateConnection(Connection connection) throws IllegalStateException {
    if (connection == null) {
        return;
    }
    if (connectionPool == null) {
        throw new IllegalStateException("Cannot invalidate connection: ConnectionPool is null.");
    }

    final PoolableConnection poolableConnection;
    try {
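        // Unwrap to the pool's own PoolableConnection type so it can be
        // handed to invalidateObject below.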
        poolableConnection = connection.unwrap(PoolableConnection.class);
        if (poolableConnection == null) {
            throw new IllegalStateException(
                    "Cannot invalidate connection: Connection is not a poolable connection.");
        }
    } catch (SQLException e) {
        throw new IllegalStateException("Cannot invalidate connection: Unwrapping poolable connection failed.",
                e);
    }

    // attempt to close the connection for good measure
    try {
        connection.close();
    } catch (Exception e) {
        // ignore any exceptions here
    }

    try {
        connectionPool.invalidateObject(poolableConnection);
    } catch (Exception e) {
        throw new IllegalStateException("Invalidating connection threw unexpected exception", e);
    }
}

From source file:org.apache.phoenix.end2end.DateTimeIT.java

public void testDateFormatTimeZone(String timeZoneId) throws Exception {
    Properties props = new Properties();
    props.setProperty("phoenix.query.dateFormatTimeZone", timeZoneId);
    Connection conn1 = DriverManager.getConnection(getUrl(), props);

    String tableName = generateUniqueName();
    String ddl = "CREATE TABLE IF NOT EXISTS " + tableName + " (k1 INTEGER PRIMARY KEY," + " v_date DATE,"
            + " v_time TIME," + " v_timestamp TIMESTAMP)";
    try {
        conn1.createStatement().execute(ddl);

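        // Unwrap to PhoenixConnection to verify the driver-level time zone
        // configuration of the date/time formatters.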
        PhoenixConnection pConn = conn1.unwrap(PhoenixConnection.class);
        verifyTimeZoneIDWithConn(pConn, PDate.INSTANCE, timeZoneId);
        verifyTimeZoneIDWithConn(pConn, PTime.INSTANCE, timeZoneId);
        verifyTimeZoneIDWithConn(pConn, PTimestamp.INSTANCE, timeZoneId);

        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId));
        cal.setTime(date);
        String dateStr = DateUtil.getDateFormatter(DateUtil.DEFAULT_MS_DATE_FORMAT).format(date);

        String dml = "UPSERT INTO " + tableName + " VALUES (" + "1," + "'" + dateStr + "'," + "'" + dateStr
                + "'," + "'" + dateStr + "'" + ")";
        conn1.createStatement().execute(dml);
        conn1.commit();

        PhoenixStatement stmt = conn1.createStatement().unwrap(PhoenixStatement.class);
        ResultSet rs = stmt.executeQuery("SELECT v_date, v_time, v_timestamp FROM " + tableName);

        assertTrue(rs.next());
        assertEquals(rs.getDate(1).toString(), new Date(cal.getTimeInMillis()).toString());
        assertEquals(rs.getTime(2).toString(), new Time(cal.getTimeInMillis()).toString());
        assertEquals(rs.getTimestamp(3).getTime(), cal.getTimeInMillis());
        assertFalse(rs.next());

        StatementContext stmtContext = stmt.getQueryPlan().getContext();
        verifyTimeZoneIDWithFormatter(stmtContext.getDateFormatter(), timeZoneId);
        verifyTimeZoneIDWithFormatter(stmtContext.getTimeFormatter(), timeZoneId);
        verifyTimeZoneIDWithFormatter(stmtContext.getTimestampFormatter(), timeZoneId);

        stmt.close();
    } finally {
        conn1.close();
    }
}

From source file:org.apache.phoenix.end2end.index.InvalidIndexStateClientSideIT.java

@Test
public void testCachedConnections() throws Throwable {
    final String schemaName = generateUniqueName();
    final String tableName = generateUniqueName();
    final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    final String indexName = generateUniqueName();
    final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
    final Connection conn = DriverManager.getConnection(getUrl());

    // create table and indices
    String createTableSql = "CREATE TABLE " + fullTableName
            + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER)";
    conn.createStatement().execute(createTableSql);
    conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + "(v1)");
    conn.commit();
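    // Unwrap to PhoenixConnection for access to ConnectionQueryServices and
    // the raw HBase SYSTEM.CATALOG table.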
    PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
    ConnectionQueryServices queryServices = phoenixConn.getQueryServices();
    Table metaTable = phoenixConn.getQueryServices()
            .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    long ts = EnvironmentEdgeManager.currentTimeMillis();
    MutationCode code = IndexUtil.updateIndexState(fullIndexName, ts, metaTable, PIndexState.PENDING_DISABLE)
            .getMutationCode();
    assertEquals(MutationCode.TABLE_ALREADY_EXISTS, code);
    ts = EnvironmentEdgeManager.currentTimeMillis();

    final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
    final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
    PName tenantId = phoenixConn.getTenantId();
    final long tableTimestamp = HConstants.LATEST_TIMESTAMP;
    long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
    final long resolvedTimestamp = tableResolvedTimestamp;
    final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    Batch.Call<MetaDataService, MetaDataResponse> callable = new Batch.Call<MetaDataService, MetaDataResponse>() {
        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            GetTableRequest.Builder builder = GetTableRequest.newBuilder();
            builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
            builder.setSchemaName(ByteStringer.wrap(schemaBytes));
            builder.setTableName(ByteStringer.wrap(tableBytes));
            builder.setTableTimestamp(tableTimestamp);
            builder.setClientTimestamp(resolvedTimestamp);
            builder.setClientVersion(
                    VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER));
            builder.setSkipAddingParentColumns(false);
            builder.setSkipAddingIndexes(false);
            instance.getTable(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    };
    int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
    LOG.info("Client version: " + version);
    Table ht = queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    try {
        final Map<byte[], MetaDataResponse> results = ht.coprocessorService(MetaDataService.class, tableKey,
                tableKey, callable);

        assert (results.size() == 1);
        MetaDataResponse result = results.values().iterator().next();
        assert (result.getTable().getIndexesCount() == 1);
        assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
                .equals(PIndexState.DISABLE));
    } catch (Exception e) {
        LOG.error("Exception occurred: " + e);
    } finally {
        Closeables.closeQuietly(ht);
    }

}

From source file:org.apache.phoenix.mapreduce.index.IndexTool.java

@Override
public int run(String[] args) throws Exception {
    Connection connection = null;
    try {
        CommandLine cmdLine = null;
        try {
            cmdLine = parseOptions(args);
        } catch (IllegalStateException e) {
            printHelpAndExit(e.getMessage(), getOptions());
        }
        final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf());
        final String schemaName = cmdLine.getOptionValue(SCHEMA_NAME_OPTION.getOpt());
        final String dataTable = cmdLine.getOptionValue(DATA_TABLE_OPTION.getOpt());
        final String indexTable = cmdLine.getOptionValue(INDEX_TABLE_OPTION.getOpt());
        final String qDataTable = SchemaUtil.getTableName(schemaName, dataTable);
        final String qIndexTable = SchemaUtil.getTableName(schemaName, indexTable);

        connection = ConnectionUtil.getInputConnection(configuration);
        if (!isValidIndexTable(connection, qDataTable, indexTable)) {
            throw new IllegalArgumentException(
                    String.format(" %s is not an index table for %s ", qIndexTable, qDataTable));
        }

        final PTable pdataTable = PhoenixRuntime.getTable(connection, qDataTable);
        final PTable pindexTable = PhoenixRuntime.getTable(connection, qIndexTable);

        // this is set to ensure the index table remains consistent post-population.
        long indxTimestamp = pindexTable.getTimeStamp();
        configuration.set(PhoenixConfigurationUtil.CURRENT_SCN_VALUE, Long.toString(indxTimestamp + 1));

        // check if the index type is LOCAL, if so, derive and set the physicalIndexName that is
        // computed from the qDataTable name.
        String physicalIndexTable = qIndexTable;
        if (IndexType.LOCAL.equals(pindexTable.getIndexType())) {
            physicalIndexTable = MetaDataUtil.getLocalIndexTableName(qDataTable);
        }

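        // The PostIndexDDLCompiler requires the driver-level PhoenixConnection,
        // not the generic java.sql.Connection.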
        final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
        final PostIndexDDLCompiler ddlCompiler = new PostIndexDDLCompiler(pConnection,
                new TableRef(pdataTable));
        ddlCompiler.compile(pindexTable);

        final List<String> indexColumns = ddlCompiler.getIndexColumnNames();
        final String selectQuery = ddlCompiler.getSelectQuery();
        final String upsertQuery = QueryUtil.constructUpsertStatement(qIndexTable, indexColumns, Hint.NO_INDEX);

        configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery);
        PhoenixConfigurationUtil.setPhysicalTableName(configuration, physicalIndexTable);
        PhoenixConfigurationUtil.setOutputTableName(configuration, indexTable);
        PhoenixConfigurationUtil.setUpsertColumnNames(configuration,
                indexColumns.toArray(new String[indexColumns.size()]));
        final List<ColumnInfo> columnMetadataList = PhoenixRuntime.generateColumnInfo(connection, qIndexTable,
                indexColumns);
        ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList);

        final Path outputPath = new Path(cmdLine.getOptionValue(OUTPUT_PATH_OPTION.getOpt()),
                physicalIndexTable);
        FileSystem.get(configuration).delete(outputPath, true);

        final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, dataTable, indexTable);
        final Job job = Job.getInstance(configuration, jobName);
        job.setJarByClass(IndexTool.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        FileOutputFormat.setOutputPath(job, outputPath);

        PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable, selectQuery);
        TableMapReduceUtil.initCredentials(job);

        boolean useDirectApi = cmdLine.hasOption(DIRECT_API_OPTION.getOpt());
        if (useDirectApi) {
            job.setMapperClass(PhoenixIndexImportDirectMapper.class);
            configureSubmittableJobUsingDirectApi(job, outputPath,
                    cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt()));
        } else {
            job.setMapperClass(PhoenixIndexImportMapper.class);
            configureRunnableJobUsingBulkLoad(job, outputPath);
            // finally update the index state to ACTIVE.
            IndexToolUtil.updateIndexState(connection, qDataTable, indexTable, PIndexState.ACTIVE);
        }
        return 0;
    } catch (Exception ex) {
        LOG.error("An exception occurred while performing the indexing job: "
                + ExceptionUtils.getStackTrace(ex));
        return -1;
    } finally {
        try {
            if (connection != null) {
                connection.close();
            }
        } catch (SQLException sqle) {
            LOG.error("Failed to close connection", sqle);
            throw new RuntimeException("Failed to close connection");
        }
    }
}

From source file:org.apache.phoenix.mapreduce.OrphanViewTool.java

/**
 * Examples for input arguments:
 * -c : cleans orphan views
 * -c -op /tmp/ : cleans orphan views and links, and logs their names to the files named Orphan*.txt in /tmp/
 * -i : identifies orphan views and links, and prints their names on the console
 * -i -op /tmp/ : identifies orphan views and links, and logs their names to files named Orphan*.txt in /tmp/
 * -c -ip /tmp/ : cleans the views listed in files at /tmp/
 */
@Override
public int run(String[] args) throws Exception {
    Connection connection = null;
    try {
        final Configuration configuration = HBaseConfiguration.addHbaseResources(getConf());

        try {
            parseOptions(args);
        } catch (IllegalStateException e) {
            printHelpAndExit(e.getMessage(), getOptions());
        }
        if (outputPath != null) {
            // Create files to log orphan views and links
            for (int i = VIEW; i < ORPHAN_TYPE_COUNT; i++) {
                File file = new File(outputPath + fileName[i]);
                if (file.exists()) {
                    file.delete();
                }
                file.createNewFile();
                writer[i] = new BufferedWriter(new FileWriter(file));
            }
        }
        Properties props = new Properties();
        long scn = System.currentTimeMillis() - ageMs;
        props.setProperty("CurrentSCN", Long.toString(scn));
        connection = ConnectionUtil.getInputConnection(configuration, props);
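        // The orphan-view helpers below operate on the unwrapped PhoenixConnection.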
        PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class);
        identifyOrphanViews(phoenixConnection);
        if (clean) {
            // Close the connection with SCN
            phoenixConnection.close();
            connection = ConnectionUtil.getInputConnection(configuration);
            phoenixConnection = connection.unwrap(PhoenixConnection.class);
            // Take a snapshot of system tables to be modified
            createSnapshot(phoenixConnection, scn);
        }
        for (Map.Entry<Key, View> entry : orphanViewSet.entrySet()) {
            try {
                dropOrLogOrphanViews(phoenixConnection, configuration, entry.getKey());
            } catch (Exception e) {
                // Ignore
            }
        }
        if (clean) {
            // Wait for the view drop tasks in the SYSTEM.TASK table to be processed
            long timeInterval = configuration.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
                    QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS);
            Thread.sleep(maxViewLevel * timeInterval);
            // Clean up any remaining orphan view records from system tables
            for (Map.Entry<Key, View> entry : orphanViewSet.entrySet()) {
                try {
                    forcefullyDropView(phoenixConnection, entry.getKey());
                } catch (Exception e) {
                    // Ignore
                }
            }
        }
        if (inputPath == null) {
            removeOrLogOrphanLinks(phoenixConnection);
        } else {
            readAndRemoveOrphanLinks(phoenixConnection);
        }
        return 0;
    } catch (Exception ex) {
        LOG.error("Orphan View Tool: An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n"
                + ExceptionUtils.getStackTrace(ex));
        return -1;
    } finally {
        closeConnectionAndFiles(connection);
    }
}

From source file:org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testMetricsForUpsert() throws Exception {
    String tableName = generateUniqueName();
    String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)"
            + " SALT_BUCKETS = 6";
    Connection ddlConn = DriverManager.getConnection(getUrl());
    ddlConn.createStatement().execute(ddl);
    ddlConn.close();

    int numRows = 10;
    Connection conn = insertRowsInTable(tableName, numRows);
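    // PhoenixRuntime's metric accessors take the unwrapped PhoenixConnection.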
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    Map<String, Map<MetricType, Long>> mutationMetrics = PhoenixRuntime
            .getWriteMetricInfoForMutationsSinceLastReset(pConn);
    for (Entry<String, Map<MetricType, Long>> entry : mutationMetrics.entrySet()) {
        String t = entry.getKey();
        assertEquals("Table names didn't match!", tableName, t);
        Map<MetricType, Long> p = entry.getValue();
        assertEquals("There should have been four metrics", 4, p.size());
        boolean mutationBatchSizePresent = false;
        boolean mutationCommitTimePresent = false;
        boolean mutationBytesPresent = false;
        boolean mutationBatchFailedPresent = false;
        for (Entry<MetricType, Long> metric : p.entrySet()) {
            MetricType metricType = metric.getKey();
            long metricValue = metric.getValue();
            if (metricType.equals(MetricType.MUTATION_BATCH_SIZE)) {
                assertEquals("Mutation batch sizes didn't match!", numRows, metricValue);
                mutationBatchSizePresent = true;
            } else if (metricType.equals(MetricType.MUTATION_COMMIT_TIME)) {
                assertTrue("Mutation commit time should be greater than zero", metricValue > 0);
                mutationCommitTimePresent = true;
            } else if (metricType.equals(MetricType.MUTATION_BYTES)) {
                assertTrue("Mutation bytes size should be greater than zero", metricValue > 0);
                mutationBytesPresent = true;
            } else if (metricType.equals(MetricType.MUTATION_BATCH_FAILED_SIZE)) {
                assertEquals("Zero failed mutations expected", 0, metricValue);
                mutationBatchFailedPresent = true;
            }
        }
        assertTrue(mutationBatchSizePresent);
        assertTrue(mutationCommitTimePresent);
        assertTrue(mutationBytesPresent);
        assertTrue(mutationBatchFailedPresent);
    }
    Map<String, Map<MetricType, Long>> readMetrics = PhoenixRuntime
            .getReadMetricInfoForMutationsSinceLastReset(pConn);
    assertEquals("Read metrics should be empty", 0, readMetrics.size());
}

From source file:org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testMetricsForUpsertSelect() throws Exception {
    String tableName1 = generateUniqueName();
    long table1SaltBuckets = 6;
    String ddl = "CREATE TABLE " + tableName1 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)"
            + " SALT_BUCKETS = " + table1SaltBuckets;
    Connection ddlConn = DriverManager.getConnection(getUrl());
    ddlConn.createStatement().execute(ddl);
    ddlConn.close();
    int numRows = 10;
    insertRowsInTable(tableName1, numRows);

    String tableName2 = generateUniqueName();
    ddl = "CREATE TABLE " + tableName2 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 10";
    ddlConn = DriverManager.getConnection(getUrl());
    ddlConn.createStatement().execute(ddl);
    ddlConn.close();

    Connection conn = DriverManager.getConnection(getUrl());
    String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
    conn.createStatement().executeUpdate(upsertSelect);
    conn.commit();
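    // Read and write metrics are both fetched through the unwrapped PhoenixConnection.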
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);

    Map<String, Map<MetricType, Long>> mutationMetrics = PhoenixRuntime
            .getWriteMetricInfoForMutationsSinceLastReset(pConn);
    assertMutationMetrics(tableName2, numRows, mutationMetrics);
    Map<String, Map<MetricType, Long>> readMetrics = PhoenixRuntime
            .getReadMetricInfoForMutationsSinceLastReset(pConn);
    assertReadMetricsForMutatingSql(tableName1, table1SaltBuckets, readMetrics);
}

From source file:org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testMetricsForDelete() throws Exception {
    String tableName = generateUniqueName();
    long tableSaltBuckets = 6;
    String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)"
            + " SALT_BUCKETS = " + tableSaltBuckets;
    Connection ddlConn = DriverManager.getConnection(getUrl());
    ddlConn.createStatement().execute(ddl);
    ddlConn.close();
    int numRows = 10;
    insertRowsInTable(tableName, numRows);
    Connection conn = DriverManager.getConnection(getUrl());
    String delete = "DELETE FROM " + tableName;
    conn.createStatement().execute(delete);
    conn.commit();
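    // Unwrap to PhoenixConnection to collect the metrics recorded by the DELETE.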
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    Map<String, Map<MetricType, Long>> mutationMetrics = PhoenixRuntime
            .getWriteMetricInfoForMutationsSinceLastReset(pConn);
    assertMutationMetrics(tableName, numRows, mutationMetrics);

    Map<String, Map<MetricType, Long>> readMetrics = PhoenixRuntime
            .getReadMetricInfoForMutationsSinceLastReset(pConn);
    assertReadMetricsForMutatingSql(tableName, tableSaltBuckets, readMetrics);
}

From source file:org.apache.phoenix.schema.stats.BaseStatsCollectorIT.java

private static void invalidateStats(Connection conn, String tableName) throws SQLException {
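    // Unwrap twice on the same connection: once for the metadata cache,
    // once for the query services.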
    PTable ptable = conn.unwrap(PhoenixConnection.class).getMetaDataCache()
            .getTableRef(new PTableKey(null, tableName)).getTable();
    byte[] name = ptable.getPhysicalName().getBytes();
    conn.unwrap(PhoenixConnection.class).getQueryServices()
            .invalidateStats(new GuidePostsKey(name, SchemaUtil.getEmptyColumnFamily(ptable)));
}