Example usage for java.security.PrivilegedExceptionAction

Introduction

On this page you can find usage examples for java.security.PrivilegedExceptionAction, collected from open-source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
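
PrivilegedExceptionAction is the checked-exception counterpart of PrivilegedAction: its run() method may throw a checked exception, which AccessController.doPrivileged wraps in a PrivilegedActionException. The sketch below is a minimal, self-contained illustration of that idiom; the file path is only a placeholder, and note that AccessController is deprecated for removal in recent JDKs.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedExceptionActionSketch {
    public static void main(String[] args) {
        try {
            FileInputStream in = AccessController
                    .doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                        @Override
                        public FileInputStream run() throws FileNotFoundException {
                            // Runs with the privileges of this class's protection domain;
                            // the path is a placeholder for any checked-exception work.
                            return new FileInputStream("/etc/hosts");
                        }
                    });
            System.out.println("Opened: " + in);
        } catch (PrivilegedActionException e) {
            // getException() returns the checked exception thrown by run().
            System.err.println("run() failed: " + e.getException());
        }
    }
}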

Usage

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

/**
 * If no security policy exists, it will be created. If a policy exists, it will be updated.
 */
@Override
public void updateSecurityGroupsForAllPolicies(String categoryName, String feedName,
        List<String> securityGroupNames, Map<String, Object> feedProperties) {
    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {
            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {

                        if (securityGroupNames == null || securityGroupNames.isEmpty()) {

                            // Only delete if the policy exists. It's possible that someone adds a security group right after feed creation and before initial ingestion.
                            String sentryPolicyName = getHivePolicyName(categoryName, feedName);
                            if ((sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
                                deleteHivePolicy(categoryName, feedName);
                            }
                            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                                String hdfsFoldersWithCommas = ((String) feedProperties
                                        .get(REGISTRATION_HDFS_FOLDERS)).replace("\n", ",");
                                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(","))
                                        .stream().collect(Collectors.toList());
                                deleteHdfsPolicy(categoryName, feedName, hdfsFolders);
                            }

                        } else {

                            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                                String hdfsFoldersWithCommas = ((String) feedProperties
                                        .get(REGISTRATION_HDFS_FOLDERS)).replace("\n", ",");
                                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(","))
                                        .stream().collect(Collectors.toList());
                                createReadOnlyHdfsPolicy(categoryName, feedName, securityGroupNames,
                                        hdfsFolders);
                            }

                            String sentryHivePolicyName = getHivePolicyName(categoryName, feedName);
                            if (!sentryClientObject.checkIfRoleExists(sentryHivePolicyName)) {
                                if (!StringUtils
                                        .isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                                    String hiveTablesWithCommas = ((String) feedProperties
                                            .get(REGISTRATION_HIVE_TABLES)).replace("\n", ",");
                                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(","))
                                            .stream().collect(Collectors.toList());
                                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames,
                                            hiveSchema, hiveTables);
                                }

                            } else {

                                if (!StringUtils
                                        .isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                                    try {
                                        sentryClientObject.dropRole(sentryHivePolicyName);
                                    } catch (SentryClientException e) {
                                        log.error("Unable to delete Hive policy  " + sentryHivePolicyName
                                                + " in Sentry   " + e.getMessage());
                                        throw new RuntimeException(e);
                                    }

                                    String hiveTablesWithCommas = ((String) feedProperties
                                            .get(REGISTRATION_HIVE_TABLES)).replace("\n", ",");
                                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(","))
                                            .stream().collect(Collectors.toList());
                                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                                    List<String> hivePermissions = new ArrayList<>();
                                    hivePermissions.add(HIVE_READ_ONLY_PERMISSION);
                                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames,
                                            hiveSchema, hiveTables);
                                }
                            }
                        }
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Error Creating Sentry Policy using Kerberos Authentication" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        if (securityGroupNames == null || securityGroupNames.isEmpty()) {

            String sentryPolicyName = getHivePolicyName(categoryName, feedName);
            if ((sentryClientObject.checkIfRoleExists(sentryPolicyName))) {
                deleteHivePolicy(categoryName, feedName);
            }

            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                String hdfsFoldersWithCommas = ((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))
                        .replace("\n", ",");
                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(",")).stream()
                        .collect(Collectors.toList());
                deleteHdfsPolicy(categoryName, feedName, hdfsFolders);
            }
        } else {

            if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))) {
                String hdfsFoldersWithCommas = ((String) feedProperties.get(REGISTRATION_HDFS_FOLDERS))
                        .replace("\n", ",");
                List<String> hdfsFolders = Arrays.asList(hdfsFoldersWithCommas.split(",")).stream()
                        .collect(Collectors.toList());
                createReadOnlyHdfsPolicy(categoryName, feedName, securityGroupNames, hdfsFolders);
            }

            String sentryHivePolicyName = getHivePolicyName(categoryName, feedName);
            if (!sentryClientObject.checkIfRoleExists(sentryHivePolicyName)) {

                if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                    String hiveTablesWithCommas = ((String) feedProperties.get(REGISTRATION_HIVE_TABLES))
                            .replace("\n", ",");
                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(",")).stream()
                            .collect(Collectors.toList());
                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));

                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames, hiveSchema,
                            hiveTables);
                }
            } else {

                if (!StringUtils.isEmpty((String) feedProperties.get(REGISTRATION_HIVE_TABLES))) {
                    try {
                        sentryClientObject.dropRole(sentryHivePolicyName);
                    } catch (SentryClientException e) {
                        log.error("Unable to delete Hive policy  " + sentryHivePolicyName + " in Sentry   "
                                + e.getMessage());
                        throw new RuntimeException(e);
                    }
                    String hiveTablesWithCommas = ((String) feedProperties.get(REGISTRATION_HIVE_TABLES))
                            .replace("\n", ",");
                    List<String> hiveTables = Arrays.asList(hiveTablesWithCommas.split(",")).stream()
                            .collect(Collectors.toList());
                    String hiveSchema = ((String) feedProperties.get(REGISTRATION_HIVE_SCHEMA));
                    List<String> hivePermissions = new ArrayList<>();
                    hivePermissions.add(HIVE_READ_ONLY_PERMISSION);
                    createOrUpdateReadOnlyHivePolicy(categoryName, feedName, securityGroupNames, hiveSchema,
                            hiveTables);
                }
            }
        }

    }
}
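
The Sentry example above follows the shape shared by most Hadoop snippets on this page: authenticate with Kerberos once to obtain a UserGroupInformation, then wrap the privileged calls in a PrivilegedExceptionAction passed to doAs. A minimal sketch of that shape, assuming a placeholder principal and keytab path:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosDoAsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder principal and keytab path; substitute real values.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "svc/host@EXAMPLE.COM", "/etc/security/keytabs/svc.keytab");

        // Every Hadoop call inside run() executes as the authenticated user.
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                return fs.exists(new Path("/tmp"));
            }
        });
        System.out.println("/tmp exists: " + exists);
    }
}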

From source file:org.apache.bsf.BSFManager.java

/**
 * Compile the given script of the given language into the given
 * <tt>CodeBuffer</tt>.
 *
 * @param lang     language identifier
 * @param source   (context info) the source of this script
 *                 (e.g., filename)
 * @param lineNo   (context info) the line number in source for script
 * @param columnNo (context info) the column number in source for script
 * @param script   the script to compile
 * @param cb       code buffer to compile into
 *
 * @exception BSFException if any error while compiling the script
 */
public void compileScript(String lang, String source, int lineNo, int columnNo, Object script, CodeBuffer cb)
        throws BSFException {
    logger.debug("BSFManager:compileScript");

    final BSFEngine e = loadScriptingEngine(lang);
    final String sourcef = source;
    final int lineNof = lineNo, columnNof = columnNo;
    final Object scriptf = script;
    final CodeBuffer cbf = cb;

    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws Exception {
                e.compileScript(sourcef, lineNof, columnNof, scriptf, cbf);
                return null;
            }
        });
    } catch (PrivilegedActionException prive) {

        logger.error("Exception :", prive);
        throw (BSFException) prive.getException();
    }
}
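
BSF predates generics, so the action above uses the raw PrivilegedExceptionAction type. For reference, here is a sketch of the same call in generic form (a fragment, assuming the same surrounding final variables as the method above: e, sourcef, lineNof, columnNof, scriptf, cbf):

    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                e.compileScript(sourcef, lineNof, columnNof, scriptf, cbf);
                return null;
            }
        });
    } catch (PrivilegedActionException prive) {
        logger.error("Exception :", prive);
        // getException() recovers the checked exception thrown inside run().
        throw (BSFException) prive.getException();
    }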

From source file:org.apache.hadoop.hdfs.TestQuota.java

/**
 * Test quota related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
 */
@Test
public void testQuotaCommands() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // set a smaller block size so that we can test with smaller
    // Space quotas
    final int DEFAULT_BLOCK_SIZE = 512;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    // Make it relinquish locks. When run serially, the result should
    // be identical.
    conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_QUOTA_UPDATE_INTERVAL_KEY, 1000);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSAdmin admin = new DFSAdmin(conf);

    try {
        final int fileLen = 1024;
        final short replication = 5;
        final long spaceQuota = fileLen * replication * 15 / 8;

        // 1: create a directory /test and set its quota to be 3
        final Path parent = new Path("/test");
        assertTrue(dfs.mkdirs(parent));
        String[] args = new String[] { "-setQuota", "3", parent.toString() };
        runCommand(admin, args, false);

        //try setting space quota with a 'binary prefix'
        runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
        assertEquals(2L << 40, DFSTestUtil.getContentSummary(dfs, parent).getSpaceQuota());

        // set the disk space quota (fileLen * replication * 15 / 8)
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());

        // 2: create directory /test/data0
        final Path childDir0 = new Path(parent, "data0");
        assertTrue(dfs.mkdirs(childDir0));

        // 3: create a file /test/datafile0
        final Path childFile0 = new Path(parent, "datafile0");
        DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
        DFSTestUtil.waitForQuotaUpdatesToBeApplied();
        // 4: count -q /test
        ContentSummary c = DFSTestUtil.getContentSummary(dfs, parent);
        DFSTestUtil.waitForQuotaUpdatesToBeApplied();
        assertEquals(c.getFileCount() + c.getDirectoryCount(), 3);
        assertEquals(c.getQuota(), 3);
        assertEquals(c.getSpaceConsumed(), fileLen * replication);
        assertEquals(c.getSpaceQuota(), spaceQuota);

        // 5: count -q /test/data0
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getFileCount() + c.getDirectoryCount(), 1);
        assertEquals(c.getQuota(), -1);
        // check disk space consumed
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getSpaceConsumed(), fileLen * replication);

        // 6: create a directory /test/data1
        final Path childDir1 = new Path(parent, "data1");
        boolean hasException = false;
        try {
            // HOP - Wait for quota updates to be applied
            DFSTestUtil.waitForQuotaUpdatesToBeApplied();
            assertFalse(dfs.mkdirs(childDir1));
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        OutputStream fout;

        // 7: create a file /test/datafile1
        final Path childFile1 = new Path(parent, "datafile1");
        hasException = false;
        try {
            fout = dfs.create(childFile1);
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        // 8: clear quota /test
        runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getQuota(), -1);
        assertEquals(c.getSpaceQuota(), spaceQuota);

        // 9: clear quota /test/data0
        runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getQuota(), -1);

        // 10: create a file /test/datafile1
        fout = dfs.create(childFile1, replication);

        // 10.s: but writing fileLen bytes should result in a quota exception
        hasException = false;
        try {
            // HOP - Write in single blocks and wait to trigger exception
            fout.write(new byte[fileLen / 2]);
            // ensure that the first block is written out (see FSOutputSummer#flush)
            fout.flush();
            DFSTestUtil.waitForQuotaUpdatesToBeApplied();
            fout.write(new byte[fileLen / 2]);
            // ensure that the first block is written out (see FSOutputSummer#flush)
            fout.flush();
            fout.close();
        } catch (QuotaExceededException e) {
            hasException = true;
            IOUtils.closeStream(fout);
        }
        assertTrue(hasException);

        //delete the file
        dfs.delete(childFile1, false);

        // 9.s: clear diskspace quota
        runCommand(admin, false, "-clrSpaceQuota", parent.toString());
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getQuota(), -1);
        assertEquals(c.getSpaceQuota(), -1);

        // now creating childFile1 should succeed
        DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);

        // 11: set the quota of /test to be 1
        // HADOOP-5872 - we can set quota even if it is immediately violated 
        args = new String[] { "-setQuota", "1", parent.toString() };
        runCommand(admin, args, false);
        runCommand(admin, false, "-setSpaceQuota", // for space quota
                Integer.toString(fileLen), args[2]);
        // NOTE: the remainder of this test is skipped via this early return.
        if (true) {
            return;
        }
        // 12: set the quota of /test/data0 to be 1
        args = new String[] { "-setQuota", "1", childDir0.toString() };
        runCommand(admin, args, false);

        // 13: not able to create a directory under data0
        hasException = false;
        try {
            assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getDirectoryCount() + c.getFileCount(), 1);
        assertEquals(c.getQuota(), 1);

        // 14a: set quota on a non-existent directory
        Path nonExistentPath = new Path("/test1");
        assertFalse(dfs.exists(nonExistentPath));
        args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
                nonExistentPath.toString());

        // 14b: set quota on a file
        assertTrue(dfs.isFile(childFile0));
        args[1] = childFile0.toString();
        runCommand(admin, args, true);
        // same for space quota
        runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);

        // 15a: clear quota on a file
        args[0] = "-clrQuota";
        runCommand(admin, args, true);
        runCommand(admin, true, "-clrSpaceQuota", args[1]);

        // 15b: clear quota on a non-existent directory
        args[1] = nonExistentPath.toString();
        runCommand(admin, args, true);
        runCommand(admin, true, "-clrSpaceQuota", args[1]);

        // 16a: set the quota of /test to be 0
        args = new String[] { "-setQuota", "0", parent.toString() };
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", "0", args[2]);

        // 16b: set the quota of /test to be -1
        args[1] = "-1";
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16c: set the quota of /test to be Long.MAX_VALUE+1
        args[1] = String.valueOf(Long.MAX_VALUE + 1L);
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16d: set the quota of /test to be a non integer
        args[1] = "33aa1.5";
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16e: set space quota with a value larger than Long.MAX_VALUE
        runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);

        // 17:  setQuota by a non-administrator
        final String username = "userxx";
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                new String[] { "groupyy" });

        final String[] args2 = args.clone(); // need final ref for doAs block
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                assertEquals("Not running as new user", username,
                        UserGroupInformation.getCurrentUser().getShortUserName());
                DFSAdmin userAdmin = new DFSAdmin(conf);

                args2[1] = "100";
                runCommand(userAdmin, args2, true);
                runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);

                // 18: clrQuota by a non-administrator
                String[] args3 = new String[] { "-clrQuota", parent.toString() };
                runCommand(userAdmin, args3, true);
                runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);

                return null;
            }
        });

        // 19: clrQuota on the root directory ("/") should fail
        runCommand(admin, true, "-clrQuota", "/");

        // 20: setQuota on the root directory ("/") should succeed
        runCommand(admin, false, "-setQuota", "1000000", "/");

        runCommand(admin, true, "-clrQuota", "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
        runCommand(admin, false, "-clrSpaceQuota", parent.toString());

        // 2: create directory /test/data2
        final Path childDir2 = new Path(parent, "data2");
        assertTrue(dfs.mkdirs(childDir2));

        final Path childFile2 = new Path(childDir2, "datafile2");
        final Path childFile3 = new Path(childDir2, "datafile3");
        final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
        final long fileLen2 = DEFAULT_BLOCK_SIZE;
        // set space quota to a really low value
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
        // clear space quota
        runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
        // create a file that is greater than the size of space quota
        DFSTestUtil.createFile(fs, childFile2, fileLen2, replication, 0);

        // now set space quota again. This should succeed
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());

        hasException = false;
        try {
            DFSTestUtil.createFile(fs, childFile3, fileLen2, replication, 0);
        } catch (DSQuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        // now test the same for root
        final Path childFile4 = new Path("/", "datafile2");
        final Path childFile5 = new Path("/", "datafile3");

        runCommand(admin, true, "-clrQuota", "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        // set space quota to a really low value
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        DFSTestUtil.createFile(fs, childFile4, fileLen2, replication, 0);
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");

        hasException = false;
        try {
            DFSTestUtil.createFile(fs, childFile5, fileLen2, replication, 0);
        } catch (DSQuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);
        assertEquals(4, cluster.getNamesystem().getFSDirectory().getYieldCount());
    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions.java

@Test
public void testCellPermissionsWithDeleteMutipleVersions() throws Exception {
    // table/column/qualifier level permissions
    final byte[] TEST_ROW1 = Bytes.toBytes("r1");
    final byte[] TEST_ROW2 = Bytes.toBytes("r2");
    final byte[] TEST_Q1 = Bytes.toBytes("q1");
    final byte[] TEST_Q2 = Bytes.toBytes("q2");
    final byte[] ZERO = Bytes.toBytes(0L);

    // additional test user
    final User user1 = User.createUserForTesting(conf, "user1", new String[0]);
    final User user2 = User.createUserForTesting(conf, "user2", new String[0]);

    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                // with rw ACL for "user1"
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                p.setACL(user1.getShortName(), new Permission(Permission.Action.READ, Permission.Action.WRITE));
                t.put(p);
                // with rw ACL for "user1"
                p = new Put(TEST_ROW2);
                p.add(TEST_FAMILY1, TEST_Q1, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                p.setACL(user1.getShortName(), new Permission(Permission.Action.READ, Permission.Action.WRITE));
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, USER_OWNER);

    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                // with rw ACL for "user1" and "user2"
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                Map<String, Permission> perms = new HashMap<String, Permission>();
                perms.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                perms.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                p.setACL(perms);
                t.put(p);
                // with rw ACL for "user1" and "user2"
                p = new Put(TEST_ROW2);
                p.add(TEST_FAMILY1, TEST_Q1, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, ZERO);
                p.setACL(perms);
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, user1);

    // user1 should be allowed to delete TEST_ROW1, since it has write permission on both
    // versions of the cells
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q2);
                t.delete(d);
            } finally {
                t.close();
            }
            return null;
        }
    });
    // user2 should not be allowed to delete TEST_ROW2, since it has write permission on only
    // one version of the cells.
    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW2);
                d.deleteColumns(TEST_FAMILY1, TEST_Q1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q2);
                t.delete(d);
                fail("user2 should not be allowed to delete the row");
            } catch (Exception e) {
                // expected: the delete should be denied
            } finally {
                t.close();
            }
            return null;
        }
    });
    // user1 should be allowed to delete the cf. (All data under cf for a row)
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW2);
                d.deleteFamily(TEST_FAMILY1);
                t.delete(d);
            } finally {
                t.close();
            }
            return null;
        }
    });
}

From source file:org.apache.hadoop.hdfs.TestSmallFilesQuota.java

/**
 * Test quota related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
 */
@Test
public void testQuotaCommands() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_QUOTA_UPDATE_INTERVAL_KEY, 1000);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;

    dfs.setStoragePolicy(new Path("/"), "DB");

    DFSAdmin admin = new DFSAdmin(conf);

    try {
        final int fileLen = 1024;
        final short replication = 5;
        final long spaceQuota = fileLen * replication * 15 / 8;

        // 1: create a directory /test and set its quota to be 3
        final Path parent = new Path("/test");
        assertTrue(dfs.mkdirs(parent));
        String[] args = new String[] { "-setQuota", "3", parent.toString() };
        runCommand(admin, args, false);

        //try setting space quota with a 'binary prefix'
        runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
        assertEquals(2L << 40, DFSTestUtil.getContentSummary(dfs, parent).getSpaceQuota());

        // set the disk space quota (fileLen * replication * 15 / 8)
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());

        // 2: create directory /test/data0
        final Path childDir0 = new Path(parent, "data0");
        assertTrue(dfs.mkdirs(childDir0));

        // 3: create a file /test/datafile0
        final Path childFile0 = new Path(parent, "datafile0");
        DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
        DFSTestUtil.waitForQuotaUpdatesToBeApplied();
        // 4: count -q /test
        ContentSummary c = DFSTestUtil.getContentSummary(dfs, parent);
        DFSTestUtil.waitForQuotaUpdatesToBeApplied();
        assertEquals(c.getFileCount() + c.getDirectoryCount(), 3);
        assertEquals(c.getQuota(), 3);
        assertEquals(c.getSpaceConsumed(), fileLen * replication);
        assertEquals(c.getSpaceQuota(), spaceQuota);

        // 5: count -q /test/data0
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getFileCount() + c.getDirectoryCount(), 1);
        assertEquals(c.getQuota(), -1);
        // check disk space consumed
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getSpaceConsumed(), fileLen * replication);

        // 6: create a directory /test/data1
        final Path childDir1 = new Path(parent, "data1");
        boolean hasException = false;
        try {
            // HOP - Wait for quota updates to be applied
            DFSTestUtil.waitForQuotaUpdatesToBeApplied();
            assertFalse(dfs.mkdirs(childDir1));
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        OutputStream fout;

        // 7: create a file /test/datafile1
        final Path childFile1 = new Path(parent, "datafile1");
        hasException = false;
        try {
            fout = dfs.create(childFile1);
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        // 8: clear quota /test
        runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getQuota(), -1);
        assertEquals(c.getSpaceQuota(), spaceQuota);

        // 9: clear quota /test/data0
        runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getQuota(), -1);

        // 10: create a file /test/datafile1
        fout = dfs.create(childFile1, replication);

        // 10.s: but writing fileLen bytes should result in a quota exception
        hasException = false;
        try {
            // HOP - Write in single blocks and wait to trigger exception
            fout.write(new byte[fileLen / 2]);
            DFSTestUtil.waitForQuotaUpdatesToBeApplied();
            fout.write(new byte[fileLen / 2]);
            fout.close();
        } catch (QuotaExceededException e) {
            hasException = true;
            IOUtils.closeStream(fout);
        }
        assertTrue(hasException);

        //delete the file
        dfs.delete(childFile1, false);

        // 9.s: clear diskspace quota
        runCommand(admin, false, "-clrSpaceQuota", parent.toString());
        c = DFSTestUtil.getContentSummary(dfs, parent);
        assertEquals(c.getQuota(), -1);
        assertEquals(c.getSpaceQuota(), -1);

        // now creating childFile1 should succeed
        DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);

        // 11: set the quota of /test to be 1
        // HADOOP-5872 - we can set quota even if it is immediately violated 
        args = new String[] { "-setQuota", "1", parent.toString() };
        runCommand(admin, args, false);
        runCommand(admin, false, "-setSpaceQuota", // for space quota
                Integer.toString(fileLen), args[2]);
        // NOTE: the remainder of this test is skipped via this early return.
        if (true) {
            return;
        }
        // 12: set the quota of /test/data0 to be 1
        args = new String[] { "-setQuota", "1", childDir0.toString() };
        runCommand(admin, args, false);

        // 13: not able to create a directory under data0
        hasException = false;
        try {
            assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
        } catch (QuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);
        c = DFSTestUtil.getContentSummary(dfs, childDir0);
        assertEquals(c.getDirectoryCount() + c.getFileCount(), 1);
        assertEquals(c.getQuota(), 1);

        // 14a: set quota on a non-existent directory
        Path nonExistentPath = new Path("/test1");
        assertFalse(dfs.exists(nonExistentPath));
        args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
                nonExistentPath.toString());

        // 14b: set quota on a file
        assertTrue(dfs.isFile(childFile0));
        args[1] = childFile0.toString();
        runCommand(admin, args, true);
        // same for space quota
        runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);

        // 15a: clear quota on a file
        args[0] = "-clrQuota";
        runCommand(admin, args, true);
        runCommand(admin, true, "-clrSpaceQuota", args[1]);

        // 15b: clear quota on a non-existent directory
        args[1] = nonExistentPath.toString();
        runCommand(admin, args, true);
        runCommand(admin, true, "-clrSpaceQuota", args[1]);

        // 16a: set the quota of /test to be 0
        args = new String[] { "-setQuota", "0", parent.toString() };
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", "0", args[2]);

        // 16b: set the quota of /test to be -1
        args[1] = "-1";
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16c: set the quota of /test to be Long.MAX_VALUE+1
        args[1] = String.valueOf(Long.MAX_VALUE + 1L);
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16d: set the quota of /test to be a non integer
        args[1] = "33aa1.5";
        runCommand(admin, args, true);
        runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);

        // 16e: set space quota with a value larger than Long.MAX_VALUE
        runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);

        // 17:  setQuota by a non-administrator
        final String username = "userxx";
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                new String[] { "groupyy" });

        final String[] args2 = args.clone(); // need final ref for doAs block
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                assertEquals("Not running as new user", username,
                        UserGroupInformation.getCurrentUser().getShortUserName());
                DFSAdmin userAdmin = new DFSAdmin(conf);

                args2[1] = "100";
                runCommand(userAdmin, args2, true);
                runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);

                // 18: clrQuota by a non-administrator
                String[] args3 = new String[] { "-clrQuota", parent.toString() };
                runCommand(userAdmin, args3, true);
                runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);

                return null;
            }
        });

        // 19: clrQuota on the root directory ("/") should fail
        runCommand(admin, true, "-clrQuota", "/");

        // 20: setQuota on the root directory ("/") should succeed
        runCommand(admin, false, "-setQuota", "1000000", "/");

        runCommand(admin, true, "-clrQuota", "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
        runCommand(admin, false, "-clrSpaceQuota", parent.toString());

        // 2: create directory /test/data2
        final Path childDir2 = new Path(parent, "data2");
        assertTrue(dfs.mkdirs(childDir2));

        final Path childFile2 = new Path(childDir2, "datafile2");
        final Path childFile3 = new Path(childDir2, "datafile3");
        final long fileLen2 = 512;
        final long spaceQuota2 = fileLen2 * replication;
        // set space quota to a really low value
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
        // clear space quota
        runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
        // create a file that is greater than the size of space quota
        DFSTestUtil.createFile(fs, childFile2, fileLen2, replication, 0);

        // now set space quota again. This should succeed
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());

        hasException = false;
        try {
            DFSTestUtil.createFile(fs, childFile3, fileLen2, replication, 0);
        } catch (DSQuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

        // now test the same for root
        final Path childFile4 = new Path("/", "datafile2");
        final Path childFile5 = new Path("/", "datafile3");

        runCommand(admin, true, "-clrQuota", "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        // set space quota to a really low value
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
        runCommand(admin, false, "-clrSpaceQuota", "/");
        DFSTestUtil.createFile(fs, childFile4, fileLen2, replication, 0);
        runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");

        hasException = false;
        try {
            DFSTestUtil.createFile(fs, childFile5, fileLen2, replication, 0);
        } catch (DSQuotaExceededException e) {
            hasException = true;
        }
        assertTrue(hasException);

    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.java

/** Handle HTTP PUT request. */
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({ "*/*" })
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response put(@Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT) final UserParam username,
        @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) final DoAsParam doAsUser,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT) final PutOpParam op,
        @QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT) final DestinationParam destination,
        @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) final OwnerParam owner,
        @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) final GroupParam group,
        @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT) final PermissionParam permission,
        @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) final OverwriteParam overwrite,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize,
        @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT) final ReplicationParam replication,
        @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) final BlockSizeParam blockSize,
        @QueryParam(ModificationTimeParam.NAME) @DefaultValue(ModificationTimeParam.DEFAULT) final ModificationTimeParam modificationTime,
        @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT) final AccessTimeParam accessTime,
        @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT) final TokenArgumentParam delegationTokenArgument)
        throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op, destination, owner, group, permission, overwrite,
            bufferSize, replication, blockSize, modificationTime, accessTime, delegationTokenArgument);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException, URISyntaxException {
            REMOTE_ADDRESS.set(request.getRemoteAddr());
            try {

                final String fullpath = path.getAbsolutePath();
                final Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
                final NameNode namenode = (NameNode) context.getAttribute("name.node");

                switch (op.getValue()) {
                case CREATE: {
                    final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath,
                            op.getValue(), -1L, permission, overwrite, bufferSize, replication, blockSize);
                    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case MKDIRS: {
                    final boolean b = namenode.mkdirs(fullpath, permission.getFsPermission());
                    final String js = JsonUtil.toJsonString("boolean", b);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case RENAME: {
                    final boolean b = namenode.rename(fullpath, destination.getValue());
                    final String js = JsonUtil.toJsonString("boolean", b);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case SETREPLICATION: {
                    final boolean b = namenode.setReplication(fullpath, replication.getValue(conf));
                    final String js = JsonUtil.toJsonString("boolean", b);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case SETOWNER: {
                    if (owner.getValue() == null && group.getValue() == null) {
                        throw new IllegalArgumentException("Both owner and group are empty.");
                    }

                    namenode.setOwner(fullpath, owner.getValue(), group.getValue());
                    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case SETPERMISSION: {
                    namenode.setPermission(fullpath, permission.getFsPermission());
                    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case SETTIMES: {
                    namenode.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
                    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                case RENEWDELEGATIONTOKEN: {
                    final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
                    token.decodeFromUrlString(delegationTokenArgument.getValue());
                    final long expiryTime = namenode.renewDelegationToken(token);
                    final String js = JsonUtil.toJsonString("long", expiryTime);
                    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                }
                case CANCELDELEGATIONTOKEN: {
                    final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
                    token.decodeFromUrlString(delegationTokenArgument.getValue());
                    namenode.cancelDelegationToken(token);
                    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                default:
                    throw new UnsupportedOperationException(op + " is not supported");
                }

            } finally {
                REMOTE_ADDRESS.set(null);
            }
        }
    });
}

From source file:com.trendmicro.hdfs.webdav.HDFSResource.java

@SuppressWarnings({ "rawtypes", "unchecked" })
private void populateProperties() {
    if (properties != null) {
        return;
    }
    properties = new DavPropertySet();
    FileStatus stat = null;
    try {
        stat = user.doAs(new PrivilegedExceptionAction<FileStatus>() {
            public FileStatus run() throws Exception {
                return FileSystem.get(conf).getFileStatus(getPath());
            }
        });
    } catch (IOException ex) {
        LOG.warn(StringUtils.stringifyException(ex));
    } catch (InterruptedException e) {
        LOG.warn(StringUtils.stringifyException(e));
    }
    if (stat != null) {
        properties.add(new DefaultDavProperty(DavPropertyName.GETCONTENTLENGTH, stat.getLen()));
        SimpleDateFormat simpleFormat = (SimpleDateFormat) DavConstants.modificationDateFormat.clone();
        simpleFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
        Date date = new Date(stat.getModificationTime());
        properties.add(new DefaultDavProperty(DavPropertyName.GETLASTMODIFIED, simpleFormat.format(date)));
        properties.add(new DefaultDavProperty(SecurityConstants.OWNER, stat.getOwner()));
        properties.add(new DefaultDavProperty(SecurityConstants.GROUP, stat.getGroup()));
        // TODO: Populate DAV property SecurityConstants.CURRENT_USER_PRIVILEGE_SET
    }
    if (getDisplayName() != null) {
        properties.add(new DefaultDavProperty(DavPropertyName.DISPLAYNAME, getDisplayName()));
    }
    if (isCollection()) {
        properties.add(new ResourceType(ResourceType.COLLECTION));
        // Windows XP support
        properties.add(new DefaultDavProperty(DavPropertyName.ISCOLLECTION, "1"));
    } else {
        properties.add(new ResourceType(ResourceType.DEFAULT_RESOURCE));
        // Windows XP support
        properties.add(new DefaultDavProperty(DavPropertyName.ISCOLLECTION, "0"));
    }
}

From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java

@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + KMSRESTConstants.METADATA_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getMetadata(@PathParam("name") final String name) throws Exception {
    try {
        LOG.trace("Entering getMetadata method.");
        UserGroupInformation user = HttpUserGroupInformation.get();
        KMSClientProvider.checkNotEmpty(name, "name");
        KMSWebApp.getAdminCallsMeter().mark();
        assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
        LOG.debug("Getting metadata for key with name {}.", name);

        KeyProvider.Metadata metadata = user.doAs(new PrivilegedExceptionAction<KeyProvider.Metadata>() {
            @Override
            public KeyProvider.Metadata run() throws Exception {
                return provider.getMetadata(name);
            }
        });

        Object json = KMSServerJSONUtils.toJSON(name, metadata);
        kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
        LOG.trace("Exiting getMetadata method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
    } catch (Exception e) {
        LOG.debug("Exception in getMetadata.", e);
        throw e;
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure.java

/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyColumnFamilyState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case MODIFY_COLUMN_FAMILY_PRE_OPERATION:
                    cpHost.preModifyColumnHandler(tableName, cfDescriptor);
                    break;
                case MODIFY_COLUMN_FAMILY_POST_OPERATION:
                    cpHost.postModifyColumnHandler(tableName, cfDescriptor);
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}

From source file:org.apache.hadoop.hbase.rest.TestSecureRESTServer.java

@Test
public void testPositiveAuthorization() throws Exception {
    // Create a table, write a row to it, grant read perms to the client
    UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL,
            serviceKeytab.getAbsolutePath());
    final TableName table = TableName.valueOf("publicTable");
    superuser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
                TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")).build();
                conn.getAdmin().createTable(desc);
                try (Table t = conn.getTable(table)) {
                    Put p = new Put(Bytes.toBytes("a"));
                    p.addColumn(Bytes.toBytes("f1"), new byte[0], Bytes.toBytes("1"));
                    t.put(p);
                }
                AccessControlClient.grant(conn, CLIENT_PRINCIPAL, Action.READ);
            } catch (Throwable e) {
                if (e instanceof Exception) {
                    throw (Exception) e;
                } else {
                    throw new Exception(e);
                }
            }
            return null;
        }
    });

    // Read that row as the client
    Pair<CloseableHttpClient, HttpClientContext> pair = getClient();
    CloseableHttpClient client = pair.getFirst();
    HttpClientContext context = pair.getSecond();

    HttpGet get = new HttpGet(
            new URL("http://localhost:" + REST_TEST.getServletPort()).toURI() + "/" + table + "/a");
    get.addHeader("Accept", "application/json");
    UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI(CLIENT_PRINCIPAL,
            clientKeytab.getAbsolutePath());
    String jsonResponse = user.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
            try (CloseableHttpResponse response = client.execute(get, context)) {
                final int statusCode = response.getStatusLine().getStatusCode();
                assertEquals(response.getStatusLine().toString(), HttpURLConnection.HTTP_OK, statusCode);
                HttpEntity entity = response.getEntity();
                return EntityUtils.toString(entity);
            }
        }
    });
    ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class,
            MediaType.APPLICATION_JSON_TYPE);
    CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class);
    assertEquals(1, model.getRows().size());
    RowModel row = model.getRows().get(0);
    assertEquals("a", Bytes.toString(row.getKey()));
    assertEquals(1, row.getCells().size());
    CellModel cell = row.getCells().get(0);
    assertEquals("1", Bytes.toString(cell.getValue()));
}