Example usage for java.security PrivilegedExceptionAction PrivilegedExceptionAction

List of usage examples for java.security PrivilegedExceptionAction PrivilegedExceptionAction

Introduction

On this page you can find example usages of the java.security.PrivilegedExceptionAction constructor.

Prototype

PrivilegedExceptionAction

Source Link

Usage

From source file:io.hops.security.TestUsersGroups.java

@Test
public void testSuperUserCheck() throws Exception {
    Configuration conf = new HdfsConfiguration();

    // Allow the current (login) user to impersonate arbitrary users so that
    // createProxyUserForTesting below is honored by the NameNode.
    // NOTE(review): unlike similar tests, ProxyUsers.refreshSuperUserGroupsConfiguration(conf)
    // is not called here — confirm the proxy settings are picked up at cluster start.
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set(String.format("hadoop.proxyuser.%s.hosts", userName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.users", userName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.groups", userName), "*");

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitActive();

        String user = "testUser";

        DistributedFileSystem dfs = cluster.getFileSystem();
        dfs.addUser(user);

        UserGroupInformation ugi = UserGroupInformation.createProxyUserForTesting(user,
                UserGroupInformation.getLoginUser(), new String[] {});

        // Obtain a FileSystem handle bound to the non-superuser identity.
        DistributedFileSystem dfsTestUser = (DistributedFileSystem) ugi
                .doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws Exception {
                        return cluster.getFileSystem();
                    }
                });

        // Every user/group management operation must be rejected for a
        // non-superuser with an AccessControlException.
        try {
            dfsTestUser.addUser("user");
            fail("addUser should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call addUser
        }

        try {
            dfsTestUser.addGroup("group");
            fail("addGroup should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call addGroup
        }

        try {
            dfsTestUser.addUserToGroup("user", "group");
            fail("addUserToGroup should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call addUserToGroup
        }

        try {
            dfsTestUser.removeUser("user");
            fail("removeUser should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call removeUser
        }

        try {
            dfsTestUser.removeGroup("group");
            fail("removeGroup should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call removeGroup
        }

        try {
            dfsTestUser.removeUserFromGroup("user", "group");
            fail("removeUserFromGroup should require superuser privileges");
        } catch (AccessControlException ex) {
            // expected: only the superuser may call removeUserFromGroup
        }
    } finally {
        // Previously the cluster was never shut down, leaking its threads
        // and storage directories across tests.
        cluster.shutdown();
    }
}

From source file:org.apache.axis2.jaxws.runtime.description.marshal.impl.PackageSetBuilder.java

/**
 * Resolve the given WSDL location (relative to the "basedir" system property)
 * and load its WSDL4J Definition.
 *
 * Any failure is logged at debug level and swallowed; callers must tolerate
 * a null return.
 *
 * @param wsdlLoc path of the WSDL file relative to basedir; may be null or blank
 * @return the parsed Definition, or null if the WSDL could not be read
 */
private static Definition getWSDLDefinition(String wsdlLoc) {
    Definition wsdlDefinition = null;
    final String wsdlLocation = wsdlLoc;
    if (wsdlLocation != null && wsdlLocation.trim().length() > 0) {
        try {
            // Run inside doPrivileged so file access works when Java2 Security
            // is enabled.  Parameterizing the action removes the unchecked cast
            // the raw PrivilegedExceptionAction required.
            wsdlDefinition = AccessController.doPrivileged(new PrivilegedExceptionAction<Definition>() {
                public Definition run() throws MalformedURLException, IOException, WSDLException {
                    String baseDir = new File(System.getProperty("basedir", ".")).getCanonicalPath();
                    String wsdlLocationPath = new File(baseDir + File.separator + wsdlLocation)
                            .getAbsolutePath();
                    File file = new File(wsdlLocationPath);
                    // toURI().toURL() correctly escapes special characters in the
                    // path, unlike the deprecated File.toURL().
                    URL url = file.toURI().toURL();
                    if (log.isDebugEnabled()) {
                        log.debug("Reading WSDL from URL:" + url.toString());
                    }
                    // This is a temporary wsdl and we use it to dig into the schemas,
                    // Thus the memory limit is set to false.  It will be discarded after it is used
                    // by the PackageSetBuilder implementation.
                    WSDLWrapper wsdlWrapper = new WSDL4JWrapper(url, false, 0);
                    return wsdlWrapper.getDefinition();
                }
            });
        } catch (PrivilegedActionException e) {
            // Swallow and continue: a missing or unreadable WSDL just means no
            // packages can be derived from it.
            if (log.isDebugEnabled()) {
                log.debug("Exception getting wsdlLocation: " + e.getException());
            }
        }
    }

    return wsdlDefinition;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestSubtreeLock.java

@Test
public void testRecursiveDeleteAsaProxyUser() throws Exception {
    Configuration conf = new HdfsConfiguration();

    final int numUsers = 100;
    final String namePrefix = "testUser";

    // Let the login user impersonate each generated proxy user.
    String shortName = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set(String.format("hadoop.proxyuser.%s.hosts", shortName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.users", shortName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.groups", shortName), "*");

    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).format(true).build();
    cluster.waitActive();

    FileSystem superFs = cluster.getFileSystem();

    List<UserGroupInformation> proxyUgis = new ArrayList<>();
    List<FileSystem> userFileSystems = new ArrayList<>();

    // One proxy identity plus an associated FileSystem handle per user.
    for (int i = 0; i < numUsers; i++) {
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUserForTesting(namePrefix + i,
                UserGroupInformation.getLoginUser(), new String[] { namePrefix + i });

        FileSystem userFs = proxyUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {

            @Override
            public FileSystem run() throws Exception {
                return cluster.getFileSystem();
            }
        });

        proxyUgis.add(proxyUgi);
        userFileSystems.add(userFs);
    }

    try {

        superFs.mkdirs(new Path("/root"));
        superFs.setPermission(new Path("/root"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));

        // Each user builds a small private subtree /root/a<i>/{b<i>,c<i>}/f.
        for (int i = 0; i < userFileSystems.size(); i++) {
            FileSystem userFs = userFileSystems.get(i);
            Path userRoot = new Path(String.format("/root/a%d", i));
            userFs.mkdirs(userRoot);
            userFs.setOwner(userRoot, namePrefix + i, namePrefix + i);

            userFs.mkdirs(new Path(userRoot, "b" + i));
            userFs.mkdirs(new Path(userRoot, "c" + i));

            userFs.create(new Path(userRoot, "b" + i + "/f")).close();
            userFs.create(new Path(userRoot, "c" + i + "/f")).close();

        }

        // Each proxy user must be able to recursively delete its own subtree.
        for (int i = 0; i < userFileSystems.size(); i++) {
            FileSystem userFs = userFileSystems.get(i);
            Assert.assertTrue(userFs.delete(new Path(String.format("/root/a%d", i)), true));
            FileSystem.closeAllForUGI(proxyUgis.get(i));
        }

    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testDeletesWithoutAndWithVisibilityLabels() throws Exception {
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    Admin hBaseAdmin = TEST_UTIL.getAdmin();
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(colDesc);
    hBaseAdmin.createTable(desc);
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        // Write one cell guarded by the CONFIDENTIAL visibility label.
        Put put = new Put(row1);
        put.addColumn(fam, qual, value);
        put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        table.put(put);
        Delete d = new Delete(row1);
        // Delete WITHOUT a visibility expression — must not match the cell.
        d.addColumn(fam, qual);
        table.delete(d);
        PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                // ResultScanner now participates in try-with-resources; it was
                // previously leaked.
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName);
                        ResultScanner scanner = table.getScanner(new Scan())) {
                    // The delete could not apply because of the visibility
                    // mismatch, so the row must still be visible.
                    Result[] next = scanner.next(3);
                    assertEquals(1, next.length);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);
        d = new Delete(row1);
        // Delete WITH the matching visibility expression — must remove the cell.
        d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        d.addColumn(fam, qual);
        table.delete(d);
        scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName);
                        ResultScanner scanner = table.getScanner(new Scan())) {
                    // This delete matched the cell's visibility, so nothing remains.
                    Result[] next = scanner.next(3);
                    assertEquals(0, next.length);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);
    }
}

From source file:com.cloudera.beeswax.BeeswaxServiceImpl.java

/**
 * Run a query synchronously and return a handle (QueryHandle).
 *
 * A fresh query id is always generated; the log-context id is the
 * caller-supplied clientUuid, or the query id itself when clientUuid is empty.
 *
 * @param query      the Beeswax query to execute
 * @param clientUuid optional client-chosen id used to group log output;
 *                   may be empty but is assumed non-null
 * @return a handle identifying the query and its log context
 * @throws BeeswaxException if query initialization or execution fails
 */
@Override
public QueryHandle executeAndWait(final Query query, String clientUuid) throws BeeswaxException {
    // First, create an id and reset the LogContext
    String queryUuid = UUID.randomUUID().toString();
    String contextUuid;

    LOG.info("got context " + clientUuid);
    if (clientUuid.isEmpty()) {
        contextUuid = queryUuid;
    } else {
        contextUuid = clientUuid;
    }
    LOG.info("running query " + query.query + " context " + contextUuid);
    final QueryHandle handle = new QueryHandle(queryUuid, contextUuid);
    final LogContext lc = LogContext.registerCurrentThread(handle.log_context);
    lc.resetLog();

    // Make an administrative record of the query before running it.
    final RunningQueryState state = new RunningQueryState(query, lc);
    try {
        return doWithState(state, new PrivilegedExceptionAction<QueryHandle>() {
            public QueryHandle run() throws Exception {
                state.setQueryHandle(handle);
                // Register before running so the query is observable while active.
                runningQueries.put(handle.id, state);
                state.initialize();
                try {
                    state.run();
                } catch (BeeswaxException perr) {
                    // Record the failure on the state so clients can inspect it.
                    state.saveException(perr);
                    throw perr;
                } catch (Throwable t) {
                    state.saveException(t);
                    // Wrap any unexpected failure so the caller still receives
                    // the handle and log context.
                    throw new BeeswaxException(t.toString(), handle.log_context, handle);
                }
                return handle;
            }
        });
    } catch (BeeswaxException e) {
        // NOTE(review): catch-and-rethrow looks redundant; presumably it exists
        // to narrow doWithState's broader throws clause — confirm before removing.
        throw e;
    }
}

From source file:org.apache.axis2.jaxws.runtime.description.marshal.impl.PackageSetBuilder.java

/**
 * Return the class for this name/*  w w w  . java2 s. c  om*/
 *
 * @return Class
 */
static Class forName(final String className, final boolean initialize, final ClassLoader classloader)
        throws ClassNotFoundException {
    // NOTE: This method must remain protected because it uses AccessController
    Class cl = null;
    try {
        cl = (Class) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                // Class.forName does not support primitives
                Class cls = ClassUtils.getPrimitiveClass(className);
                if (cls == null) {
                    cls = Class.forName(className, initialize, classloader);
                }
                return cls;
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("Exception thrown from AccessController: " + e);
        }
        throw (ClassNotFoundException) e.getException();
    }

    return cl;
}

From source file:org.apache.axis2.jaxws.util.WSDL4JWrapper.java

/**
 * This method provides a Java2 Security compliant way to obtain the InputStream
 * for a given URLConnection object. This is needed as a given URLConnection object
 * may be an instance of a FileURLConnection object which would require access
 * permissions if Java2 Security was enabled.
 *
 * @param urlCon the connection whose stream is requested
 * @return the connection's InputStream
 * @throws Exception the underlying IOException from getInputStream, unwrapped
 *                   from the PrivilegedActionException
 */
private InputStream getInputStream(URLConnection urlCon) throws Exception {
    final URLConnection finalURLCon = urlCon;
    InputStream is = null;
    try {
        // Parameterizing the action removes the unchecked cast that the raw
        // PrivilegedExceptionAction required.
        is = AccessController.doPrivileged(new PrivilegedExceptionAction<InputStream>() {
            public InputStream run() throws IOException {
                return finalURLCon.getInputStream();
            }
        });
    } catch (PrivilegedActionException e) {
        // Re-throw the original cause, not the wrapper.
        throw e.getException();
    }
    return is;
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Create an HRegion with the result of a WAL split and test we only see the
 * good edits: edits for an unknown family are skipped while deletes are
 * replayed, and replay triggers memstore flushes under a small flush size.
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();

    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<byte[]>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, mvcc, scopes);
        familyNames.add(hcd.getName());
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Sync so all appends are durable before the split runs.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                // Subclass HRegion so every internal flush is counted.
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    @Override
                    protected FlushResult internalFlushcache(final WAL wal, final long myseqid,
                            final Collection<Store> storesToFlush, MonitoredTask status,
                            boolean writeFlushWalMarker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush,
                                Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();

                // The mvcc readpoint of from inserting data.
                long writePoint = mvcc.getWritePoint();

                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits: the "another family"
                // edit must have been skipped during replay.
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}

From source file:org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions.java

// Verifies that checkAndDelete honors per-cell ACLs across multiple versions:
// write permission is required on EVERY version the delete would remove, not
// just the latest one.
@Test
public void testCellPermissionsForCheckAndDelete() throws Exception {
    final byte[] TEST_ROW1 = Bytes.toBytes("r1");
    final byte[] ZERO = Bytes.toBytes(0L);

    final User user1 = User.createUserForTesting(conf, "user1", new String[0]);
    final User user2 = User.createUserForTesting(conf, "user2", new String[0]);

    // As the owner, lay down cell versions at TS=120, 123 and 127 with
    // different per-cell ACL combinations for user1/user2.
    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Map<String, Permission> permsU1andOwner = new HashMap<String, Permission>();
                permsU1andOwner.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Map<String, Permission> permsU1andU2andOwner = new HashMap<String, Permission>();
                permsU1andU2andOwner.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andU2andOwner.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andU2andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Map<String, Permission> permsU1andU2 = new HashMap<String, Permission>();
                permsU1andU2.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andU2.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));

                // TS=120: both qualifiers, RW for user1, user2 and owner.
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 120, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, 120, ZERO);
                p.setACL(permsU1andU2andOwner);
                t.put(p);

                // TS=123: both qualifiers, RW for user1 and owner only.
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 123, ZERO);
                p.add(TEST_FAMILY1, TEST_Q2, 123, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);

                // TS=127 (latest) on Q1: RW for user1 and user2.
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 127, ZERO);
                p.setACL(permsU1andU2);
                t.put(p);

                // TS=127 (latest) on Q2: user2 gets READ only.
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 127, ZERO);
                p.setACL(user2.getShortName(), new Permission(Permission.Action.READ));
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, USER_OWNER);

    // user1 should be allowed to do the checkAndDelete. user1 having read permission on the latest
    // version cell and write permission on all versions
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q1);
                t.checkAndDelete(TEST_ROW1, TEST_FAMILY1, TEST_Q1, ZERO, d);
            } finally {
                t.close();
            }
            return null;
        }
    });
    // user2 shouldn't be allowed to do the checkAndDelete. user2 having RW permission on the latest
    // version cell but not on cell version TS=123
    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1);
                d.deleteColumns(TEST_FAMILY1, TEST_Q1);
                t.checkAndDelete(TEST_ROW1, TEST_FAMILY1, TEST_Q1, ZERO, d);
                fail("user2 should not be allowed to do checkAndDelete");
            } catch (Exception e) {
                // expected — user2 lacks write permission on the TS=123 version.
                // NOTE(review): ideally assert this is an AccessDeniedException
                // rather than swallowing any Exception.
            } finally {
                t.close();
            }
            return null;
        }
    });
    // user2 should be allowed to do the checkAndDelete when delete tries to delete the old version
    // TS=120. user2 having R permission on the latest version(no W permission) cell
    // and W permission on cell version TS=120.
    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Delete d = new Delete(TEST_ROW1);
                d.deleteColumn(TEST_FAMILY1, TEST_Q2, 120);
                t.checkAndDelete(TEST_ROW1, TEST_FAMILY1, TEST_Q2, ZERO, d);
            } finally {
                t.close();
            }
            return null;
        }
    });
}

From source file:azkaban.execapp.FlowRunner.java

/**
 * Evaluate a JavaScript expression in a zero-permission sandbox and report
 * whether it yielded boolean true.
 *
 * @param expression the JavaScript snippet to evaluate
 * @return true only when the expression evaluates to Boolean true; false on
 *         any error, null result, or non-true value
 */
private boolean evaluateExpression(final String expression) {
    boolean result = false;
    final ScriptEngine engine = new ScriptEngineManager().getEngineByName("JavaScript");

    // Evaluate under an AccessControlContext whose single ProtectionDomain
    // carries no permissions, so the script cannot touch the host system.
    final AccessControlContext noPermissions = new AccessControlContext(
            new ProtectionDomain[] { new ProtectionDomain(null, null) });
    try {
        final Object value = AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws ScriptException {
                return engine.eval(expression);
            }
        }, noPermissions);
        if (value != null) {
            result = (boolean) value;
        }
    } catch (final Exception e) {
        this.logger.error("Failed to evaluate the expression.", e);
    }

    this.logger.info("Evaluate expression result: " + result);
    return result;
}