Example usage for java.security PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction from open source projects. Each example implements the interface, usually as an anonymous class, and passes it to Subject.doAs, UserGroupInformation.doAs, or AccessController.doPrivileged.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
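
The interface declares a single method, T run() throws Exception, so it can wrap privileged code that throws checked exceptions. A minimal, self-contained sketch using AccessController.doPrivileged (the file path is purely illustrative):

import java.io.FileReader;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {
    public static void main(String[] args) throws Exception {
        try {
            // run() may throw a checked exception; doPrivileged wraps it
            // in a PrivilegedActionException.
            FileReader reader = AccessController.doPrivileged(new PrivilegedExceptionAction<FileReader>() {
                @Override
                public FileReader run() throws Exception {
                    return new FileReader("/etc/hosts"); // illustrative path
                }
            });
            reader.close();
        } catch (PrivilegedActionException e) {
            // Unwrap and rethrow the original checked exception from run().
            throw e.getException();
        }
    }
}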

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.TestSubtreeLockACL.java
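
This HopsFS test uses the action to obtain a FileSystem handle as a second test user (user2) via UserGroupInformation.doAs, then verifies that an access ACL down the subtree blocks a subtree delete.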

@Test
public void testSubtreeDeleteBlockedByAccessAcl() throws IOException, InterruptedException {
    try {
        setup();

        //Deny access via default acl down subtree1
        setDenyUserAccessAcl(user2.getShortUserName(), level1folder1);

        //Try to delete subtree1. Should fail because of access acl down the tree.
        FileSystem user2fs = user2.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(conf);
            }
        });

        try {
            user2fs.delete(subtree1, true);
            fail("Acl should block delete");
        } catch (AccessControlException expected) {
            assertTrue("Wrong inode triggered access control exception.",
                    expected.getMessage().contains("projectedInode=\"level1folder1\""));
            //Operation should fail.
        }

    } finally {
        teardown();
    }
}

From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java
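
This KMS REST endpoint runs the key deletion inside a PrivilegedExceptionAction so that provider.deleteKey() and provider.flush() execute as the authenticated HTTP caller rather than as the server process.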

@DELETE
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
public Response deleteKey(@PathParam("name") final String name) throws Exception {
    try {
        LOG.trace("Entering deleteKey method.");
        KMSWebApp.getAdminCallsMeter().mark();
        UserGroupInformation user = HttpUserGroupInformation.get();
        assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
        KMSClientProvider.checkNotEmpty(name, "name");
        LOG.debug("Deleting key with name {}.", name);
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                provider.deleteKey(name);
                provider.flush();
                return null;
            }
        });

        kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
        LOG.trace("Exiting deleteKey method.");
        return Response.ok().build();
    } catch (Exception e) {
        LOG.debug("Exception in deleteKey.", e);
        throw e;
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java
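
Falcon's client factory creates the FileSystem inside ugi.doAs so it is bound to the proxy user, short-circuiting to a plain FileSystem.get() when the caller is already the login user and impersonation is unnecessary.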

/**
 * Return a FileSystem created with the provided user for the specified URI.
 *
 * @param ugi  user group information.
 * @param uri  file system URI.
 * @param conf Configuration with all necessary information to create the FileSystem.
 * @return FileSystem created with the provided user/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public FileSystem createFileSystem(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws FalconException {
    Validate.notNull(ugi, "ugi cannot be null");
    Validate.notNull(conf, "configuration cannot be null");

    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            ugi.checkTGTAndReloginFromKeytab();
        }
    } catch (IOException ioe) {
        throw new FalconException(
                "Exception while getting FileSystem. Unable to check TGT for user " + ugi.getShortUserName(),
                ioe);
    }

    validateNameNode(uri, conf);

    try {
        // prevent falcon impersonating falcon, no need to use doas
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            LOG.info("Creating FS for the login user {}, impersonation not required", proxyUserName);
            return FileSystem.get(uri, conf);
        }

        LOG.info("Creating FS impersonating user {}", proxyUserName);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);
            }
        });
    } catch (InterruptedException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    } catch (IOException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}

From source file:com.lucidworks.security.authentication.server.KerberosAuthenticationHandler.java
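
After logging in from a keytab, this handler obtains the GSSManager inside Subject.doAs so later GSS-API calls run under the server's Kerberos subject; note how PrivilegedActionException is unwrapped with getException().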

/**
 * Initializes the authentication handler instance.
 * <p/>
 * It creates a Kerberos context using the principal and keytab specified in the configuration.
 * <p/>
 * This method is invoked by the {@link AuthenticationFilter#init} method.
 *
 * @param config configuration properties to initialize the handler.
 *
 * @throws ServletException thrown if the handler could not be initialized.
 */
@Override
public void init(Properties config) throws ServletException {
    try {
        principal = config.getProperty(PRINCIPAL, principal);
        if (principal == null || principal.trim().length() == 0) {
            throw new ServletException("Principal not defined in configuration");
        }
        keytab = config.getProperty(KEYTAB, keytab);
        if (keytab == null || keytab.trim().length() == 0) {
            throw new ServletException("Keytab not defined in configuration");
        }
        if (!new File(keytab).exists()) {
            throw new ServletException("Keytab does not exist: " + keytab);
        }

        String nameRules = config.getProperty(NAME_RULES, null);
        if (nameRules != null) {
            KerberosName.setRules(nameRules);
        }

        Set<Principal> principals = new HashSet<Principal>();
        principals.add(new KerberosPrincipal(principal));
        Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());

        KerberosConfiguration kerberosConfiguration = new KerberosConfiguration(keytab, principal);

        LOG.info("Login using keytab " + keytab + ", for principal " + principal);
        loginContext = new LoginContext("", subject, null, kerberosConfiguration);
        loginContext.login();

        Subject serverSubject = loginContext.getSubject();
        try {
            gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {

                @Override
                public GSSManager run() throws Exception {
                    return GSSManager.getInstance();
                }
            });
        } catch (PrivilegedActionException ex) {
            throw ex.getException();
        }
        LOG.info("Initialized, principal [{}] from keytab [{}]", principal, keytab);
    } catch (Exception ex) {
        throw new ServletException(ex);
    }
}

From source file:org.apache.hadoop.hdfs.security.TestDelegationToken.java
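
This WebHDFS test uses two actions: one to create a WebHdfsFileSystem as the "JobTracker" test user, and one to renew and cancel the delegation token as that same user.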

@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
    ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
    final DelegationTokenSecretManager dtSecretManager = cluster.getNameNode().getNamesystem()
            .getDelegationTokenSecretManager();
    final String uri = WebHdfsFileSystem.SCHEME + "://" + config.get("dfs.http.address");
    //get file system as JobTracker
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting("JobTracker",
            new String[] { "user" });
    final WebHdfsFileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
        @Override
        public WebHdfsFileSystem run() throws Exception {
            return (WebHdfsFileSystem) FileSystem.get(new URI(uri), config);
        }
    });

    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.getIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    LOG.info("A valid token should have non-null password, and should be renewed successfully");
    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
    dtSecretManager.renewToken(token, "JobTracker");
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            token.renew(config);
            token.cancel(config);
            return null;
        }
    });
}

From source file:org.apache.hadoop.hbase.http.TestSpnegoHttpServer.java
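
Here the entire client side of a SPNEGO exchange, from acquiring the GSS credential to executing the HTTP GET, runs inside Subject.doAs so the client subject's Kerberos ticket is in scope for authentication.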

@Test
public void testAllowedClient() throws Exception {
    // Create the subject for the client
    final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(CLIENT_PRINCIPAL, clientKeytab);
    final Set<Principal> clientPrincipals = clientSubject.getPrincipals();
    // Make sure the subject has a principal
    assertFalse(clientPrincipals.isEmpty());

    // Get a TGT for the subject (might have many, different encryption types). The first should
    // be the default encryption type.
    Set<KerberosTicket> privateCredentials = clientSubject.getPrivateCredentials(KerberosTicket.class);
    assertFalse(privateCredentials.isEmpty());
    KerberosTicket tgt = privateCredentials.iterator().next();
    assertNotNull(tgt);

    // The name of the principal
    final String principalName = clientPrincipals.iterator().next().getName();

    // Run this code, logged in as the subject (the client)
    HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction<HttpResponse>() {
        @Override
        public HttpResponse run() throws Exception {
            // Logs in with Kerberos via GSS
            GSSManager gssManager = GSSManager.getInstance();
            // jGSS Kerberos login constant
            Oid oid = new Oid("1.2.840.113554.1.2.2");
            GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME);
            GSSCredential credential = gssManager.createCredential(gssClient, GSSCredential.DEFAULT_LIFETIME,
                    oid, GSSCredential.INITIATE_ONLY);

            HttpClientContext context = HttpClientContext.create();
            Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider>create()
                    .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build();

            HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build();
            BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential));

            URL url = new URL(getServerURL(server), "/echo?a=b");
            context.setTargetHost(new HttpHost(url.getHost(), url.getPort()));
            context.setCredentialsProvider(credentialsProvider);
            context.setAuthSchemeRegistry(authRegistry);

            HttpGet get = new HttpGet(url.toURI());
            return client.execute(get, context);
        }
    });

    assertNotNull(resp);
    assertEquals(HttpURLConnection.HTTP_OK, resp.getStatusLine().getStatusCode());
    assertEquals("a:b", EntityUtils.toString(resp.getEntity()).trim());
}

From source file:org.apache.axis2.jaxws.message.databinding.impl.ClassFinderImpl.java
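
This Axis2 helper adds a path to a URLClassLoader via reflection inside AccessController.doPrivileged; note that it uses the raw PrivilegedExceptionAction type rather than a parameterized PrivilegedExceptionAction<Object>.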

public void updateClassPath(final String filePath, final ClassLoader cl) throws Exception {
    if (filePath == null) {
        return;
    }
    if (filePath.length() == 0) {
        return;
    }
    if (cl instanceof URLClassLoader) {
        //lets add the path to the classloader.
        try {
            AccessController.doPrivileged(new PrivilegedExceptionAction() {
                public Object run() throws Exception {
                    URLClassLoader ucl = (URLClassLoader) cl;
                    //convert file path to URL.
                    File file = new File(filePath);
                    URL url = file.toURI().toURL();
                    Class uclClass = URLClassLoader.class;
                    Method method = uclClass.getDeclaredMethod("addURL", new Class[] { URL.class });
                    method.setAccessible(true);
                    method.invoke(ucl, new Object[] { url });
                    return ucl;
                }
            });
        } catch (PrivilegedActionException e) {
            if (log.isDebugEnabled()) {
                log.debug("Exception thrown from AccessController: " + e);
            }
            throw ExceptionFactory.makeWebServiceException(e.getException());
        }

    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestStore.java
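
This HBase test wraps its whole body in User.runAs(PrivilegedExceptionAction) so the faulty file system and the flush-size assertions all run as a synthetic test user.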

/**
 * Test we do not lose data if we fail a flush and then close.
 * Part of HBASE-10466.
 * @throws Exception if the flush or close fails unexpectedly.
 */
@Test
public void testFlushSizeAccounting() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write in " + this.name.getMethodName());
    final Configuration conf = HBaseConfiguration.create();
    // Only retry once.
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    User user = User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
            FaultyFileSystem ffs = (FaultyFileSystem) fs;

            // Initialize region
            init(name.getMethodName(), conf);

            long size = store.memstore.getFlushableSize();
            Assert.assertEquals(0, size);
            LOG.info("Adding some data");
            long kvSize = store.add(new KeyValue(row, family, qf1, 1, (byte[]) null));
            size = store.memstore.getFlushableSize();
            Assert.assertEquals(kvSize, size);
            // Flush.  Bug #1 from HBASE-10466.  Make sure size calculation on failed flush is right.
            try {
                LOG.info("Flushing");
                flushStore(store, id++);
                Assert.fail("Didn't bubble up IOE!");
            } catch (IOException ioe) {
                Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
            }
            size = store.memstore.getFlushableSize();
            Assert.assertEquals(kvSize, size);
            store.add(new KeyValue(row, family, qf2, 2, (byte[]) null));
            // Even though we add a new kv, we expect the flushable size to be 'same' since we have
            // not yet cleared the snapshot -- the above flush failed.
            Assert.assertEquals(kvSize, size);
            ffs.fault.set(false);
            flushStore(store, id++);
            size = store.memstore.getFlushableSize();
            // Size should be the foreground kv size.
            Assert.assertEquals(kvSize, size);
            flushStore(store, id++);
            size = store.memstore.getFlushableSize();
            Assert.assertEquals(0, size);
            return null;
        }
    });
}

From source file:org.apache.hadoop.hbase.rest.TestSecureRESTServer.java
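
Setup uses doAs twice with keytab-derived UGIs: first to start the REST servlet container as the REST server principal, then to grant global CREATE/READ/WRITE permissions as the HBase service user.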

@BeforeClass
public static void setupServer() throws Exception {
    final File target = new File(System.getProperty("user.dir"), "target");
    assertTrue(target.exists());

    /*
     * Keytabs
     */
    File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() + "_keytabs");
    if (keytabDir.exists()) {
        FileUtils.deleteDirectory(keytabDir);
    }
    keytabDir.mkdirs();
    // Keytab for HBase services (RS, Master)
    serviceKeytab = new File(keytabDir, "hbase.service.keytab");
    // The keytab for the REST server
    restServerKeytab = new File(keytabDir, "spnego.keytab");
    // Keytab for the client
    clientKeytab = new File(keytabDir, CLIENT_PRINCIPAL + ".keytab");

    /*
     * Update UGI
     */
    Configuration conf = TEST_UTIL.getConfiguration();

    /*
     * Start KDC
     */
    KDC = TEST_UTIL.setupMiniKdc(serviceKeytab);
    KDC.createPrincipal(clientKeytab, CLIENT_PRINCIPAL);
    KDC.createPrincipal(serviceKeytab, SERVICE_PRINCIPAL);
    // REST server's keytab contains keys for both principals REST uses
    KDC.createPrincipal(restServerKeytab, SPNEGO_SERVICE_PRINCIPAL, REST_SERVER_PRINCIPAL);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setKeytabFileForTesting(serviceKeytab.getAbsolutePath());
    // Why doesn't `setKeytabFileForTesting` do this?
    conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath());
    conf.set("hbase.regionserver.hostname", "localhost");
    conf.set("hbase.master.hostname", "localhost");
    HBaseKerberosUtils.setSecuredConfiguration(conf, SERVICE_PRINCIPAL + "@" + KDC.getRealm(),
            SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    setHdfsSecuredConfiguration(conf);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName(),
            AccessController.class.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    // Enable EXEC permission checking
    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
    conf.set("hbase.superuser", "hbase");
    conf.set("hadoop.proxyuser.rest.hosts", "*");
    conf.set("hadoop.proxyuser.rest.users", "*");
    UserGroupInformation.setConfiguration(conf);

    updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, restServerKeytab);

    // Start HDFS
    TEST_UTIL.startMiniCluster(
            StartMiniClusterOption.builder().numMasters(1).numRegionServers(1).numZkServers(1).build());

    // Start REST
    UserGroupInformation restUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI(REST_SERVER_PRINCIPAL,
            restServerKeytab.getAbsolutePath());
    restUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            REST_TEST.startServletContainer(conf);
            return null;
        }
    });
    baseUrl = new URL("http://localhost:" + REST_TEST.getServletPort());

    LOG.info("HTTP server started: " + baseUrl);
    TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:acl"));

    // Let the REST server create, read, and write globally
    UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI(SERVICE_PRINCIPAL,
            serviceKeytab.getAbsolutePath());
    superuser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
                AccessControlClient.grant(conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ,
                        Action.WRITE);
            } catch (Throwable t) {
                if (t instanceof Exception) {
                    throw (Exception) t;
                } else {
                    throw new Exception(t);
                }
            }
            return null;
        }
    });
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java
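
The action lets the test list a directory as the remote user "bob": the first doAs succeeds, and after directory permissions are tightened the second doAs expects an AccessControlException.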

@org.junit.Test
public void testDirectoryPermissions() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file4");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Try to read the directory as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Change permissions so that the directory can't be read by "other"
    fileSystem.setPermission(file.getParent(), new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE));

    // Try to read the base directory as the file owner
    RemoteIterator<LocatedFileStatus> iter = fileSystem.listFiles(file.getParent(), false);
    Assert.assertTrue(iter.hasNext());

    // Now try to read the directory as "bob" again - this should fail
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}