List of usage examples for java.security.PrivilegedExceptionAction
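Each example below wraps a checked-exception-throwing operation in a PrivilegedExceptionAction and runs it via AccessController.doPrivileged, Subject.doAs, or Hadoop's UserGroupInformation.doAs; any checked exception thrown inside run() comes back wrapped in a PrivilegedActionException (or is rethrown directly by the doAs helper). A minimal sketch of the core pattern, using illustrative class and method names that do not come from any of the projects listed here:

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;
    import java.security.AccessController;
    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;

    public class PrivilegedOpenDemo {
        // Runs an action that may throw a checked exception inside a privileged block,
        // then unwraps the checked exception from the PrivilegedActionException wrapper.
        static InputStream openPrivileged(final URL url) throws IOException {
            try {
                return AccessController.doPrivileged(new PrivilegedExceptionAction<InputStream>() {
                    @Override
                    public InputStream run() throws IOException {
                        return url.openStream(); // may throw the checked IOException
                    }
                });
            } catch (PrivilegedActionException e) {
                // doPrivileged wraps the checked exception thrown by run()
                throw (IOException) e.getException();
            }
        }
    }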
From source file:org.apache.axis2.jaxws.util.WSDL4JWrapper.java
private URLConnection openConnection(final URL url) throws IOException {
    try {
        return (URLConnection) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws IOException {
                return url.openConnection();
            }
        });
    } catch (PrivilegedActionException e) {
        throw (IOException) e.getException();
    }
}
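Since Java 8, PrivilegedExceptionAction<T> is a functional interface, so the raw-typed anonymous class above can be replaced by a typed method reference and the cast on the result disappears. A hypothetical rewrite of the same wrapper, not taken from the Axis2 source:

    private URLConnection openConnection(final URL url) throws IOException {
        try {
            // The explicit type argument removes the need to cast the return value.
            return AccessController.doPrivileged(
                    (PrivilegedExceptionAction<URLConnection>) url::openConnection);
        } catch (PrivilegedActionException e) {
            // run() can only throw IOException here, so the cast is safe.
            throw (IOException) e.getException();
        }
    }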
From source file:org.apache.drill.exec.server.rest.spnego.TestSpnegoAuthentication.java
/**
 * Validate successful {@link DrillSpnegoLoginService#login(String, Object)} when provided with a client token for a
 * configured service principal.
 * @throws Exception
 */
@Test
public void testDrillSpnegoLoginService() throws Exception {
    // Create client subject using its principal and keytab
    final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(spnegoHelper.CLIENT_PRINCIPAL,
            spnegoHelper.clientKeytab.getAbsoluteFile());

    // Generate a SPNEGO token for the peer SERVER_PRINCIPAL from this CLIENT_PRINCIPAL
    final String token = Subject.doAs(clientSubject, new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
            final GSSManager gssManager = GSSManager.getInstance();
            GSSContext gssContext = null;
            try {
                final Oid oid = GSSUtil.GSS_SPNEGO_MECH_OID;
                final GSSName serviceName = gssManager.createName(spnegoHelper.SERVER_PRINCIPAL,
                        GSSName.NT_USER_NAME, oid);
                gssContext = gssManager.createContext(serviceName, oid, null, GSSContext.DEFAULT_LIFETIME);
                gssContext.requestCredDeleg(true);
                gssContext.requestMutualAuth(true);

                byte[] outToken = new byte[0];
                outToken = gssContext.initSecContext(outToken, 0, outToken.length);
                return Base64.encodeBase64String(outToken);
            } finally {
                if (gssContext != null) {
                    gssContext.dispose();
                }
            }
        }
    });

    // Create a DrillbitContext with service principal and keytab for DrillSpnegoLoginService
    final DrillConfig newConfig = new DrillConfig(DrillConfig.create()
            .withValue(ExecConstants.HTTP_AUTHENTICATION_MECHANISMS,
                    ConfigValueFactory.fromIterable(Lists.newArrayList("spnego")))
            .withValue(ExecConstants.HTTP_SPNEGO_PRINCIPAL,
                    ConfigValueFactory.fromAnyRef(spnegoHelper.SERVER_PRINCIPAL))
            .withValue(ExecConstants.HTTP_SPNEGO_KEYTAB,
                    ConfigValueFactory.fromAnyRef(spnegoHelper.serverKeytab.toString())));

    final SystemOptionManager optionManager = Mockito.mock(SystemOptionManager.class);
    Mockito.when(optionManager.getOption(ExecConstants.ADMIN_USERS_VALIDATOR))
            .thenReturn(ExecConstants.ADMIN_USERS_VALIDATOR.DEFAULT_ADMIN_USERS);
    Mockito.when(optionManager.getOption(ExecConstants.ADMIN_USER_GROUPS_VALIDATOR))
            .thenReturn(ExecConstants.ADMIN_USER_GROUPS_VALIDATOR.DEFAULT_ADMIN_USER_GROUPS);

    final DrillbitContext drillbitContext = Mockito.mock(DrillbitContext.class);
    Mockito.when(drillbitContext.getConfig()).thenReturn(newConfig);
    Mockito.when(drillbitContext.getOptionManager()).thenReturn(optionManager);

    final DrillSpnegoLoginService loginService = new DrillSpnegoLoginService(drillbitContext);

    // Authenticate the client using its SPNEGO token
    final UserIdentity user = loginService.login(null, token);

    // Validate the UserIdentity of the authenticated client
    assertTrue(user != null);
    assertTrue(user.getUserPrincipal().getName().equals(spnegoHelper.CLIENT_SHORT_NAME));
    assertTrue(user.isUserInRole("authenticated", null));
}
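Stripped of the GSS-API details, the token-generation step above is the standard JAAS pattern: authenticate, then run privileged work as that Subject and unwrap the PrivilegedActionException. A minimal sketch under assumed names (the login configuration entry "KrbLogin" is illustrative and must exist in the JAAS configuration):

    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;
    import javax.security.auth.Subject;
    import javax.security.auth.login.LoginContext;

    public class DoAsDemo {
        public static void main(String[] args) throws Exception {
            // "KrbLogin" is an illustrative JAAS configuration entry name.
            LoginContext lc = new LoginContext("KrbLogin");
            lc.login();
            final Subject subject = lc.getSubject();
            try {
                // Everything inside run() executes with the Subject's credentials.
                String who = Subject.doAs(subject, new PrivilegedExceptionAction<String>() {
                    @Override
                    public String run() throws Exception {
                        return subject.getPrincipals().iterator().next().getName();
                    }
                });
                System.out.println("Ran as " + who);
            } catch (PrivilegedActionException e) {
                throw e.getException(); // unwrap the checked exception thrown by run()
            } finally {
                lc.logout();
            }
        }
    }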
From source file:org.apache.hadoop.hdfs.web.TestWebHdfsTokens.java
@SuppressWarnings("unchecked")
private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception {
    final String testUser = "DummyUser";
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(testUser,
            new String[] { "supergroup" });
    WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
        @Override
        public WebHdfsFileSystem run() throws IOException {
            return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
        }
    });

    // verify token ops don't get a token
    Assert.assertNull(fs.getRenewToken());
    Token<?> token = fs.getDelegationToken(null);
    fs.renewDelegationToken(token);
    fs.cancelDelegationToken(token);
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).setDelegationToken(any(Token.class));
    Assert.assertNull(fs.getRenewToken());
    reset(fs);

    // verify first non-token op gets a token
    final Path p = new Path("/f");
    fs.create(p, (short) 1).close();
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(anyString());
    verify(fs, times(1)).setDelegationToken(any(Token.class));
    token = fs.getRenewToken();
    Assert.assertNotNull(token);
    Assert.assertEquals(testUser, getTokenOwner(token));
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    reset(fs);

    // verify prior token is reused
    fs.getFileStatus(p);
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    Token<?> token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);

    // verify renew of expired token fails w/o getting a new token
    token = fs.getRenewToken();
    fs.cancelDelegationToken(token);
    try {
        fs.renewDelegationToken(token);
        Assert.fail("should have failed");
    } catch (InvalidToken it) {
    } catch (Exception ex) {
        Assert.fail("wrong exception:" + ex);
    }
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);

    // verify cancel of expired token fails w/o getting a new token
    try {
        fs.cancelDelegationToken(token);
        Assert.fail("should have failed");
    } catch (InvalidToken it) {
    } catch (Exception ex) {
        Assert.fail("wrong exception:" + ex);
    }
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);

    // verify an expired token is replaced with a new token
    fs.open(p).close();
    verify(fs, times(2)).getDelegationToken(); // first bad, then good
    verify(fs, times(1)).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(null);
    verify(fs, times(1)).setDelegationToken(any(Token.class));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertNotSame(token, token2);
    Assert.assertEquals(testUser, getTokenOwner(token2));
    reset(fs);

    // verify with open because it's a little different in how it
    // opens connections
    fs.cancelDelegationToken(fs.getRenewToken());
    InputStream is = fs.open(p);
    is.read();
    is.close();
    verify(fs, times(2)).getDelegationToken(); // first bad, then good
    verify(fs, times(1)).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(null);
    verify(fs, times(1)).setDelegationToken(any(Token.class));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertNotSame(token, token2);
    Assert.assertEquals(testUser, getTokenOwner(token2));
    reset(fs);

    // verify fs close cancels the token
    fs.close();
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    verify(fs, times(1)).cancelDelegationToken(eq(token2));

    // add a token to ugi for a new fs, verify it uses that token
    token = fs.getDelegationToken(null);
    ugi.addToken(token);
    fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
        @Override
        public WebHdfsFileSystem run() throws IOException {
            return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
        }
    });
    Assert.assertNull(fs.getRenewToken());
    fs.getFileStatus(new Path("/"));
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, times(1)).setDelegationToken(eq(token));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);

    // verify it reuses the prior ugi token
    fs.getFileStatus(new Path("/"));
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);

    // verify an expired ugi token is NOT replaced with a new token
    fs.cancelDelegationToken(token);
    for (int i = 0; i < 2; i++) {
        try {
            fs.getFileStatus(new Path("/"));
            Assert.fail("didn't fail");
        } catch (InvalidToken it) {
        } catch (Exception ex) {
            Assert.fail("wrong exception:" + ex);
        }
        verify(fs, times(1)).getDelegationToken();
        verify(fs, times(1)).replaceExpiredDelegationToken();
        verify(fs, never()).getDelegationToken(anyString());
        verify(fs, never()).setDelegationToken(any(Token.class));
        token2 = fs.getRenewToken();
        Assert.assertNotNull(token2);
        Assert.assertEquals(fs.getTokenKind(), token.getKind());
        Assert.assertSame(token, token2);
        reset(fs);
    }

    // verify fs close does NOT cancel the ugi token
    fs.close();
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any(Token.class));
    verify(fs, never()).cancelDelegationToken(any(Token.class));
}
From source file:org.apache.carbondata.core.util.CarbonUtil.java
public static void deleteFoldersAndFilesSilent(final CarbonFile... file)
        throws IOException, InterruptedException {
    UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            for (int i = 0; i < file.length; i++) {
                deleteRecursiveSilent(file[i]);
            }
            return null;
        }
    });
}
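The same UserGroupInformation.doAs idiom recurs in most of the Hadoop-based examples on this page; unlike AccessController.doPrivileged, doAs rethrows checked exceptions raised inside run() as IOException or InterruptedException rather than leaving them wrapped in a PrivilegedActionException. A minimal sketch, assuming a reachable HDFS configured via fs.defaultFS; the user name and group below are illustrative test values:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiDoAsDemo {
        public static void main(String[] args) throws IOException, InterruptedException {
            final Configuration conf = new Configuration();
            // "testuser" and "supergroup" are illustrative values for a test cluster.
            UserGroupInformation ugi =
                    UserGroupInformation.createUserForTesting("testuser", new String[] { "supergroup" });
            // doAs runs the action with the given user's credentials and rethrows
            // checked exceptions from run() as IOException / InterruptedException.
            boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
                @Override
                public Boolean run() throws Exception {
                    FileSystem fs = FileSystem.get(conf);
                    return fs.exists(new Path("/tmp"));
                }
            });
            System.out.println("/tmp exists: " + exists);
        }
    }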
From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java
@org.junit.Test
public void executeTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir3/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Change the parent directory permissions to be execute only for the owner
    Path parentDir = new Path("/tmp/tmpdir3");
    fileSystem.setPermission(parentDir, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE));

    // Try to read the directory as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);
            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Try to read the directory as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);
            RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
            Assert.assertTrue(iter.hasNext());

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to list the directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to read the directory as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to list the directory
            try {
                RemoteIterator<LocatedFileStatus> iter = fs.listFiles(file.getParent(), false);
                Assert.assertTrue(iter.hasNext());
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}
From source file:org.apache.hadoop.ipc.RPCCallBenchmark.java
private TestContext setupClientTestContext(final MyOptions opts) throws IOException, InterruptedException {
    if (opts.clientThreads <= 0) {
        return null;
    }

    // Set up a separate proxy for each client thread,
    // rather than making them share TCP pipes.
    int numProxies = opts.clientThreads;
    final RpcServiceWrapper proxies[] = new RpcServiceWrapper[numProxies];
    for (int i = 0; i < numProxies; i++) {
        proxies[i] = UserGroupInformation.createUserForTesting("proxy-" + i, new String[] {})
                .doAs(new PrivilegedExceptionAction<RpcServiceWrapper>() {
                    @Override
                    public RpcServiceWrapper run() throws Exception {
                        return createRpcClient(opts);
                    }
                });
    }

    // Create an echo message of the desired length
    final StringBuilder msgBuilder = new StringBuilder(opts.msgSize);
    for (int c = 0; c < opts.msgSize; c++) {
        msgBuilder.append('x');
    }
    final String echoMessage = msgBuilder.toString();

    // Create the clients in a test context
    TestContext ctx = new TestContext();
    for (int i = 0; i < opts.clientThreads; i++) {
        final RpcServiceWrapper proxy = proxies[i % numProxies];
        ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
            @Override
            public void doAnAction() throws Exception {
                proxy.doEcho(echoMessage);
                callCount.incrementAndGet();
            }
        });
    }
    return ctx;
}
From source file:org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.java
/**
 * Download <code>fsimage</code> and <code>edits</code>
 * files from the name-node.
 * @throws IOException
 */
private void downloadCheckpointFiles(final CheckpointSignature sig) throws IOException {
    try {
        UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                checkpointImage.cTime = sig.cTime;
                checkpointImage.checkpointTime = sig.checkpointTime;

                // get fsimage
                String fileid = "getimage=1";
                File[] srcNames = checkpointImage.getImageFiles();
                assert srcNames.length > 0 : "No checkpoint targets.";
                TransferFsImage.getFileClient(fsName, fileid, srcNames);
                LOG.info("Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length()
                        + " bytes.");

                // get edits file
                fileid = "getedit=1";
                srcNames = checkpointImage.getEditsFiles();
                assert srcNames.length > 0 : "No checkpoint targets.";
                TransferFsImage.getFileClient(fsName, fileid, srcNames);
                LOG.info("Downloaded file " + srcNames[0].getName() + " size " + srcNames[0].length()
                        + " bytes.");

                checkpointImage.checkpointUploadDone();
                return null;
            }
        });
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
From source file:org.apache.axis2.jaxws.runtime.description.marshal.impl.AnnotationBuilder.java
/** @return ClassLoader */
static ClassLoader getContextClassLoader() {
    // NOTE: This method must remain private because it uses AccessController
    ClassLoader cl = null;
    try {
        cl = (ClassLoader) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws ClassNotFoundException {
                return Thread.currentThread().getContextClassLoader();
            }
        });
    } catch (PrivilegedActionException e) {
        if (log.isDebugEnabled()) {
            log.debug("Exception thrown from AccessController: " + e);
        }
        throw (RuntimeException) e.getException();
    }
    return cl;
}
From source file:com.trendmicro.hdfs.webdav.HDFSResource.java
@Override
public long getModificationTime() {
    try {
        return user.doAs(new PrivilegedExceptionAction<Long>() {
            public Long run() throws Exception {
                return FileSystem.get(conf).getFileStatus(path).getModificationTime();
            }
        });
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
From source file:org.apache.axis2.deployment.util.Utils.java
public static File createTempFile(final String suffix, InputStream in, final File tmpDir) throws IOException {
    byte data[] = new byte[2048];
    int count;
    File f = TempFileManager.createTempFile("axis2", suffix);
    // if (tmpDir == null) {
    //     String directory = (String) org.apache.axis2.java.security.AccessController
    //             .doPrivileged(new PrivilegedAction() {
    //                 public Object run() {
    //                     return System.getProperty("java.io.tmpdir");
    //                 }
    //             });
    //     final File tempFile = new File(directory, "_axis2");
    //     Boolean exists = (Boolean) org.apache.axis2.java.security.AccessController
    //             .doPrivileged(new PrivilegedAction() {
    //                 public Object run() {
    //                     return tempFile.exists();
    //                 }
    //             });
    //     if (!exists) {
    //         Boolean mkdirs = (Boolean) org.apache.axis2.java.security.AccessController
    //                 .doPrivileged(new PrivilegedAction() {
    //                     public Object run() {
    //                         return tempFile.mkdirs();
    //                     }
    //                 });
    //         if (!mkdirs) {
    //             throw new IOException("Unable to create the directory");
    //         }
    //     }
    //     try {
    //         f = (File) org.apache.axis2.java.security.AccessController
    //                 .doPrivileged(new PrivilegedExceptionAction() {
    //                     public Object run() throws IOException {
    //                         return File.createTempFile("axis2", suffix, tempFile);
    //                     }
    //                 });
    //         f.deleteOnExit();
    //     } catch (PrivilegedActionException e) {
    //         throw (IOException) e.getException();
    //     }
    // } else {
    //     try {
    //         f = (File) org.apache.axis2.java.security.AccessController
    //                 .doPrivileged(new PrivilegedExceptionAction() {
    //                     public Object run() throws IOException {
    //                         return File.createTempFile("axis2", suffix, tmpDir);
    //                     }
    //                 });
    //         f.deleteOnExit();
    //     } catch (PrivilegedActionException e) {
    //         throw (IOException) e.getException();
    //     }
    // }
    // if (log.isDebugEnabled()) {
    //     log.debug("Created temporary file : " + f.getAbsolutePath()); // $NON-SEC-4
    // }
    // final File f2 = f;
    // org.apache.axis2.java.security.AccessController
    //         .doPrivileged(new PrivilegedAction() {
    //             public Object run() {
    //                 f2.deleteOnExit();
    //                 return null;
    //             }
    //         });
    FileOutputStream out;
    final File f2 = f;
    try {
        out = (FileOutputStream) org.apache.axis2.java.security.AccessController
                .doPrivileged(new PrivilegedExceptionAction() {
                    public Object run() throws FileNotFoundException {
                        return new FileOutputStream(f2);
                    }
                });
    } catch (PrivilegedActionException e) {
        throw (FileNotFoundException) e.getException();
    }
    while ((count = in.read(data, 0, 2048)) != -1) {
        out.write(data, 0, count);
    }
    out.close();
    return f;
}