Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction from open source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
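
PrivilegedExceptionAction is a functional interface: you implement run() (typically in an anonymous class, or a lambda on Java 8+) and hand it to a doPrivileged or doAs style method; any checked exception thrown by run() comes back wrapped in a PrivilegedActionException. A minimal sketch of the canonical pattern (the class name and file path below are illustrative only, and note that AccessController is deprecated for removal in recent JDKs):

import java.io.FileInputStream;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedOpen {

    public static FileInputStream open(final String path) throws IOException {
        try {
            // run() executes with this class's own privileges, regardless of callers up the stack.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws IOException {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException ex) {
            // doPrivileged wraps checked exceptions thrown by run(); unwrap before rethrowing.
            throw (IOException) ex.getException();
        }
    }
}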

Usage

From source file:com.stratuscom.harvester.classloading.VirtualFileSystemClassLoader.java

@Override
protected Class<?> findClass(final String name) throws ClassNotFoundException {
    try {
        return (Class) Security.doPrivileged(new PrivilegedExceptionAction<Class>() {

            public Class run() throws ClassNotFoundException {
                String resourceName = classToResourceName(name);
                FileObject resourceFileObject = findResourceFileObject(resourceName);
                if (resourceFileObject == null) {
                    if (log.isLoggable(Level.FINE)) {
                        log.fine(getDebugName() + " was asked for " + resourceName + " but couldn't find it.");
                    }
                    throw new ClassNotFoundException(name + "(" + resourceName + ")");
                }
                try {
                    byte[] bytes = FileUtil.getContent(resourceFileObject);
                    return defineClass(name, bytes, 0, bytes.length);
                } catch (IOException ioe) {
                    if (log.isLoggable(Level.FINE)) {
                        log.fine(getDebugName() + " was asked for " + resourceName
                                + " but got IOException while loading it.");
                    }
                    throw new ClassNotFoundException(name, ioe);
                }

            }
        });
    } catch (PrivilegedActionException ex) {
        throw (ClassNotFoundException) ex.getException();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.ha.TestDelegationTokensWithHA.java

@Test(timeout = 300000)
public void testDelegationTokenWithDoAs() throws Exception {
    final Token<DelegationTokenIdentifier> token = getDelegationToken(fs, "JobTracker");
    final UserGroupInformation longUgi = UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
    final UserGroupInformation shortUgi = UserGroupInformation.createRemoteUser("JobTracker");
    longUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // try renew with long name
            token.renew(conf);
            return null;
        }
    });
    shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // renew again with the short name
            token.renew(conf);
            return null;
        }
    });
    longUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // finally cancel with the long name
            token.cancel(conf);
            return null;
        }
    });
}
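
Since PrivilegedExceptionAction has a single abstract method, the anonymous classes above can be written as lambdas on Java 8+. A sketch reusing longUgi, token, and conf from the test above; the cast is needed because doAs is overloaded for both PrivilegedAction and PrivilegedExceptionAction:

longUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
    token.renew(conf);
    return null;
});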

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDefaultVisLabelService.java

@Test
public void testListLabelsWithRegEx() throws Throwable {
    PrivilegedExceptionAction<ListLabelsResponse> action = new PrivilegedExceptionAction<ListLabelsResponse>() {
        public ListLabelsResponse run() throws Exception {
            ListLabelsResponse response = null;
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                response = VisibilityClient.listLabels(conn, ".*secret");
            } catch (Throwable e) {
                fail("Should not have thrown exception");
            }
            // Only labels matching ".*secret" (SECRET and TOPSECRET) should be returned
            List<ByteString> labels = response.getLabelList();
            assertEquals(2, labels.size());
            assertTrue(labels.contains(ByteString.copyFrom(SECRET.getBytes())));
            assertTrue(labels.contains(ByteString.copyFrom(TOPSECRET.getBytes())));
            return null;
        }
    };
    SUPERUSER.runAs(action);
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestSubtreeLockACL.java

@Test
public void testSubtreeDeleteBlockedByInheritedDefaultDeepAcl() throws IOException, InterruptedException {
    try {
        setup();

        //Deny access via default acl down level1folder1
        setDenyUserDefaultAcl(user2.getShortUserName(), level1folder1);

        //Try to delete subtree1. Should fail because of the inherited default acl down the tree.
        FileSystem user2fs = user2.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(conf);
            }
        });

        try {
            user2fs.delete(subtree1, true);
            fail("Acl should block delete");
        } catch (AccessControlException expected) {
            assertTrue("Wrong inode triggered access control exception.",
                    expected.getMessage().contains("projectedInode=\"level2folder1\""));
            //Operation should fail.
        }

    } finally {
        teardown();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.  Ensures we do not
 * lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    Thread zombie = new ZombieLastLogWriterRegionServer(this.conf, counter, stop, region);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                FileSystem fs = FileSystem.get(conf2);
                int expectedFiles = fs.listStatus(HLOGDIR).length;
                HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf2);
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals(expectedFiles, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countHLog(logfile, fs, conf2);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "Split should recover every zombie edit, plus at most one extra entry. Zombie wrote "
                        + counter.get() + " but logfile had " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}

From source file:org.apache.atlas.hive.hook.HiveHook.java

void notifyAsPrivilegedAction(final HiveEventContext event) {

    try {
        PrivilegedExceptionAction<Object> privilegedNotify = new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                notifyEntities(event.getMessages());
                return event;
            }
        };

        //Notify as 'hive' service user in doAs mode
        UserGroupInformation realUser = event.getUgi().getRealUser();
        if (realUser != null) {
            LOG.info("Sending notification for event {} as service user {} #messages {} ", event.getOperation(),
                    realUser.getShortUserName(), event.getMessages().size());
            realUser.doAs(privilegedNotify);
        } else {
            LOG.info("Sending notification for event {} as current user {} #messages {} ", event.getOperation(),
                    event.getUgi().getShortUserName(), event.getMessages().size());
            event.getUgi().doAs(privilegedNotify);
        }
    } catch (Throwable e) {
        LOG.error("Error during notify {} ", event.getOperation(), e);
    }
}

From source file:com.cloudera.alfredo.server.KerberosAuthenticationHandler.java

/**
 * Enforces the Kerberos SPNEGO authentication sequence, returning an {@link AuthenticationToken} only
 * after the Kerberos SPNEGO sequence completed successfully.
 * <p/>
 *
 * @param request the HTTP client request.
 * @param response the HTTP client response.
 * @return an authentication token if the Kerberos SPNEGO sequence is complete and valid,
 * <code>null</code> if it is in progress (in which case the handler sends the response to the client).
 * @throws IOException thrown if an IO error occurred.
 * @throws AuthenticationException thrown if Kerberos SPNEGO sequence failed.
 */
@Override
public AuthenticationToken authenticate(HttpServletRequest request, final HttpServletResponse response)
        throws IOException, AuthenticationException {
    AuthenticationToken token = null;
    String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);

    if (authorization == null) {
        response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
        LOG.trace("SPNEGO starts");
    } else if (!authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
        response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
        LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION + "' does not start with '"
                + KerberosAuthenticator.NEGOTIATE + "' :  {}", authorization);
    } else {
        authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
        final Base64 base64 = new Base64(0);
        final byte[] clientToken = base64.decode(authorization);
        Subject serverSubject = loginContext.getSubject();
        try {
            token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {

                @Override
                public AuthenticationToken run() throws Exception {
                    AuthenticationToken token = null;
                    GSSContext gssContext = null;
                    try {
                        gssContext = gssManager.createContext((GSSCredential) null);
                        byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
                        if (serverToken != null && serverToken.length > 0) {
                            String authenticate = base64.encodeToString(serverToken);
                            response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
                                    KerberosAuthenticator.NEGOTIATE + " " + authenticate);
                        }
                        if (!gssContext.isEstablished()) {
                            response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
                            LOG.trace("SPNEGO in progress");
                        } else {
                            String clientPrincipal = gssContext.getSrcName().toString();
                            int index = clientPrincipal.indexOf("/");
                            if (index == -1) {
                                index = clientPrincipal.indexOf("@");
                            }
                            String userName = (index == -1) ? clientPrincipal
                                    : clientPrincipal.substring(0, index);
                            token = new AuthenticationToken(userName, clientPrincipal, TYPE);
                            response.setStatus(HttpServletResponse.SC_OK);
                            LOG.trace("SPNEGO completed for principal [{}]", clientPrincipal);
                        }
                    } finally {
                        if (gssContext != null) {
                            gssContext.dispose();
                        }
                    }
                    return token;
                }
            });
        } catch (PrivilegedActionException ex) {
            if (ex.getException() instanceof IOException) {
                throw (IOException) ex.getException();
            } else {
                throw new AuthenticationException(ex.getException());
            }
        }
    }
    return token;
}

From source file:org.apache.hadoop.fs.http.server.TestHttpFSWithKerberos.java

private void testDelegationTokenWithinDoAs(final Class fileSystemClass, boolean proxyUser) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("client", "/Users/tucu/tucu.keytab");
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    if (proxyUser) {
        ugi = UserGroupInformation.createProxyUser("foo", ugi);
    }
    conf = new Configuration();
    UserGroupInformation.setConfiguration(conf);
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            testDelegationTokenWithFS(fileSystemClass);
            return null;
        }
    });
}

From source file:org.apache.hadoop.mapred.ClientServiceDelegate.java

private MRClientProtocol getProxy() throws IOException {
    if (realProxy != null) {
        return realProxy;
    }

    // Possibly allow nulls through the PB tunnel, otherwise deal with an exception
    // and redirect to the history server.
    ApplicationReport application = null;
    try {
        application = rm.getApplicationReport(appId);
    } catch (ApplicationNotFoundException e) {
        application = null;
    } catch (YarnException e2) {
        throw new IOException(e2);
    }
    if (application != null) {
        trackingUrl = application.getTrackingUrl();
    }
    InetSocketAddress serviceAddr = null;
    while (application == null || YarnApplicationState.RUNNING == application.getYarnApplicationState()) {
        if (application == null) {
            LOG.info(
                    "Could not get Job info from RM for job " + jobId + ". Redirecting to job history server.");
            return checkAndGetHSProxy(null, JobState.NEW);
        }
        try {
            if (application.getHost() == null || "".equals(application.getHost())) {
                LOG.debug("AM not assigned to Job. Waiting to get the AM ...");
                Thread.sleep(2000);

                LOG.debug("Application state is " + application.getYarnApplicationState());
                application = rm.getApplicationReport(appId);
                continue;
            } else if (UNAVAILABLE.equals(application.getHost())) {
                if (!amAclDisabledStatusLogged) {
                    LOG.info("Job " + jobId + " is running, but the host is unknown."
                            + " Verify user has VIEW_JOB access.");
                    amAclDisabledStatusLogged = true;
                }
                return getNotRunningJob(application, JobState.RUNNING);
            }
            if (!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
                UserGroupInformation newUgi = UserGroupInformation
                        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName());
                serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(), application.getRpcPort());
                if (UserGroupInformation.isSecurityEnabled()) {
                    org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
                    Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken,
                            serviceAddr);
                    newUgi.addToken(token);
                }
                LOG.debug("Connecting to " + serviceAddr);
                final InetSocketAddress finalServiceAddr = serviceAddr;
                realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
                    @Override
                    public MRClientProtocol run() throws IOException {
                        return instantiateAMProxy(finalServiceAddr);
                    }
                });
            } else {
                if (!amAclDisabledStatusLogged) {
                    LOG.info("Network ACL closed to AM for job " + jobId
                            + ". Not going to try to reach the AM.");
                    amAclDisabledStatusLogged = true;
                }
                return getNotRunningJob(null, JobState.RUNNING);
            }
            return realProxy;
        } catch (IOException e) {
            //possibly the AM has crashed
            //there may be some time before AM is restarted
            //keep retrying by getting the address from RM
            LOG.info("Could not connect to " + serviceAddr + ". Waiting for getting the latest AM address...");
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e1) {
                LOG.warn("getProxy() call interruped", e1);
                throw new YarnRuntimeException(e1);
            }
            try {
                application = rm.getApplicationReport(appId);
            } catch (YarnException e1) {
                throw new IOException(e1);
            }
            if (application == null) {
                LOG.info("Could not get Job info from RM for job " + jobId
                        + ". Redirecting to job history server.");
                return checkAndGetHSProxy(null, JobState.RUNNING);
            }
        } catch (InterruptedException e) {
            LOG.warn("getProxy() call interruped", e);
            throw new YarnRuntimeException(e);
        } catch (YarnException e) {
            throw new IOException(e);
        }
    }

    /* We just want to return if it's allocating, so that we don't
     * block on it. This is to be able to return job status
     * on an allocating Application.
     */
    String user = application.getUser();
    if (user == null) {
        throw new IOException("User is not set in the application report");
    }
    if (application.getYarnApplicationState() == YarnApplicationState.NEW
            || application.getYarnApplicationState() == YarnApplicationState.NEW_SAVING
            || application.getYarnApplicationState() == YarnApplicationState.SUBMITTED
            || application.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
        realProxy = null;
        return getNotRunningJob(application, JobState.NEW);
    }

    if (application.getYarnApplicationState() == YarnApplicationState.FAILED) {
        realProxy = null;
        return getNotRunningJob(application, JobState.FAILED);
    }

    if (application.getYarnApplicationState() == YarnApplicationState.KILLED) {
        realProxy = null;
        return getNotRunningJob(application, JobState.KILLED);
    }

    //History server can serve a job only if application
    //succeeded.
    if (application.getYarnApplicationState() == YarnApplicationState.FINISHED) {
        LOG.info("Application state is completed. FinalApplicationStatus="
                + application.getFinalApplicationStatus().toString() + ". Redirecting to job history server");
        realProxy = checkAndGetHSProxy(application, JobState.SUCCEEDED);
    }
    return realProxy;
}

From source file:org.apache.bsf.BSFManager.java

/**
 * Compile the application of the given anonymous function of the given
 * language to the given parameters into the given <tt>CodeBuffer</tt>.
 *
 * @param lang language identifier
 * @param source (context info) the source of this expression (e.g., filename)
 * @param lineNo (context info) the line number in source for expr
 * @param columnNo (context info) the column number in source for expr
 * @param funcBody the multi-line, value returning script to evaluate
 * @param paramNames the names of the parameters the above function body assumes
 * @param arguments values of the above parameters
 * @param cb       code buffer to compile into
 *
 * @exception BSFException if anything goes wrong while running the script
 */
public void compileApply(String lang, String source, int lineNo, int columnNo, Object funcBody,
        Vector paramNames, Vector arguments, CodeBuffer cb) throws BSFException {
    logger.debug("BSFManager:compileApply");

    final BSFEngine e = loadScriptingEngine(lang);
    final String sourcef = source;
    final int lineNof = lineNo, columnNof = columnNo;
    final Object funcBodyf = funcBody;
    final Vector paramNamesf = paramNames;
    final Vector argumentsf = arguments;
    final CodeBuffer cbf = cb;

    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws Exception {
                e.compileApply(sourcef, lineNof, columnNof, funcBodyf, paramNamesf, argumentsf, cbf);
                return null;
            }
        });
    } catch (PrivilegedActionException prive) {

        logger.error("Exception :", prive);
        throw (BSFException) prive.getException();
    }
}