Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction from open source projects; each example creates an anonymous implementation and hands it to a privileged runner such as AccessController.doPrivileged, Subject.doAs, or Hadoop's UserGroupInformation.doAs.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
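
Before the project examples below, here is a minimal, self-contained sketch of the common pattern; the class and file name are only illustrative. An anonymous PrivilegedExceptionAction is passed to AccessController.doPrivileged, and a checked exception thrown from run() comes back wrapped in a PrivilegedActionException, from which it is unwrapped via getException().

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedOpenExample {

    /** Opens a file with this class's own privileges rather than the caller's. */
    public static FileInputStream openPrivileged(final String name) throws FileNotFoundException {
        try {
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(name);
                }
            });
        } catch (PrivilegedActionException e) {
            // doPrivileged wraps any checked exception thrown by run().
            throw (FileNotFoundException) e.getException();
        }
    }
}

Subject.doAs and Hadoop's UserGroupInformation.doAs in the examples below follow the same shape: the privileged work goes in run(), and checked exceptions reach the caller either wrapped in PrivilegedActionException or rethrown directly, depending on the doAs variant.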

Usage

From source file:org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods.java

/** Handle HTTP PUT request. */
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({ "*/*" })
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response put(final InputStream in, @Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT) final PutOpParam op,
        @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT) final PermissionParam permission,
        @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) final OverwriteParam overwrite,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize,
        @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT) final ReplicationParam replication,
        @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) final BlockSizeParam blockSize)
        throws IOException, InterruptedException {

    init(ugi, delegation, path, op, permission, overwrite, bufferSize, replication, blockSize);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException, URISyntaxException {

            final String fullpath = path.getAbsolutePath();
            final DataNode datanode = (DataNode) context.getAttribute("datanode");

            switch (op.getValue()) {
            case CREATE: {
                final Configuration conf = new Configuration(datanode.getConf());
                conf.set(FsPermission.UMASK_LABEL, "000");

                final int b = bufferSize.getValue(conf);
                DFSClient dfsclient = new DFSClient(conf);
                FSDataOutputStream out = null;
                try {
                    out = new FSDataOutputStream(
                            dfsclient.create(fullpath, permission.getFsPermission(), overwrite.getValue(),
                                    replication.getValue(conf), blockSize.getValue(conf), null, b),
                            null);
                    IOUtils.copyBytes(in, out, b);
                    out.close();
                    out = null;
                    dfsclient.close();
                    dfsclient = null;
                } finally {
                    IOUtils.cleanup(LOG, out);
                    IOUtils.cleanup(LOG, dfsclient);
                }
                final String nnAddr = NameNode.getInfoServer(conf);
                final URI uri = new URI(WebHdfsFileSystem.SCHEME + "://" + nnAddr + fullpath);
                return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
            }
            default:
                throw new UnsupportedOperationException(op + " is not supported");
            }
        }
    });
}

From source file:com.cloudera.alfredo.client.KerberosAuthenticator.java

/**
 * Implements the SPNEGO authentication sequence interaction using the current default principal
 * in the Kerberos cache (normally set via kinit).
 *
 * @param token the authentication token being used for the user.
 * @throws IOException if an IO error occurred.
 * @throws AuthenticationException if an authentication error occurred.
 */
private void doSpnegoSequence(AuthenticatedURL.Token token) throws IOException, AuthenticationException {
    try {
        AccessControlContext context = AccessController.getContext();
        Subject subject = Subject.getSubject(context);
        if (subject == null) {
            subject = new Subject();
            LoginContext login = new LoginContext("", subject);
            login.login();
        }
        Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                GSSContext gssContext = null;
                try {
                    GSSManager gssManager = GSSManager.getInstance();
                    String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();
                    GSSName serviceName = gssManager.createName(servicePrincipal,
                            GSSUtil.NT_GSS_KRB5_PRINCIPAL);
                    gssContext = gssManager.createContext(serviceName, GSSUtil.GSS_KRB5_MECH_OID, null,
                            GSSContext.DEFAULT_LIFETIME);
                    gssContext.requestCredDeleg(true);
                    gssContext.requestMutualAuth(true);

                    byte[] inToken = new byte[0];
                    byte[] outToken;
                    boolean established = false;

                    // Loop while the context is still not established
                    while (!established) {
                        outToken = gssContext.initSecContext(inToken, 0, inToken.length);
                        if (outToken != null) {
                            sendToken(outToken);
                        }

                        if (!gssContext.isEstablished()) {
                            inToken = readToken();
                        } else {
                            established = true;
                        }
                    }
                } finally {
                    if (gssContext != null) {
                        gssContext.dispose();
                    }
                }
                return null;
            }
        });
    } catch (PrivilegedActionException ex) {
        throw new AuthenticationException(ex.getException());
    } catch (LoginException ex) {
        throw new AuthenticationException(ex);
    }
    AuthenticatedURL.extractToken(conn, token);
}

From source file:org.apache.hadoop.hbase.ipc.AsyncRpcChannelImpl.java

/**
 * Connects to the channel.
 * @param bootstrap the bootstrap to connect with
 * @return future of the connection attempt
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                retryOrClose(bootstrap, failureCounter++, client.failureSleep, f.cause());
                return;
            }
            channel = f.channel();

            setupAuthorization();

            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannelImpl.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file:org.apache.hadoop.hbase.LocalHBaseCluster.java

public JVMClusterUtil.RegionServerThread addRegionServer(final Configuration config, final int index, User user)
        throws IOException, InterruptedException {
    return user.runAs(new PrivilegedExceptionAction<JVMClusterUtil.RegionServerThread>() {
        public JVMClusterUtil.RegionServerThread run() throws Exception {
            return addRegionServer(config, index);
        }
    });
}

From source file:org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.java

/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf) throws IOException {
    final InetSocketAddress infoSocAddr = getHttpAddress(conf);
    infoBindAddress = infoSocAddr.getHostName();
    if (UserGroupInformation.isSecurityEnabled()) {
        SecurityUtil.login(conf, DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
                DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
    }
    // initiate Java VM metrics
    JvmMetricsSource.create("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.conf = conf;
    this.namenode = (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class, NamenodeProtocol.versionID,
            nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage();
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    // Kerberized SSL servers must be run from the host principal...
    UserGroupInformation httpUGI = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            SecurityUtil.getServerPrincipal(
                    conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoBindAddress),
            conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
    try {
        infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {

            @Override
            public HttpServer run() throws IOException, InterruptedException {
                LOG.info("Starting web server as: " + UserGroupInformation.getCurrentUser().getUserName());

                int tmpInfoPort = infoSocAddr.getPort();
                infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf,
                        SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN));

                if (UserGroupInformation.isSecurityEnabled()) {
                    System.setProperty("https.cipherSuites",
                            Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
                    InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoBindAddress + ":"
                            + conf.get("dfs.secondary.https.port", infoBindAddress + ":" + 0));
                    imagePort = secInfoSocAddr.getPort();
                    infoServer.addSslListener(secInfoSocAddr, conf, false, true);
                }

                infoServer.setAttribute("name.system.image", checkpointImage);
                infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
                infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class, true);
                infoServer.start();
                return infoServer;
            }
        });
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    LOG.info("Web server init done");
    // The web-server port can be ephemeral... ensure we have the correct info

    infoPort = infoServer.getPort();
    if (!UserGroupInformation.isSecurityEnabled())
        imagePort = infoPort;

    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
    LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " + "(" + checkpointPeriod / 60 + " min)");
    LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " + "(" + checkpointSize / 1024 + " KB)");
}

From source file:org.apache.axis2.deployment.util.Utils.java

public static URL[] getURLsForAllJars(URL url, File tmpDir) {
    FileInputStream fin = null;
    InputStream in = null;
    ZipInputStream zin = null;
    try {
        ArrayList array = new ArrayList();
        in = url.openStream();
        String fileName = url.getFile();
        int index = fileName.lastIndexOf('/');
        if (index != -1) {
            fileName = fileName.substring(index + 1);
        }
        final File f = createTempFile(fileName, in, tmpDir);

        fin = (FileInputStream) org.apache.axis2.java.security.AccessController
                .doPrivileged(new PrivilegedExceptionAction() {
                    public Object run() throws FileNotFoundException {
                        return new FileInputStream(f);
                    }
                });
        array.add(f.toURL());
        zin = new ZipInputStream(fin);

        ZipEntry entry;
        String entryName;
        while ((entry = zin.getNextEntry()) != null) {
            entryName = entry.getName();
            /**
             * If the entry name starts with "lib/" and ends with ".jar",
             * the entry is added to the list of URLs.
             */
            if ((entryName != null) && entryName.toLowerCase().startsWith("lib/")
                    && entryName.toLowerCase().endsWith(".jar")) {
                String suffix = entryName.substring(4);
                File f2 = createTempFile(suffix, zin, tmpDir);
                array.add(f2.toURL());
            }
        }
        return (URL[]) array.toArray(new URL[array.size()]);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (fin != null) {
            try {
                fin.close();
            } catch (IOException e) {
                //
            }
        }
        if (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                //
            }
        }
        if (zin != null) {
            try {
                zin.close();
            } catch (IOException e) {
                //
            }
        }
    }
}

From source file:com.thinkbiganalytics.kerberos.TestKerberosKinit.java

private void testHdfsWithUserImpersonation(final String configResources, final String keytab,
        final String principal, String proxyUser, final String environment, final String hdfsUrl) {
    final String path = "/user";
    try {
        final Configuration configuration = TestKerberosKinit.createConfigurationFromList(configResources);
        UserGroupInformation realugi = TestKerberosKinit.generateKerberosTicket(configuration, keytab,
                principal);
        System.out.println(" ");
        System.out.println("Sucessfully got a kerberos ticket in the JVM");
        System.out.println("current user is: " + realugi.getUserName());

        UserGroupInformation ugiProxy = UserGroupInformation.createProxyUser(proxyUser, realugi);
        System.out.println("proxy user is: " + ugiProxy.getUserName());
        ugiProxy.doAs(new PrivilegedExceptionAction<Object>() {
            public Object run() {
                try {
                    searchHDFS(configuration, environment, path, hdfsUrl);
                } catch (Exception e) {
                    throw new RuntimeException("Error testing HDFS with Kerberos Hive Impersonation", e);
                }
                return null;
            }
        });

    } catch (Exception e) {
        System.out.println("Error testing HDFS\n\n");
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

/**
 * Connects to the channel.
 *
 * @param bootstrap the bootstrap to connect with
 * @return future of the connection attempt
 */
private ChannelFuture connect(final Bootstrap bootstrap) {
    return bootstrap.remoteAddress(address).connect().addListener(new GenericFutureListener<ChannelFuture>() {
        @Override
        public void operationComplete(final ChannelFuture f) throws Exception {
            if (!f.isSuccess()) {
                if (f.cause() instanceof SocketException) {
                    retryOrClose(bootstrap, connectFailureCounter++, f.cause());
                } else {
                    retryOrClose(bootstrap, ioFailureCounter++, f.cause());
                }
                return;
            }
            channel = f.channel();

            setupAuthorization();

            ByteBuf b = channel.alloc().directBuffer(6);
            createPreamble(b, authMethod);
            channel.writeAndFlush(b).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
            if (useSasl) {
                UserGroupInformation ticket = AsyncRpcChannel.this.ticket.getUGI();
                if (authMethod == AuthMethod.KERBEROS) {
                    if (ticket != null && ticket.getRealUser() != null) {
                        ticket = ticket.getRealUser();
                    }
                }
                SaslClientHandler saslHandler;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                final UserGroupInformation realTicket = ticket;
                saslHandler = ticket.doAs(new PrivilegedExceptionAction<SaslClientHandler>() {
                    @Override
                    public SaslClientHandler run() throws IOException {
                        return getSaslHandler(realTicket, bootstrap);
                    }
                });
                if (saslHandler != null) {
                    // Sasl connect is successful. Let's set up Sasl channel handler
                    channel.pipeline().addFirst(saslHandler);
                } else {
                    // fall back to simple auth because server told us so.
                    authMethod = AuthMethod.SIMPLE;
                    useSasl = false;
                }
            } else {
                startHBaseConnection(f.channel());
            }
        }
    });
}

From source file:com.thinkbiganalytics.datalake.authorization.SentryAuthorizationService.java

@Override
public void createOrUpdateReadOnlyHdfsPolicy(String categoryName, String feedName,
        List<String> hadoopAuthorizationGroups, List<String> hdfsPaths) {

    if (this.sentryConnection.getKerberosTicketConfiguration().isKerberosEnabled()) {
        try {
            UserGroupInformation ugi = authenticatePolicyCreatorWithKerberos();
            if (ugi == null) {
                log.error(UserGroupObjectError);
            } else {
                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        createReadOnlyHdfsPolicy(categoryName, feedName, hadoopAuthorizationGroups, hdfsPaths);
                        return null;
                    }
                });
            }
        } catch (Exception e) {
            log.error("Error Creating Sentry HDFS Policy using Kerberos Authentication" + e.getMessage());
            throw new RuntimeException(e);
        }
    } else {
        createReadOnlyHdfsPolicy(categoryName, feedName, hadoopAuthorizationGroups, hdfsPaths);
    }
}

From source file:org.nebulaframework.deployment.classloading.GridArchiveClassLoader.java

/**
 * Creates a temporary file which consists of the {@code byte[]} of a given
 * {@code GridArchive}.
 * 
 * @param archive
 *            {@code GridArchive}
 * @return A {@code File} reference for new temporary file
 * 
 * @throws IOException
 *             if IOException occurs during {@code File} handling
 */
protected File createTempArchiveFile(final GridArchive archive) throws Exception {

    try {
        // Run with Privileges
        return AccessController.doPrivileged(new PrivilegedExceptionAction<File>() {

            @Override
            public File run() throws IOException {
                // Create Temp File
                File archiveFile = File.createTempFile("archivetemp", "nar");
                archiveFile.deleteOnExit(); // Mark to delete

                // Write the byte[]
                FileOutputStream fout = new FileOutputStream(archiveFile);
                fout.write(archive.getBytes());
                fout.flush();
                fout.close();

                return archiveFile;
            }

        });
    } catch (PrivilegedActionException e) {
        throw e.getException();
    }
}