Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects real-world usage examples for java.security.PrivilegedExceptionAction, drawn from open-source projects.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}
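
A minimal, self-contained sketch of the basic JDK pattern (the file path and class name here are illustrative): run() may throw any checked exception, which doPrivileged wraps in a PrivilegedActionException.

import java.io.FileInputStream;
import java.io.InputStream;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadDemo {
    public static void main(String[] args) throws Exception {
        try {
            // Run an action that throws checked exceptions under doPrivileged.
            InputStream in = AccessController.doPrivileged(new PrivilegedExceptionAction<InputStream>() {
                @Override
                public InputStream run() throws Exception {
                    return new FileInputStream("/etc/hosts");
                }
            });
            in.close();
        } catch (PrivilegedActionException e) {
            // Unwrap the checked exception originally thrown by run().
            throw e.getException();
        }
    }
}

Note that AccessController is deprecated in recent JDKs. The Hadoop-based examples below instead pass the same interface to UserGroupInformation.doAs, which runs the action as a particular authenticated user.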

Usage

From source file:org.apache.hadoop.hbase.ipc.BlockingRpcConnection.java
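
HBase's blocking RPC client wraps its SASL handshake in a PrivilegedExceptionAction so that setupSaslConnection runs under the connection's UserGroupInformation (ticket.doAs), retrying failed handshakes up to MAX_RETRIES times.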

private void setupIOstreams() throws IOException {
    if (socket != null) {
        // The connection is already available. Perfect.
        return;
    }

    if (this.rpcClient.failedServers.isFailedServer(remoteId.getAddress())) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Not trying to connect to " + remoteId.address
                    + " this server is in the failed servers list");
        }
        throw new FailedServerException("This server is in the failed servers list: " + remoteId.address);
    }

    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Connecting to " + remoteId.address);
        }

        short numRetries = 0;
        final short MAX_RETRIES = 5;
        while (true) {
            setupConnection();
            InputStream inStream = NetUtils.getInputStream(socket);
            // This creates a socket with a write timeout. This timeout cannot be changed.
            OutputStream outStream = NetUtils.getOutputStream(socket, this.rpcClient.writeTO);
            // Write out the preamble -- MAGIC, version, and auth to use.
            writeConnectionHeaderPreamble(outStream);
            if (useSasl) {
                final InputStream in2 = inStream;
                final OutputStream out2 = outStream;
                UserGroupInformation ticket = getUGI();
                boolean continueSasl;
                if (ticket == null) {
                    throw new FatalConnectionException("ticket/user is null");
                }
                try {
                    continueSasl = ticket.doAs(new PrivilegedExceptionAction<Boolean>() {
                        @Override
                        public Boolean run() throws IOException {
                            return setupSaslConnection(in2, out2);
                        }
                    });
                } catch (Exception ex) {
                    ExceptionUtil.rethrowIfInterrupt(ex);
                    handleSaslConnectionFailure(numRetries++, MAX_RETRIES, ex, ticket);
                    continue;
                }
                if (continueSasl) {
                    // Sasl connect is successful. Let's set up Sasl i/o streams.
                    inStream = saslRpcClient.getInputStream(inStream);
                    outStream = saslRpcClient.getOutputStream(outStream);
                } else {
                    // fall back to simple auth because server told us so.
                    // do not change authMethod and useSasl here, we should start from secure when
                    // reconnecting because regionserver may change its sasl config after restart.
                }
            }
            this.in = new DataInputStream(new BufferedInputStream(inStream));
            this.out = new DataOutputStream(new BufferedOutputStream(outStream));
            // Now write out the connection header
            writeConnectionHeader();
            break;
        }
    } catch (Throwable t) {
        closeSocket();
        IOException e = ExceptionUtil.asInterrupt(t);
        if (e == null) {
            this.rpcClient.failedServers.addToFailedServers(remoteId.address);
            if (t instanceof LinkageError) {
                // probably the hbase hadoop version does not match the running hadoop version
                e = new DoNotRetryIOException(t);
            } else if (t instanceof IOException) {
                e = (IOException) t;
            } else {
                e = new IOException("Could not set up IO Streams to " + remoteId.address, t);
            }
        }
        throw e;
    }

    // start the receiver thread after the socket connection has been set up
    thread = new Thread(this, threadName);
    thread.setDaemon(true);
    thread.start();
}

From source file:org.apache.hadoop.distributedloadsimulator.sls.appmaster.MRAMSimulator.java
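
This YARN load-simulator application master packages its pending map/reduce container requests into an AllocateRequest, then issues the allocate RPC inside ugi.doAs so the call is made as a proxy user carrying the AMRM token.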

@Override
protected void sendContainerRequest() throws YarnException, IOException, InterruptedException {
    if (isFinished) {
        return;
    }

    //LOG.info("HOP :: Send container request ");
    // send out request
    List<ResourceRequest> ask = null;
    if (isAMContainerRunning) {
        if (mapFinished != mapTotal) {
            // map phase
            if (!pendingMaps.isEmpty()) {
                ask = packageRequests(pendingMaps, PRIORITY_MAP);
                scheduledMaps.addAll(pendingMaps);
                pendingMaps.clear();
            } else if (!pendingFailedMaps.isEmpty() && scheduledMaps.isEmpty()) {
                ask = packageRequests(pendingFailedMaps, PRIORITY_MAP);
                scheduledMaps.addAll(pendingFailedMaps);
                pendingFailedMaps.clear();
            }
        } else if (reduceFinished != reduceTotal) {
            // reduce phase
            if (!pendingReduces.isEmpty()) {
                ask = packageRequests(pendingReduces, PRIORITY_REDUCE);
                scheduledReduces.addAll(pendingReduces);
                pendingReduces.clear();
            } else if (!pendingFailedReduces.isEmpty() && scheduledReduces.isEmpty()) {
                ask = packageRequests(pendingFailedReduces, PRIORITY_REDUCE);
                scheduledReduces.addAll(pendingFailedReduces);
                pendingFailedReduces.clear();
            }
        }
        if (firstRequest) {
            firstRequest = false;
            startRequestingContainers = System.currentTimeMillis();
        }
    }
    if (ask == null) {
        ask = new ArrayList<ResourceRequest>();
    }

    final AllocateRequest request = createAllocateRequest(ask);
    if (totalContainers == 0) {
        request.setProgress(1.0f);
    } else {
        request.setProgress((float) finishedContainers / totalContainers);
    }
    if (ask.size() > 0) {
        int nbContainers = 0;
        for (ResourceRequest r : ask) {
            nbContainers += r.getNumContainers();
        }
        LOG.info("application " + appId + " requesting containers " + nbContainers);
    }

    AllocateResponse response = null;
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(appAttemptId.toString(),
            UserGroupInformation.getCurrentUser());
    ugi.setAuthenticationMethod(SaslRpcServer.AuthMethod.TOKEN);
    ugi.addCredentials(credentials);
    ugi.addToken(amRMToken);
    ugi.addTokenIdentifier(amRMToken.decodeIdentifier());
    response = ugi.doAs(new PrivilegedExceptionAction<AllocateResponse>() {

        @Override
        public AllocateResponse run() throws Exception {
            UserGroupInformation.getCurrentUser().addToken(amRMToken);
            InetSocketAddress resourceManagerAddress = conf.getSocketAddr(
                    YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
            SecurityUtil.setTokenService(amRMToken, resourceManagerAddress);
            ApplicationMasterProtocol appMasterProtocol = ClientRMProxy.createRMProxy(conf,
                    ApplicationMasterProtocol.class, true);
            AllocateResponse response = appMasterProtocol.allocate(request);
            RPC.stopProxy(appMasterProtocol);
            return response;
        }
    });

    if (response != null) {
        responseQueue.put(response);
    }
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java
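
StreamSets' HDFS target tears down its record writers and the FileSystem inside getUGI().doAs, so the close operations run with the same login credentials that were used to open them.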

public void destroy() {
    LOG.info("Destroy");
    try {
        getUGI().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (currentWriters != null) {
                    currentWriters.closeAll();
                }
                if (lateWriters != null) {
                    lateWriters.closeAll();
                }
                if (loginUgi != null) {
                    getFileSystemForInitDestroy().close();
                }
                return null;
            }
        });
    } catch (Exception ex) {
        LOG.warn("Error while closing HDFS FileSystem URI='{}': {}", hdfsUri, ex.toString(), ex);
    }
}

From source file:com.alibaba.jstorm.yarn.appmaster.JstormMaster.java
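
The JStorm application master starts its TimelineClient inside appSubmitterUgi.doAs; an UndeclaredThrowableException thrown by doAs is unwrapped and rethrown as a YarnException.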

void startTimelineClient(final Configuration conf) throws YarnException, IOException, InterruptedException {
    try {
        appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
                        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
                    // Creating the Timeline Client
                    timelineClient = TimelineClient.createTimelineClient();
                    timelineClient.init(conf);
                    timelineClient.start();
                } else {
                    timelineClient = null;
                    LOG.warn("Timeline service is not enabled");
                }
                return null;
            }
        });
    } catch (UndeclaredThrowableException e) {
        throw new YarnException(e.getCause());
    }
}

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java
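
This HBase test issues visibility-labelled deletes as the superuser by handing a PrivilegedExceptionAction<Void> to SUPERUSER.runAs, then scans with full authorizations to verify which cell versions survive.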

@Test
public void testVisibilityLabelsWithDeleteColumnsWithNoMatchVisExpWithMultipleVersionsNoTimestamp()
        throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = doPuts(tableName)) {
        TEST_UTIL.getAdmin().flush(tableName);
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
                    d.addColumns(fam, qual);
                    table.delete(d);

                    d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(SECRET));
                    d.addColumns(fam, qual);
                    table.delete(d);

                    d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(
                            "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
                    d.addColumns(fam, qual);
                    table.delete(d);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);
        Scan s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 2);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0,
                row1.length));
        cellScanner = next[1].cellScanner();
        cellScanner.advance();
        current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0,
                row2.length));
    }
}

From source file:org.apache.hadoop.ha.ZKFailoverController.java
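
Hadoop's ZKFailoverController cedes the active role on behalf of the login user: doCedeActive runs inside UserGroupInformation.getLoginUser().doAs, and an InterruptedException is rethrown as an IOException.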

/**
 * Request from graceful failover to cede active role. Causes
 * this ZKFC to transition its local node to standby, then quit
 * the election for the specified period of time, after which it
 * will rejoin iff it is healthy.
 */
void cedeActive(final int millisToCede) throws AccessControlException, ServiceFailedException, IOException {
    try {
        UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                doCedeActive(millisToCede);
                return null;
            }
        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}

From source file:org.apache.hadoop.mapred.JobClient.java
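
JobClient resolves its FileSystem handle lazily inside ugi.doAs, so the handle for the system directory is created with the job-submitting user's credentials.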

/**
 * Get a filesystem handle.  We need this to prepare jobs
 * for submission to the MapReduce system.
 *
 * @return the filesystem handle.
 * @throws IOException 
 */
public synchronized FileSystem getFs() throws IOException {
    if (this.fs == null) {
        try {
            this.fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws IOException {
                    Path sysDir = getSystemDir();
                    return sysDir.getFileSystem(getConf());
                }
            });
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
    return this.fs;
}

From source file:io.druid.security.kerberos.KerberosAuthenticator.java
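
Druid's Kerberos authenticator computes the SPNEGO challenge for the target host inside currentUser.doAs and attaches it as a "Negotiate" Authorization header whenever no auth cookie is already present for the request URI.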

@Override
public org.eclipse.jetty.client.HttpClient createEscalatedJettyClient(
        org.eclipse.jetty.client.HttpClient baseClient) {
    baseClient.getAuthenticationStore().addAuthentication(new Authentication() {
        @Override
        public boolean matches(String type, URI uri, String realm) {
            return true;
        }

        @Override
        public Result authenticate(final Request request, ContentResponse response,
                Authentication.HeaderInfo headerInfo, Attributes context) {
            return new Result() {
                @Override
                public URI getURI() {
                    return request.getURI();
                }

                @Override
                public void apply(Request request) {
                    try {
                        // No need to set cookies as they are handled by Jetty Http Client itself.
                        URI uri = request.getURI();
                        if (DruidKerberosUtil.needToSendCredentials(baseClient.getCookieStore(), uri)) {
                            log.debug(
                                    "No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ",
                                    uri, baseClient.getCookieStore().getCookies());
                            final String host = request.getHost();
                            DruidKerberosUtil.authenticateIfRequired(internalClientPrincipal,
                                    internalClientKeytab);
                            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
                            String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
                                @Override
                                public String run() throws Exception {
                                    return DruidKerberosUtil.kerberosChallenge(host);
                                }
                            });
                            request.getHeaders().add(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
                        } else {
                            log.debug("Found Auth Cookie found for URI[%s].", uri);
                        }
                    } catch (Throwable e) {
                        Throwables.propagate(e);
                    }
                }
            };
        }
    });
    return baseClient;
}

From source file:io.hops.security.TestUsersGroups.java
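
This HopsFS test exercises the users/groups cache, then obtains a FileSystem as a proxy user via ugi.doAs and verifies that the proxy user can create directories under the reowned path.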

public void setOwnerMultipleTimes(int cacheTime, int cacheSize) throws Exception {
    Configuration conf = new HdfsConfiguration();

    boolean cacheEnabled = (cacheTime != 0 && cacheSize != 0);

    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SECS, Integer.toString(cacheTime));
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SIZE, Integer.toString(cacheSize));

    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set(String.format("hadoop.proxyuser.%s.hosts", userName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.users", userName), "*");
    conf.set(String.format("hadoop.proxyuser.%s.groups", userName), "*");

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();

    Path base = new Path("/projects/project1");
    dfs.mkdirs(base);
    Path child = new Path(base, "dataset");
    dfs.mkdirs(child);

    dfs.setOwner(base, "testUser", "testGroup");

    removeGroup(UsersGroups.getGroupID("testGroup"));

    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.createProxyUserForTesting("testUser", UserGroupInformation.getLoginUser(),
                new String[] { "testGroup" });
    } catch (Exception e) {
        e.getCause().printStackTrace();
        // GroupNotFoundException is expected here: the group was removed above.
        if (!(e.getCause() instanceof GroupNotFoundException)) {
            throw e;
        }
    }

    if (cacheEnabled) {
        UsersGroups.clearCache();
    }

    if (cacheEnabled) {
        // The previous call to createProxyUserForTesting also creates any missing
        // users and groups. If the cache is enabled, the users/groups are served
        // from the cache and not created, so the group must be added explicitly here.
        UsersGroups.addGroup("testGroup");
    }

    ugi = UserGroupInformation.createProxyUserForTesting("testUser", UserGroupInformation.getLoginUser(),
            new String[] { "testGroup" });

    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {

        @Override
        public FileSystem run() throws Exception {
            return cluster.getFileSystem();
        }
    });

    fs.mkdirs(new Path(base, "testdir"));

    dfs.setOwner(base, "testUser", "testGroup");

    cluster.shutdown();
}