List of usage examples for java.security.PrivilegedExceptionAction
From source file: org.apache.atlas.hive.hook.HiveHook.java

@Override
public void run(final HookContext hookContext) throws Exception {
    // clone to avoid concurrent access
    try {
        final HiveEventContext event = new HiveEventContext();
        event.setInputs(hookContext.getInputs());
        event.setOutputs(hookContext.getOutputs());
        event.setHookType(hookContext.getHookType());

        final UserGroupInformation ugi = hookContext.getUgi() == null ? Utils.getUGI() : hookContext.getUgi();
        event.setUgi(ugi);
        event.setUser(getUser(hookContext.getUserName(), hookContext.getUgi()));
        event.setOperation(OPERATION_MAP.get(hookContext.getOperationName()));
        event.setQueryId(hookContext.getQueryPlan().getQueryId());
        event.setQueryStr(hookContext.getQueryPlan().getQueryStr());
        event.setQueryStartTime(hookContext.getQueryPlan().getQueryStartTime());
        event.setQueryType(hookContext.getQueryPlan().getQueryPlan().getQueryType());
        event.setLineageInfo(hookContext.getLinfo());

        if (executor == null) {
            collect(event);
            notifyAsPrivilegedAction(event);
        } else {
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        ugi.doAs(new PrivilegedExceptionAction<Object>() {
                            @Override
                            public Object run() throws Exception {
                                collect(event);
                                return event;
                            }
                        });
                        notifyAsPrivilegedAction(event);
                    } catch (Throwable e) {
                        LOG.error("Atlas hook failed due to error ", e);
                    }
                }
            });
        }
    } catch (Throwable t) {
        LOG.error("Submitting to thread pool failed due to error ", t);
    }
}
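The essential pattern in the example above is Hadoop's UserGroupInformation.doAs(PrivilegedExceptionAction&lt;T&gt;), which runs the action as a particular user and returns its result. A minimal, self-contained sketch of that pattern follows; doWork() is a hypothetical placeholder for the privileged work, and a real hook would use the caller's UGI rather than getCurrentUser().

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiDoAsSketch {
    // Hypothetical helper standing in for whatever must run as the target user.
    static String doWork() {
        return "done";
    }

    public static void main(String[] args) throws Exception {
        // doAs executes the action as this UGI and declares IOException/InterruptedException.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        String result = ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
                return doWork();
            }
        });
        System.out.println(result);
    }
}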
From source file: ca.nrc.cadc.web.SearchJobServlet.java

/**
 * Called by the server (via the <code>service</code> method) to allow a servlet to handle a POST request.
 *
 * The HTTP POST method allows the client to send data of unlimited length to the Web server a single time
 * and is useful when posting information such as credit card numbers.
 *
 * When overriding this method, read the request data, write the response headers, get the response's
 * writer or output stream object, and finally, write the response data. It's best to include content type
 * and encoding. When using a <code>PrintWriter</code> object to return the response, set the content type
 * before accessing the <code>PrintWriter</code> object.
 *
 * The servlet container must write the headers before committing the response, because in HTTP the headers
 * must be sent before the response body.
 *
 * Where possible, set the Content-Length header (with the {@link ServletResponse#setContentLength} method),
 * to allow the servlet container to use a persistent connection to return its response to the client,
 * improving performance. The content length is automatically set if the entire response fits inside the
 * response buffer.
 *
 * When using HTTP 1.1 chunked encoding (which means that the response has a Transfer-Encoding header),
 * do not set the Content-Length header.
 *
 * This method does not need to be either safe or idempotent. Operations requested through POST can have
 * side effects for which the user can be held accountable, for example, updating stored data or buying
 * items online.
 *
 * If the HTTP POST request is incorrectly formatted, <code>doPost</code> returns an HTTP "Bad Request"
 * message.
 *
 * @param request  an {@link HttpServletRequest} object that contains the request the client has made
 *                 of the servlet
 * @param response an {@link HttpServletResponse} object that contains the response the servlet sends
 *                 to the client
 * @throws IOException      if an input or output error is detected when the servlet handles the request
 * @throws ServletException if the request for the POST could not be handled
 * @see ServletOutputStream
 * @see ServletResponse#setContentType
 */
@Override
protected void doPost(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    try {
        final Subject subject = AuthenticationUtil.getSubject(request);
        if ((subject == null) || (subject.getPrincipals().isEmpty())) {
            processRequest(request, response);
        } else {
            Subject.doAs(subject, new PrivilegedExceptionAction<Object>() {
                @Override
                public Object run() throws Exception {
                    processRequest(request, response);
                    return null;
                }
            });
        }
    } catch (TransientException ex) {
        // OutputStream not open, write an error response
        response.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
        response.addHeader("Retry-After", Integer.toString(ex.getRetryDelay()));
        response.setContentType("text/plain");
        PrintWriter w = response.getWriter();
        w.println("failed to get or persist job state.");
        w.println(" reason: " + ex.getMessage());
        w.close();
    } catch (JobPersistenceException ex) {
        // OutputStream not open, write an error response
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        response.setContentType("text/plain");
        PrintWriter w = response.getWriter();
        w.println("failed to get or persist job state.");
        w.println(" reason: " + ex.getMessage());
        w.close();
    } catch (Throwable t) {
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        response.setContentType("text/plain");
        PrintWriter w = response.getWriter();
        w.println("Unable to proceed with job execution.\n");
        w.println("Reason: " + t.getMessage());
        w.close();
    }
}
From source file: org.apache.hadoop.crypto.key.kms.server.KMS.java

@POST
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response rolloverKey(@PathParam("name") final String name, Map jsonMaterial) throws Exception {
    try {
        LOG.trace("Entering rolloverKey Method.");
        KMSWebApp.getAdminCallsMeter().mark();
        UserGroupInformation user = HttpUserGroupInformation.get();
        assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
        KMSClientProvider.checkNotEmpty(name, "name");
        LOG.debug("Rolling key with name {}.", name);
        final String material = (String) jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
        if (material != null) {
            assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, KMSOp.ROLL_NEW_VERSION, name);
        }

        KeyProvider.KeyVersion keyVersion = user.doAs(new PrivilegedExceptionAction<KeyVersion>() {
            @Override
            public KeyVersion run() throws Exception {
                KeyVersion keyVersion = (material != null)
                        ? provider.rollNewVersion(name, Base64.decodeBase64(material))
                        : provider.rollNewVersion(name);
                provider.flush();
                return keyVersion;
            }
        });

        kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name,
                "UserProvidedMaterial:" + (material != null) + " NewVersion:" + keyVersion.getVersionName());
        if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
            keyVersion = removeKeyMaterial(keyVersion);
        }
        Map json = KMSServerJSONUtils.toJSON(keyVersion);
        LOG.trace("Exiting rolloverKey Method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
    } catch (Exception e) {
        LOG.debug("Exception in rolloverKey.", e);
        throw e;
    }
}
From source file: com.example.ManualSpnegoNegotiateServlet.java

/**
 * Use of Kerberos is wrapped in an HTTP auth-scheme of "Negotiate" [RFC 4559].
 *
 * The auth-params exchanged use data formats defined for use with the GSS-API [RFC 2743]. In particular,
 * they follow the formats set for the SPNEGO [RFC 4178] and Kerberos [RFC 4121] mechanisms for GSSAPI.
 * The "Negotiate" auth-scheme calls for the use of SPNEGO GSSAPI tokens that the specific mechanism type
 * specifies.
 *
 * The current implementation of this protocol is limited to the use of SPNEGO with the Kerberos protocol.
 *
 * @param request
 * @param response
 * @throws ServletException
 *
 * @return true upon successful authentication, false otherwise
 */
protected boolean attemptNegotiation(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, UnsupportedEncodingException, IOException {
    log.debug("Attempting negotiation.");

    String header = request.getHeader("Authorization");

    /**
     * Guard clause to check for Negotiate header.
     *
     * If the server receives a request for an access-protected object, and if an acceptable Authorization
     * header has not been sent, the server responds with a "401 Unauthorized" status code, and a
     * "WWW-Authenticate:" header as per the framework described in [RFC 2616]. The initial WWW-Authenticate
     * header will not carry any gssapi-data.
     */
    if (header == null || header.length() < 10 || !header.startsWith("Negotiate ")) {
        response.setHeader("WWW-Authenticate", "Negotiate");
        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
        log.debug("Proper authorization header not found, returning challenge.");
        return false;
    }

    /**
     * A client may initiate a connection to the server with an "Authorization" header containing the
     * initial token for the server. This form will bypass the initial 401 error from the server when the
     * client knows that the server will accept the Negotiate HTTP authentication type.
     */
    log.debug("Authorization header found, continuing negotiation.");

    /**
     * The data following the word Negotiate is the GSS-API data to process.
     */
    byte gssapiData[] = Base64.decode(header.substring(10));

    log.debug("GSS API data: " + Arrays.toString(gssapiData));

    /**
     * Guard clause to check for the unsupported NTLM authentication mechanism.
     */
    if (isNtlmMechanism(gssapiData)) {
        log.warn("Got request for unsupported NTLM mechanism, aborting negotiation.");
        return false;
    }

    /**
     * The server attempts to establish a security context. Establishment may result in tokens that the
     * server must return to the client. Tokens are BASE-64 encoded GSS-API data.
     */
    GSSContext gssContext = null;
    LoginContext loginContext = null;
    String outToken = null;

    try {
        final String domainUsername = "Zeus";
        final String domainUserPassword = "Z3usP@55";
        final CallbackHandler handler =
                SpnegoProvider.getUsernamePasswordHandler(domainUsername, domainUserPassword);
        loginContext = new LoginContext("spnego-server", handler);
        loginContext.login();
        Subject subject = loginContext.getSubject();

        Oid spnegoOid = new Oid("1.3.6.1.5.5.2"); // for spnego answers
        Oid kerbv5Oid = new Oid("1.2.840.113554.1.2.2"); // for chromium (they send a kerbv5 token instead of spnego)
        final Oid[] oids = new Oid[] { spnegoOid, kerbv5Oid };

        final GSSManager manager = GSSManager.getInstance();
        final PrivilegedExceptionAction<GSSCredential> action = new PrivilegedExceptionAction<GSSCredential>() {
            public GSSCredential run() throws GSSException {
                return manager.createCredential(null, GSSCredential.INDEFINITE_LIFETIME, oids,
                        GSSCredential.ACCEPT_ONLY);
            }
        };

        GSSCredential serverCreds = Subject.doAs(subject, action);

        log.debug("Mechs: " + Arrays.toString(serverCreds.getMechs()));

        gssContext = manager.createContext(serverCreds);

        log.debug("Context created. " + gssContext);

        byte tokenBytes[] = gssContext.acceptSecContext(gssapiData, 0, gssapiData.length);
        outToken = Base64.encode(tokenBytes);
    } catch (PrivilegedActionException ex) {
        log.error("", ex);
    } catch (LoginException ex) {
        log.error("", ex);
    } catch (GSSException gsse) {
        gsse.printStackTrace();
        log.error("GSSException: " + gsse.getMessage());
        log.error("GSSException major: " + gsse.getMajorString());
        log.error("GSSException minor: " + gsse.getMinorString());
        throw new ServletException(gsse);
    }

    /**
     * If the context is established, we can attempt to retrieve the name of the "context initiator."
     * In the case of the Kerberos mechanism, the context initiator is the Kerberos principal of the client.
     * Additionally, the client may be delegating credentials.
     */
    if (gssContext != null && gssContext.isEstablished()) {
        log.debug("Context established, attempting Kerberos principal retrieval.");

        try {
            Subject subject = new Subject();
            GSSName clientGSSName = gssContext.getSrcName();
            KerberosPrincipal clientPrincipal = new KerberosPrincipal(clientGSSName.toString());
            subject.getPrincipals().add(clientPrincipal);
            log.info("Got client Kerberos principal: " + clientGSSName);
            response.getWriter().println("Hello, " + clientPrincipal);

            /**
             * Retrieve LogonInfo (for example, GroupSIDs) from the PAC Authorization Data from a Kerberos
             * Ticket that was issued by Active Directory.
             */
            byte[] kerberosTokenData = gssapiData;
            try {
                SpnegoToken token = SpnegoToken.parse(gssapiData);
                kerberosTokenData = token.getMechanismToken();
            } catch (DecodingException dex) {
                // Chromium bug: sends a Kerberos response instead of an spnego response with a Kerberos mechanism
            } catch (Exception ex) {
                log.error("", ex);
            }

            try {
                Object[] keyObjs = IteratorUtils
                        .toArray(loginContext.getSubject().getPrivateCredentials(KerberosKey.class).iterator());
                KerberosKey[] keys = new KerberosKey[keyObjs.length];
                System.arraycopy(keyObjs, 0, keys, 0, keyObjs.length);

                KerberosToken token = new KerberosToken(kerberosTokenData, keys);
                log.info("Authorizations: ");
                for (KerberosAuthData authData : token.getTicket().getEncData().getUserAuthorizations()) {
                    if (authData instanceof KerberosPacAuthData) {
                        PacSid[] groupSIDs = ((KerberosPacAuthData) authData).getPac().getLogonInfo()
                                .getGroupSids();
                        log.info("GroupSids: " + Arrays.toString(groupSIDs));
                        response.getWriter().println("Found group SIDs: " + Arrays.toString(groupSIDs));
                    } else {
                        log.info("AuthData without PAC: " + authData.toString());
                    }
                }
            } catch (Exception ex) {
                log.error("", ex);
            }

            if (gssContext.getCredDelegState()) {
                GSSCredential delegateCredential = gssContext.getDelegCred();
                GSSName delegateGSSName = delegateCredential.getName();
                Principal delegatePrincipal = new KerberosPrincipal(delegateGSSName.toString());
                subject.getPrincipals().add(delegatePrincipal);
                subject.getPrivateCredentials().add(delegateCredential);
                log.info("Got delegated Kerberos principal: " + delegateGSSName);
            }

            /**
             * A status code 200 status response can also carry a "WWW-Authenticate" response header
             * containing the final leg of an authentication. In this case, the gssapi-data will be present.
             */
            if (outToken != null && outToken.length() > 0) {
                response.setHeader("WWW-Authenticate", "Negotiate " + outToken);
                response.setStatus(HttpServletResponse.SC_OK);
                log.debug("Returning final authentication data to client to complete context.");
                log.debug("Negotiation completed.");
                return true;
            }
        } catch (GSSException gsse) {
            log.error("GSSException: " + gsse.getMessage());
            log.error("GSSException major: " + gsse.getMajorString());
            log.error("GSSException minor: " + gsse.getMinorString());
            response.addHeader("Client-Warning", gsse.getMessage());
            response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
        }
    } else {
        /**
         * Any returned code other than a success 2xx code represents an authentication error. If a 401
         * containing a "WWW-Authenticate" header with "Negotiate" and gssapi-data is returned from the
         * server, it is a continuation of the authentication request.
         */
        if (outToken != null && outToken.length() > 0) {
            response.setHeader("WWW-Authenticate", "Negotiate " + outToken);
            response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
            log.debug("Additional authentication processing required, returning token.");
            return false;
        } else {
            response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
            log.warn("Kerberos negotiation failed.");
        }
    }

    log.debug("Negotiation completed.");
    return true;
}
From source file: org.apache.hadoop.hdfs.security.TestDelegationToken.java

@Test
public void testDelegationTokenWithDoAs() throws Exception {
    final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken(new Text("JobTracker"));
    final UserGroupInformation longUgi = UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
    final UserGroupInformation shortUgi = UserGroupInformation.createRemoteUser("JobTracker");
    LOG.info("cluster at: " + dfs.getUri() + " token for: " + token.getService());
    longUgi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException {
            final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
            try {
                // try renew with long name
                dfs.renewDelegationToken(token);
            } catch (IOException e) {
                LOG.error("caught unexpected exception out of renew", e);
                Assert.fail("Could not renew delegation token for user " + longUgi);
            }
            return null;
        }
    });
    shortUgi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException {
            final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
            dfs.renewDelegationToken(token);
            return null;
        }
    });
    longUgi.doAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws IOException {
            final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
            try {
                // try cancel with long name
                dfs.cancelDelegationToken(token);
            } catch (IOException e) {
                Assert.fail("Could not cancel delegation token for user " + longUgi);
            }
            return null;
        }
    });
}
From source file: org.apache.hadoop.distributedloadsimulator.sls.appmaster.MRAMSimulator.java

/**
 * send out request for AM container
 *
 * @throws org.apache.hadoop.yarn.exceptions.YarnException
 * @throws java.io.IOException
 * @throws java.lang.InterruptedException
 */
protected void requestAMContainer() throws YarnException, IOException, InterruptedException {
    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
    ResourceRequest amRequest = createResourceRequest(
            BuilderUtils.newResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB, MR_AM_CONTAINER_RESOURCE_VCORES),
            ResourceRequest.ANY, 1, 1);
    ask.add(amRequest);
    final AllocateRequest request = this.createAllocateRequest(ask);

    AllocateResponse response = null;

    UserGroupInformation ugi = UserGroupInformation.createProxyUser(appAttemptId.toString(),
            UserGroupInformation.getCurrentUser());
    ugi.setAuthenticationMethod(SaslRpcServer.AuthMethod.TOKEN);
    ugi.addCredentials(credentials);
    ugi.addToken(amRMToken);
    ugi.addTokenIdentifier(amRMToken.decodeIdentifier());

    response = ugi.doAs(new PrivilegedExceptionAction<AllocateResponse>() {
        @Override
        public AllocateResponse run() throws Exception {
            UserGroupInformation.getCurrentUser().addToken(amRMToken);
            InetSocketAddress resourceManagerAddress = conf.getSocketAddr(
                    YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
                    YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
            SecurityUtil.setTokenService(amRMToken, resourceManagerAddress);
            ApplicationMasterProtocol appMasterProtocol = ClientRMProxy.createRMProxy(conf,
                    ApplicationMasterProtocol.class, true);
            AllocateResponse response = appMasterProtocol.allocate(request);
            RPC.stopProxy(appMasterProtocol);
            return response;
        }
    });

    // waiting until the AM container is allocated
    while (true) {
        if (response != null && !response.getAllocatedContainers().isEmpty()) {
            // get AM container
            Container container = response.getAllocatedContainers().get(0);
            for (AMNMCommonObject remoteConnection : RemoteConnections) {
                if (remoteConnection.isNodeExist(container.getNodeId().toString())) {
                    remoteConnection.addNewContainer(container.getId().toString(),
                            container.getNodeId().toString(), container.getNodeHttpAddress(),
                            container.getResource().getMemory(), container.getResource().getVirtualCores(),
                            container.getPriority().getPriority(), -1L);
                }
            }
            // start AM container
            ContainersStartTimes.put(container.getId().toString(), System.currentTimeMillis());
            amContainer = container;
            isAMContainerRunning = true;
            applicationMasterWaitTime = System.currentTimeMillis() - applicationStartTime;
            try {
                primaryRemoteConnection.addApplicationMasterWaitTime(applicationMasterWaitTime);
            } catch (RemoteException e) {
                LOG.error(e, e);
            }
            break;
        }
        // this sleep time is different from HeartBeat
        Thread.sleep(1000);
        // send out empty request
        sendContainerRequest();
        response = responseQueue.take();
    }
}
From source file: org.apache.axiom.om.util.StAXUtils.java

public static XMLStreamReader createXMLStreamReader(StAXParserConfiguration configuration,
        final InputStream in) throws XMLStreamException {
    final XMLInputFactory inputFactory = getXMLInputFactory(configuration);
    try {
        XMLStreamReader reader = (XMLStreamReader) AccessController
                .doPrivileged(new PrivilegedExceptionAction() {
                    public Object run() throws XMLStreamException {
                        return inputFactory.createXMLStreamReader(in);
                    }
                });
        if (isDebugEnabled) {
            log.debug("XMLStreamReader is " + reader.getClass().getName());
        }
        return reader;
    } catch (PrivilegedActionException pae) {
        throw (XMLStreamException) pae.getException();
    }
}
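The Axiom helper above uses the raw PrivilegedExceptionAction type and casts the result; with the generic form the cast on the result disappears while the checked exception is still delivered wrapped in PrivilegedActionException. A minimal sketch of that variant, assuming nothing beyond the JDK; the file path and openConfig() helper are illustrative placeholders.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class DoPrivilegedSketch {
    static FileInputStream openConfig(final String path) throws FileNotFoundException {
        try {
            // The type parameter carries the result type, so no cast on the return value is needed.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException pae) {
            // doPrivileged wraps the checked exception thrown by run(); unwrap and rethrow it.
            throw (FileNotFoundException) pae.getException();
        }
    }

    public static void main(String[] args) throws FileNotFoundException {
        System.out.println(openConfig("/etc/hosts").getClass().getName());
    }
}

On current JDKs AccessController is deprecated along with the Security Manager, so this form mainly matters when reading or maintaining existing code such as the Axiom helper above.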
From source file: org.apache.hadoop.ipc.MiniRPCBenchmark.java

long connectToServerUsingDelegationToken(final Configuration conf, final InetSocketAddress addr)
        throws IOException {
    MiniProtocol client = null;
    try {
        long start = Time.now();
        try {
            client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
                @Override
                public MiniProtocol run() throws IOException {
                    return RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf);
                }
            });
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        long end = Time.now();
        return end - start;
    } finally {
        RPC.stopProxy(client);
    }
}
From source file: com.trendmicro.hdfs.webdav.HDFSResource.java

@Override
public boolean exists() {
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Testing existence of '" + path + "'");
        }
        return user.doAs(new PrivilegedExceptionAction<Boolean>() {
            public Boolean run() throws Exception {
                return FileSystem.get(conf).exists(path);
            }
        });
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
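PrivilegedExceptionAction has a single run() method, so on Java 8 and later these anonymous classes can also be written as lambdas; an explicit target-type cast is usually needed because doAs is overloaded for both PrivilegedAction and PrivilegedExceptionAction. A minimal sketch of the same existence check in lambda form, assuming the Hadoop FileSystem API used above; the conf, path, and ugi variables here are stand-ins for the example's fields.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class LambdaDoAsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp");
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // The cast selects the PrivilegedExceptionAction overload of doAs.
        boolean exists = ugi.doAs(
                (PrivilegedExceptionAction<Boolean>) () -> FileSystem.get(conf).exists(path));
        System.out.println("exists: " + exists);
    }
}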
From source file: org.apache.hadoop.hbase.thrift.TestThriftSpnegoHttpServer.java

private CloseableHttpClient createHttpClient() throws Exception {
    final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(clientPrincipal, clientKeytab);
    final Set<Principal> clientPrincipals = clientSubject.getPrincipals();
    // Make sure the subject has a principal
    assertFalse(clientPrincipals.isEmpty());

    // Get a TGT for the subject (might have many, different encryption types). The first should
    // be the default encryption type.
    Set<KerberosTicket> privateCredentials = clientSubject.getPrivateCredentials(KerberosTicket.class);
    assertFalse(privateCredentials.isEmpty());
    KerberosTicket tgt = privateCredentials.iterator().next();
    assertNotNull(tgt);

    // The name of the principal
    final String clientPrincipalName = clientPrincipals.iterator().next().getName();

    return Subject.doAs(clientSubject, new PrivilegedExceptionAction<CloseableHttpClient>() {
        @Override
        public CloseableHttpClient run() throws Exception {
            // Logs in with Kerberos via GSS
            GSSManager gssManager = GSSManager.getInstance();
            // jGSS Kerberos login constant
            Oid oid = new Oid("1.2.840.113554.1.2.2");
            GSSName gssClient = gssManager.createName(clientPrincipalName, GSSName.NT_USER_NAME);
            GSSCredential credential = gssManager.createCredential(gssClient,
                    GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY);

            Lookup<AuthSchemeProvider> authRegistry = RegistryBuilder.<AuthSchemeProvider>create()
                    .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build();

            BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential));

            return HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry)
                    .setDefaultCredentialsProvider(credentialsProvider).build();
        }
    });
}