List of usage examples for java.security.PrivilegedExceptionAction
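PrivilegedExceptionAction is the checked-exception variant of PrivilegedAction: the work that must run with the caller's own privileges goes in run(), which may throw a checked exception, and AccessController.doPrivileged wraps any such exception in a PrivilegedActionException for the caller to unwrap. A minimal sketch of that core pattern before the real-world examples below (the file name "app.conf" is illustrative, not taken from any example on this page):

    import java.io.FileInputStream;
    import java.io.FileNotFoundException;
    import java.security.AccessController;
    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;

    public class PrivilegedExceptionActionSketch {
        public static FileInputStream openConfig() throws FileNotFoundException {
            try {
                // run() may throw a checked exception; doPrivileged wraps it
                // in a PrivilegedActionException for the caller to unwrap.
                return AccessController.doPrivileged(
                        new PrivilegedExceptionAction<FileInputStream>() {
                            @Override
                            public FileInputStream run() throws FileNotFoundException {
                                // "app.conf" is an illustrative path
                                return new FileInputStream("app.conf");
                            }
                        });
            } catch (PrivilegedActionException e) {
                // getException() returns the original checked exception from run()
                throw (FileNotFoundException) e.getException();
            }
        }
    }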
From source file:com.streamsets.pipeline.stage.lib.hive.HiveMetastoreUtil.java
/**
 * Returns the HDFS path where the Avro schema is stored after serialization.
 * The path is suffixed with the current time so the schema files have an ordering.
 * @param rootTableLocation Root table location
 * @return HDFS path string
 */
public static String serializeSchemaToHDFS(UserGroupInformation loginUGI, final FileSystem fs,
        final String rootTableLocation, final String schemaJson) throws StageException {
    final String folderPath = rootTableLocation + HiveMetastoreUtil.SEP
            + HiveMetastoreUtil.HDFS_SCHEMA_FOLDER_NAME;
    final Path schemasFolderPath = new Path(folderPath);
    final String path = folderPath + SEP + HiveMetastoreUtil.AVRO_SCHEMA
            + DateFormatUtils.format(new Date(System.currentTimeMillis()), "yyyy-MM-dd--HH_mm_ss");
    try {
        loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (!fs.exists(schemasFolderPath)) {
                    fs.mkdirs(schemasFolderPath);
                }
                Path schemaFilePath = new Path(path);
                // This should never happen unless two HMS targets are writing; we error out
                // and let the user handle it via error record handling.
                if (!fs.exists(schemaFilePath)) {
                    try (FSDataOutputStream os = fs.create(schemaFilePath)) {
                        os.writeChars(schemaJson);
                    }
                } else {
                    LOG.error(Utils.format("Schema file {} already exists in HDFS", path));
                    throw new IOException("Schema file already exists");
                }
                return null;
            }
        });
    } catch (Exception e) {
        LOG.error("Error writing schema to HDFS: " + e.toString(), e);
        throw new StageException(Errors.HIVE_18, path, e.getMessage());
    }
    return path;
}
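Hadoop's UserGroupInformation.doAs, used above, accepts the same PrivilegedExceptionAction interface. Since the interface has a single abstract method, the anonymous class can be replaced with a lambda; the explicit cast selects the PrivilegedExceptionAction overload of doAs rather than the PrivilegedAction one. A minimal sketch, assuming a directory-creation task like the one above (the class and method names here are hypothetical):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsLambdaSketch {
        // Hypothetical helper: creates a directory as the given user, mirroring
        // the doAs call above but with a lambda instead of an anonymous class.
        static void mkdirsAs(UserGroupInformation ugi, Configuration conf, Path dir)
                throws Exception {
            // The cast disambiguates between the PrivilegedAction and
            // PrivilegedExceptionAction overloads of doAs.
            ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
                FileSystem fs = FileSystem.get(conf);
                if (!fs.exists(dir)) {
                    fs.mkdirs(dir);
                }
                return null;
            });
        }
    }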
From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java
@Test
public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersions() throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = doPuts(tableName)) {
        TEST_UTIL.getAdmin().flush(tableName);
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(
                            "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
                    d.addColumns(fam, qual, 125L);
                    table.delete(d);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);
        TEST_UTIL.getAdmin().flush(tableName);
        Scan s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertTrue(next.length == 2);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
                row1, 0, row1.length));
        assertEquals(current.getTimestamp(), 127L);
        cellScanner.advance();
        current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
                row1, 0, row1.length));
        assertEquals(current.getTimestamp(), 126L);
        cellScanner.advance();
        current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
                row1, 0, row1.length));
        assertEquals(current.getTimestamp(), 125L);
        cellScanner = next[1].cellScanner();
        cellScanner.advance();
        current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(),
                row2, 0, row2.length));
    }
}
From source file:org.apache.catalina.core.ApplicationContextFacade.java
/**
 * Executes the method of the specified <code>ApplicationContext</code>.
 * @param method The method object to be invoked.
 * @param context The ApplicationContext object on which the method will be invoked.
 * @param params The arguments passed to the called method.
 */
private Object executeMethod(final Method method, final ApplicationContext context, final Object[] params)
        throws PrivilegedActionException, IllegalAccessException, InvocationTargetException {
    if (System.getSecurityManager() != null) {
        return AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws IllegalAccessException, InvocationTargetException {
                return method.invoke(context, params);
            }
        });
    } else {
        return method.invoke(context, params);
    }
}
From source file:org.apache.hadoop.mapred.JobLocalizer.java
public void localizeJobFiles(final JobID jobid, JobConf jConf, Path localJobFile, Path localJobTokenFile,
        final TaskUmbilicalProtocol taskTracker) throws IOException, InterruptedException {
    // Download the job.jar for this job from the system FS
    localizeJobJarFile(jConf);

    jConf.set(JOB_LOCAL_CTXT, ttConf.get(JOB_LOCAL_CTXT));

    // update the config some more
    jConf.set(TokenCache.JOB_TOKENS_FILENAME, localJobTokenFile.toString());
    jConf.set(JobConf.MAPRED_LOCAL_DIR_PROPERTY, ttConf.get(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
    TaskTracker.resetNumTasksPerJvm(jConf);

    // set up the distributed cache
    final long[] sizes = downloadPrivateCache(jConf);
    if (sizes != null) {
        // The following doAs is required because the DefaultTaskController
        // calls the localizeJobFiles method in the context of the TaskTracker
        // process. The JVM authorization check would fail without this doAs.
        // In the LinuxTC case, it does no harm.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(jobid.toString());
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            public Object run() throws IOException {
                taskTracker.updatePrivateDistributedCacheSizes(jobid, sizes);
                return null;
            }
        });
    }

    // Create the job-acls.xml file in the job userlog dir and write the info
    // needed to authorize users to view this job's task logs.
    writeJobACLs(jConf, new Path(TaskLog.getJobDir(jobid).toURI().toString()));

    // write the updated jobConf file into the job directory
    JobLocalizer.writeLocalJobFile(localJobFile, jConf);
}
From source file:org.apache.hadoop.hdfs.server.namenode.NameNode.java
@SuppressWarnings("deprecation") private void startHttpServer(final Configuration conf) throws IOException { final String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress", "dfs.info.port", "dfs.http.address"); final InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); if (UserGroupInformation.isSecurityEnabled()) { String httpsUser = SecurityUtil.getServerPrincipal( conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoSocAddr.getHostName()); if (httpsUser == null) { LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY + " not defined in config. Starting http server as " + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), serverAddress.getHostName()) + ": Kerberized SSL may be not function correctly."); } else {//from w ww . j av a 2 s. c om // Kerberized SSL servers must be run from the host principal... LOG.info("Logging in as " + httpsUser + " to start http server."); SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoSocAddr.getHostName()); } } UserGroupInformation ugi = UserGroupInformation.getLoginUser(); try { this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() { @Override public HttpServer run() throws IOException, InterruptedException { String infoHost = infoSocAddr.getHostName(); int infoPort = infoSocAddr.getPort(); httpServer = new HttpServer("hdfs", infoHost, infoPort, infoPort == 0, conf, SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN)) { { if (WebHdfsFileSystem.isEnabled(conf, LOG)) { //add SPNEGO authentication filter for webhdfs final String name = "SPNEGO"; final String classname = AuthFilter.class.getName(); final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; Map<String, String> params = getAuthFilterParams(conf); defineFilter(webAppContext, name, classname, params, new String[] { pathSpec }); LOG.info("Added filter '" + name + "' (class=" + classname + ")"); // add webhdfs packages addJerseyResourcePackage(NamenodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(), pathSpec); } } private Map<String, String> getAuthFilterParams(Configuration conf) throws IOException { Map<String, String> params = new HashMap<String, String>(); String principalInConf = conf .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); if (principalInConf != null && !principalInConf.isEmpty()) { params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SecurityUtil .getServerPrincipal(principalInConf, serverAddress.getHostName())); } String httpKeytab = conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); if (httpKeytab != null && !httpKeytab.isEmpty()) { params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, httpKeytab); } return params; } }; boolean certSSL = conf.getBoolean("dfs.https.enable", false); boolean useKrb = UserGroupInformation.isSecurityEnabled(); if (certSSL || useKrb) { boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); InetSocketAddress secInfoSocAddr = NetUtils .createSocketAddr(infoHost + ":" + conf.get("dfs.https.port", infoHost + ":" + 0)); Configuration sslConf = new Configuration(false); if (certSSL) { sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml")); } httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, useKrb); // assume same ssl port for all datanodes InetSocketAddress datanodeSslPort = NetUtils 
.createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475)); httpServer.setAttribute("datanode.https.port", datanodeSslPort.getPort()); } httpServer.setAttribute("name.node", NameNode.this); httpServer.setAttribute("name.node.address", getNameNodeAddress()); httpServer.setAttribute("name.system.image", getFSImage()); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); httpServer.addInternalServlet("getDelegationToken", GetDelegationTokenServlet.PATH_SPEC, GetDelegationTokenServlet.class, true); httpServer.addInternalServlet("renewDelegationToken", RenewDelegationTokenServlet.PATH_SPEC, RenewDelegationTokenServlet.class, true); httpServer.addInternalServlet("cancelDelegationToken", CancelDelegationTokenServlet.PATH_SPEC, CancelDelegationTokenServlet.class, true); httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true); httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class, true); httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class, false); httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class, false); httpServer.addInternalServlet("checksum", "/fileChecksum/*", FileChecksumServlets.RedirectServlet.class, false); httpServer.addInternalServlet("contentSummary", "/contentSummary/*", ContentSummaryServlet.class, false); httpServer.start(); // The web-server port can be ephemeral... ensure we have the correct info infoPort = httpServer.getPort(); httpAddress = new InetSocketAddress(infoHost, infoPort); conf.set("dfs.http.address", infoHost + ":" + infoPort); LOG.info("Web-server up at: " + infoHost + ":" + infoPort); return httpServer; } }); } catch (InterruptedException e) { throw new IOException(e); } finally { if (UserGroupInformation.isSecurityEnabled() && conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) { // Go back to being the correct Namenode principal LOG.info("Logging back in as " + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), serverAddress.getHostName()) + " following http server start."); SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, serverAddress.getHostName()); } } }
From source file:org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure.java
/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final DeleteColumnFamilyState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case DELETE_COLUMN_FAMILY_PRE_OPERATION:
                    cpHost.preDeleteColumnHandler(tableName, familyName);
                    break;
                case DELETE_COLUMN_FAMILY_POST_OPERATION:
                    cpHost.postDeleteColumnHandler(tableName, familyName);
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}
From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsReplication.java
public static void addLabels() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action =
            new PrivilegedExceptionAction<VisibilityLabelsResponse>() {
        public VisibilityLabelsResponse run() throws Exception {
            String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, UNICODE_VIS_TAG };
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                VisibilityClient.addLabels(conn, labels);
            } catch (Throwable t) {
                throw new IOException(t);
            }
            return null;
        }
    };
    SUPERUSER.runAs(action);
}
From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java
@GET
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersion(@PathParam("versionName") final String versionName) throws Exception {
    try {
        LOG.trace("Entering getKeyVersion method.");
        UserGroupInformation user = HttpUserGroupInformation.get();
        KMSClientProvider.checkNotEmpty(versionName, "versionName");
        KMSWebApp.getKeyCallsMeter().mark();
        assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
        LOG.debug("Getting key with version name {}.", versionName);
        KeyVersion keyVersion = user.doAs(new PrivilegedExceptionAction<KeyVersion>() {
            @Override
            public KeyVersion run() throws Exception {
                return provider.getKeyVersion(versionName);
            }
        });
        if (keyVersion != null) {
            kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
        }
        Object json = KMSServerJSONUtils.toJSON(keyVersion);
        LOG.trace("Exiting getKeyVersion method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
    } catch (Exception e) {
        LOG.debug("Exception in getKeyVersion.", e);
        throw e;
    }
}
From source file:org.apache.axis2.jaxws.util.WSDL4JWrapper.java
private URL getURLFromJAR(URLClassLoader urlLoader, URL relativeURL) {
    URL[] urlList = null;
    ResourceFinderFactory rff = (ResourceFinderFactory) MetadataFactoryRegistry
            .getFactory(ResourceFinderFactory.class);
    ResourceFinder cf = rff.getResourceFinder();
    if (log.isDebugEnabled()) {
        log.debug("ResourceFinderFactory: " + rff.getClass().getName());
        log.debug("ResourceFinder: " + cf.getClass().getName());
    }

    urlList = cf.getURLs(urlLoader);
    if (urlList == null) {
        if (log.isDebugEnabled()) {
            log.debug("No URLs found in URL ClassLoader");
        }
        throw ExceptionFactory.makeWebServiceException(Messages.getMessage("WSDL4JWrapperErr1"));
    }

    for (URL url : urlList) {
        if ("file".equals(url.getProtocol())) {
            // Ensure that Windows spaces are properly handled in the URL
            final File f = new File(url.getPath().replaceAll("%20", " "));
            // If the file is not a directory, it is a jar file
            if (isAFile(f)) {
                try {
                    JarFile jf = (JarFile) AccessController.doPrivileged(new PrivilegedExceptionAction() {
                        public Object run() throws IOException {
                            return new JarFile(f);
                        }
                    });
                    Enumeration<JarEntry> entries = jf.entries();
                    // Read all entries in the jar file and return the first
                    // wsdl file that matches the relative path
                    while (entries.hasMoreElements()) {
                        JarEntry je = entries.nextElement();
                        String name = je.getName();
                        if (name.endsWith(".wsdl")) {
                            String relativePath = relativeURL.getPath();
                            if (relativePath.endsWith(name)) {
                                String path = f.getAbsolutePath();
                                // This check is necessary because Unix/Linux file paths begin
                                // with a '/'. When adding the prefix 'jar:file:/' we may end
                                // up with '//' after the 'file:' part. This causes the URL
                                // object to treat this like a remote resource.
                                if (path != null && path.indexOf("/") == 0) {
                                    path = path.substring(1, path.length());
                                }
                                URL absoluteUrl = new URL("jar:file:/" + path + "!/" + je.getName());
                                return absoluteUrl;
                            }
                        }
                    }
                } catch (Exception e) {
                    throw ExceptionFactory.makeWebServiceException(e);
                }
            }
        }
    }
    return null;
}
From source file:com.trendmicro.hdfs.webdav.HDFSResource.java
@Override
public boolean isCollection() {
    try {
        return user.doAs(new PrivilegedExceptionAction<Boolean>() {
            public Boolean run() throws Exception {
                return FileSystem.get(conf).getFileStatus(path).isDir();
            }
        });
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}