Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects example usages of java.security.PrivilegedExceptionAction from open source projects. In each snippet the action is implemented as an anonymous class and executed either through AccessController.doPrivileged or through a doAs/runAs call that runs the action as a particular user (for example Hadoop's UserGroupInformation.doAs), so that checked exceptions thrown by run() can be propagated.

Prototype

PrivilegedExceptionAction
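
In java.security this is a single-method interface whose run() method may throw any checked exception:

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}

Before the project excerpts below, here is a minimal, self-contained sketch of the typical pattern; it is a generic illustration with a hypothetical openPrivileged helper, not code taken from any of the listed projects. The action is implemented as an anonymous class, passed to AccessController.doPrivileged, and any checked exception thrown by run() comes back wrapped in a PrivilegedActionException.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {

    // Hypothetical helper: open a file inside a privileged block.
    public static FileInputStream openPrivileged(final String path) throws FileNotFoundException {
        try {
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                public FileInputStream run() throws FileNotFoundException {
                    // Runs with the privileges of this class's protection domain,
                    // independent of less-privileged callers further up the stack.
                    return new FileInputStream(path);
                }
            });
        } catch (PrivilegedActionException e) {
            // doPrivileged wraps checked exceptions thrown by run(); unwrap the original.
            throw (FileNotFoundException) e.getException();
        }
    }
}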

Usage

From source file: org.apache.axis2.builder.BuilderUtil.java

/**
 * Use the BOM mark to identify the encoding to be used. Fall back to the default
 * encoding specified by the caller.
 *
 * @param is              the InputStream of a message
 * @param charSetEncoding default character set encoding
 * @return a Reader with the correct encoding already set
 * @throws java.io.IOException
 */
public static Reader getReader(final InputStream is, final String charSetEncoding) throws IOException {
    final PushbackInputStream is2 = getPushbackInputStream(is);
    final String encoding = getCharSetEncoding(is2, charSetEncoding);
    InputStreamReader inputStreamReader;
    try {
        inputStreamReader = (InputStreamReader) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws UnsupportedEncodingException {
                return new InputStreamReader(is2, encoding);
            }
        });
    } catch (PrivilegedActionException e) {
        throw (UnsupportedEncodingException) e.getException();
    }
    return new BufferedReader(inputStreamReader);
}
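
On Java 8 and later the same privileged block can be written more compactly as a lambda, because PrivilegedExceptionAction has a single abstract method. The following rewrite is only an illustrative sketch reusing the is2 and encoding locals from getReader() above; it is not part of the Axis2 source.

InputStreamReader inputStreamReader;
try {
    // The cast selects the doPrivileged overload whose action may throw checked exceptions.
    inputStreamReader = AccessController.doPrivileged(
            (PrivilegedExceptionAction<InputStreamReader>) () -> new InputStreamReader(is2, encoding));
} catch (PrivilegedActionException e) {
    throw (UnsupportedEncodingException) e.getException();
}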

From source file: org.apache.hadoop.hdfs.TestAsyncDFS.java

@Test(timeout = 60000)
public void testAsyncAPIWithException() throws Exception {
    String group1 = "group1";
    String group2 = "group2";
    String user1 = "user1";
    UserGroupInformation ugi1;

    // create fake mapping for the groups
    Map<String, String[]> u2gMap = new HashMap<String, String[]>(1);
    u2gMap.put(user1, new String[] { group1, group2 });
    DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2gMap);

    // Initialize the test user
    ugi1 = UserGroupInformation.createUserForTesting(user1, new String[] { group1, group2 });

    final Path parent = new Path("/test/async_api_exception/");
    final Path aclDir = new Path(parent, "aclDir");
    final Path src = new Path(parent, "src");
    final Path dst = new Path(parent, "dst");
    fs.mkdirs(aclDir, FsPermission.createImmutable((short) 0700));
    fs.mkdirs(src);

    AsyncDistributedFileSystem adfs1 = ugi1.doAs(new PrivilegedExceptionAction<AsyncDistributedFileSystem>() {
        @Override
        public AsyncDistributedFileSystem run() throws Exception {
            return cluster.getFileSystem().getAsyncDistributedFileSystem();
        }
    });

    Future<Void> retFuture;
    // test rename
    try {
        retFuture = adfs1.rename(src, dst, Rename.OVERWRITE);
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
        assertTrue("Permission denied messages must carry the path parent",
                e.getMessage().contains(src.getParent().toUri().getPath()));
    }

    // test setPermission
    FsPermission fsPerm = new FsPermission(permGenerator.next());
    try {
        retFuture = adfs1.setPermission(src, fsPerm);
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
    }

    // test setOwner
    try {
        retFuture = adfs1.setOwner(src, "user1", "group2");
        retFuture.get();
    } catch (ExecutionException e) {
        checkPermissionDenied(e, src, user1);
    }

    // test setAcl
    try {
        retFuture = adfs1.setAcl(aclDir, Lists.newArrayList(aclEntry(ACCESS, USER, ALL)));
        retFuture.get();
        fail("setAcl should fail with permission denied");
    } catch (ExecutionException e) {
        checkPermissionDenied(e, aclDir, user1);
    }

    // test getAclStatus
    try {
        Future<AclStatus> aclRetFuture = adfs1.getAclStatus(aclDir);
        aclRetFuture.get();
        fail("getAclStatus should fail with permission denied");
    } catch (ExecutionException e) {
        checkPermissionDenied(e, aclDir, user1);
    }
}

From source file: org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure.java

private void postTruncate(final MasterProcedureEnv env) throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        final TableName tableName = getTableName();
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                cpHost.postTruncateTableHandler(tableName);
                return null;
            }
        });
    }
}

From source file: org.apache.hadoop.hdfs.DFSTestUtil.java

// TODO: JGH Reformat this code
/**
 * Get a FileSystem instance as the specified user, inside a doAs block.
 */
static public FileSystem getFileSystemAs(UserGroupInformation ugi, final Configuration conf)
        throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws Exception {
            return FileSystem.get(conf);
        }
    });
}
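
A hypothetical call site might look like the following; the user name, group, and path are placeholder values, not taken from the Hadoop tests. Note that UserGroupInformation.doAs declares IOException and InterruptedException, which is why getFileSystemAs simply propagates them rather than catching PrivilegedActionException.

// Hypothetical usage: obtain a FileSystem bound to a fake test user.
UserGroupInformation testUgi =
        UserGroupInformation.createUserForTesting("alice", new String[] { "testgroup" });
FileSystem fsAsAlice = DFSTestUtil.getFileSystemAs(testUgi, conf);
fsAsAlice.mkdirs(new Path("/user/alice"));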

From source file: org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify.java

protected void doVerify(final Configuration conf, final HTableDescriptor htd) throws Exception {
    System.out.println(
            String.format("Verifying for auths %s, %s, %s, %s", CONFIDENTIAL, TOPSECRET, SECRET, PRIVATE));
    PrivilegedExceptionAction<Job> scanAction = new PrivilegedExceptionAction<Job>() {
        @Override
        public Job run() throws Exception {
            return doVerify(conf, htd, CONFIDENTIAL, TOPSECRET, SECRET, PRIVATE);
        }
    };
    Job job = USER1.runAs(scanAction);
    this.numRowsReadWithExp1 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_1).getValue();
    this.numRowsReadWithExp2 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_2).getValue();
    this.numRowsReadWithExp3 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_3).getValue();
    this.numRowsReadWithExp4 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_4).getValue();
    assertEquals(this.numRowsLoadedWithExp1, this.numRowsReadWithExp1);
    assertEquals(this.numRowsLoadedWithExp2, this.numRowsReadWithExp2);
    assertEquals(0, this.numRowsReadWithExp3);
    assertEquals(0, this.numRowsReadWithExp4);

    // PUBLIC label auth is not provided for user1 user.
    System.out.println(String.format("Verifying for auths %s, %s", PRIVATE, PUBLIC));
    scanAction = new PrivilegedExceptionAction<Job>() {
        @Override
        public Job run() throws Exception {
            return doVerify(conf, htd, PRIVATE, PUBLIC);
        }
    };
    job = USER1.runAs(scanAction);
    this.numRowsReadWithExp1 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_1).getValue();
    this.numRowsReadWithExp2 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_2).getValue();
    this.numRowsReadWithExp3 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_3).getValue();
    this.numRowsReadWithExp4 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_4).getValue();
    assertEquals(0, this.numRowsReadWithExp1);
    assertEquals(0, this.numRowsReadWithExp2);
    assertEquals(0, this.numRowsReadWithExp3);
    assertEquals(this.numRowsLoadWithExp4, this.numRowsReadWithExp4);

    // Normal user only having PUBLIC label auth and can view only those cells.
    System.out.println(String.format("Verifying for auths %s, %s", PRIVATE, PUBLIC));
    scanAction = new PrivilegedExceptionAction<Job>() {
        @Override
        public Job run() throws Exception {
            return doVerify(conf, htd, PRIVATE, PUBLIC);
        }
    };
    job = USER2.runAs(scanAction);
    this.numRowsReadWithExp1 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_1).getValue();
    this.numRowsReadWithExp2 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_2).getValue();
    this.numRowsReadWithExp3 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_3).getValue();
    this.numRowsReadWithExp4 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_4).getValue();
    assertEquals(0, this.numRowsReadWithExp1);
    assertEquals(0, this.numRowsReadWithExp2);
    assertEquals(this.numRowsLoadWithExp3, this.numRowsReadWithExp3);
    assertEquals(0, this.numRowsReadWithExp4);
}

From source file: org.apache.hadoop.mapred.QueueManagerTestUtils.java

static Job submitSleepJob(final int numMappers, final int numReducers, final long mapSleepTime,
        final long reduceSleepTime, boolean shouldComplete, String userInfo, String queueName,
        Configuration clientConf) throws IOException, InterruptedException, ClassNotFoundException {
    clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:" + miniMRCluster.getJobTrackerPort());
    UserGroupInformation ugi;
    if (userInfo != null) {
        String[] splits = userInfo.split(",");
        String[] groups = new String[splits.length - 1];
        System.arraycopy(splits, 1, groups, 0, splits.length - 1);
        ugi = UserGroupInformation.createUserForTesting(splits[0], groups);
    } else {
        ugi = UserGroupInformation.getCurrentUser();
    }
    if (queueName != null) {
        clientConf.set(JobContext.QUEUE_NAME, queueName);
    }
    final SleepJob sleep = new SleepJob();
    sleep.setConf(clientConf);

    Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
        public Job run() throws IOException {
            return sleep.createJob(numMappers, numReducers, mapSleepTime, (int) mapSleepTime, reduceSleepTime,
                    (int) reduceSleepTime);
        }
    });
    if (shouldComplete) {
        job.waitForCompletion(false);
    } else {
        job.submit();
        // miniMRCluster.getJobTrackerRunner().getJobTracker().jobsToComplete()[]
        Cluster cluster = new Cluster(miniMRCluster.createJobConf());
        JobStatus[] status = miniMRCluster.getJobTrackerRunner().getJobTracker().jobsToComplete();
        JobID id = status[status.length - 1].getJobID();
        Job newJob = cluster.getJob(id);
        cluster.close();
        return newJob;
    }
    return job;
}

From source file: CalendarController.java

public void init() {
    log("Applet init, applet is " + this.hashCode());
    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
            public Object run() throws Exception {
                String userdir = System.getProperty("user.home");
                String dbname = userdir + "/" + DBNAME;

                startConsole(userdir);

                DatabaseManager.initDatabase(dbname, "user", "secret", false);
                log("Database initialized, " + "database directory is " + dbname);

                return null;
            }
        });
    } catch (PrivilegedActionException e) {
        e.getException().printStackTrace();
    }
}

From source file: org.apache.hadoop.hbase.regionserver.TestHStore.java

/**
 * Test we do not lose data if we fail a flush and then close.
 * Part of HBASE-10466.
 * @throws Exception
 */
@Test
public void testFlushSizeAccounting() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write in " + this.name.getMethodName());
    final Configuration conf = HBaseConfiguration.create();
    // Only retry once.
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    User user = User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            assertEquals(FaultyFileSystem.class, fs.getClass());
            FaultyFileSystem ffs = (FaultyFileSystem) fs;

            // Initialize region
            init(name.getMethodName(), conf);

            MemStoreSize size = store.memstore.getFlushableSize();
            assertEquals(0, size.getDataSize());
            LOG.info("Adding some data");
            MemStoreSize kvSize = new MemStoreSize();
            store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize);
            // add the heap size of active (mutable) segment
            kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD);
            size = store.memstore.getFlushableSize();
            assertEquals(kvSize, size);
            // Flush.  Bug #1 from HBASE-10466.  Make sure size calculation on failed flush is right.
            try {
                LOG.info("Flushing");
                flushStore(store, id++);
                fail("Didn't bubble up IOE!");
            } catch (IOException ioe) {
                assertTrue(ioe.getMessage().contains("Fault injected"));
            }
            // due to snapshot, change mutable to immutable segment
            kvSize.incMemStoreSize(0, CSLMImmutableSegment.DEEP_OVERHEAD_CSLM - MutableSegment.DEEP_OVERHEAD);
            size = store.memstore.getFlushableSize();
            assertEquals(kvSize, size);
            MemStoreSize kvSize2 = new MemStoreSize();
            store.add(new KeyValue(row, family, qf2, 2, (byte[]) null), kvSize2);
            kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD);
            // Even though we add a new kv, we expect the flushable size to be 'same' since we have
            // not yet cleared the snapshot -- the above flush failed.
            assertEquals(kvSize, size);
            ffs.fault.set(false);
            flushStore(store, id++);
            size = store.memstore.getFlushableSize();
            // Size should be the foreground kv size.
            assertEquals(kvSize2, size);
            flushStore(store, id++);
            size = store.memstore.getFlushableSize();
            assertEquals(0, size.getDataSize());
            assertEquals(MutableSegment.DEEP_OVERHEAD, size.getHeapSize());
            return null;
        }
    });
}

From source file: org.apache.drill.exec.store.hive.HiveMetadataProvider.java

/**
 * Gets list of input splits based on table location.
 * These input splits are grouped logically by file name
 * if skip header / footer logic should be applied later on.
 *
 * @param properties table or partition properties
 * @param sd storage descriptor
 * @param partition hive partition
 * @return list of logically grouped input splits
 */
private List<LogicalInputSplit> splitInputWithUGI(final Properties properties, final StorageDescriptor sd,
        final Partition partition) {
    watch.start();
    try {
        return ugi.doAs(new PrivilegedExceptionAction<List<LogicalInputSplit>>() {
            public List<LogicalInputSplit> run() throws Exception {
                final List<LogicalInputSplit> splits = Lists.newArrayList();
                final JobConf job = new JobConf(hiveConf);
                HiveUtilities.addConfToJob(job, properties);
                HiveUtilities.verifyAndAddTransactionalProperties(job, sd);
                job.setInputFormat(HiveUtilities.getInputFormatClass(job, sd, hiveReadEntry.getTable()));
                final Path path = new Path(sd.getLocation());
                final FileSystem fs = path.getFileSystem(job);
                if (fs.exists(path)) {
                    FileInputFormat.addInputPath(job, path);
                    final InputFormat<?, ?> format = job.getInputFormat();
                    InputSplit[] inputSplits = format.getSplits(job, 1);

                    // if current table with text input format and has header / footer,
                    // we need to make sure that splits of the same file are grouped together
                    if (TextInputFormat.class.getCanonicalName().equals(sd.getInputFormat())
                            && HiveUtilities.hasHeaderOrFooter(hiveReadEntry.getTable())) {
                        Multimap<Path, FileSplit> inputSplitMultimap = transformFileSplits(inputSplits);
                        for (Collection<FileSplit> logicalInputSplit : inputSplitMultimap.asMap().values()) {
                            splits.add(new LogicalInputSplit(logicalInputSplit, partition));
                        }
                    } else {
                        for (final InputSplit split : inputSplits) {
                            splits.add(new LogicalInputSplit(split, partition));
                        }
                    }
                }
                return splits;
            }
        });
    } catch (final InterruptedException | IOException e) {
        final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
        logger.error(errMsg, e);
        throw new DrillRuntimeException(errMsg, e);
    } finally {
        logger.trace("Took {} s to get splits from {}", watch.elapsed(TimeUnit.NANOSECONDS) / 1000,
                sd.getLocation());
        watch.stop();
    }
}
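
The grouping step above relies on a Guava Multimap keyed by file path. As a rough idea of what that transformation involves, here is a simplified, hypothetical groupSplitsByFile sketch analogous to the transformFileSplits call above; the real Drill helper may differ (for example, it may also need to keep the splits of each file in order).

// Hypothetical sketch: group the returned InputSplits by the file they belong to.
private static Multimap<Path, FileSplit> groupSplitsByFile(InputSplit[] inputSplits) {
    Multimap<Path, FileSplit> byFile = ArrayListMultimap.create();
    for (InputSplit split : inputSplits) {
        FileSplit fileSplit = (FileSplit) split;   // text input formats produce FileSplits
        byFile.put(fileSplit.getPath(), fileSplit);
    }
    return byFile;
}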

From source file: org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure.java

private void postDelete(final MasterProcedureEnv env) throws IOException, InterruptedException {
    deleteTableStates(env, tableName);

    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        final TableName tableName = this.tableName;
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                cpHost.postDeleteTableHandler(tableName);
                return null;
            }
        });
    }
}