Example usage for java.security PrivilegedExceptionAction PrivilegedExceptionAction

Introduction

On this page you can find example usages of java.security.PrivilegedExceptionAction.

Prototype

new PrivilegedExceptionAction<T>() { public T run() throws Exception { ... } }
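
All of the examples on this page follow the same basic pattern: an anonymous PrivilegedExceptionAction is passed to AccessController.doPrivileged, or to a wrapper such as Hadoop's UserGroupInformation.doAs, and any checked exception thrown by run() comes back wrapped in a PrivilegedActionException. The following is a minimal, self-contained sketch of that pattern; the class name PrivilegedReadExample and the file name example.txt are placeholders and do not come from any of the sources below.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadExample {
    public static void main(String[] args) throws IOException {
        try {
            // run() may throw a checked exception; doPrivileged wraps it
            // in a PrivilegedActionException.
            byte[] data = AccessController.doPrivileged(new PrivilegedExceptionAction<byte[]>() {
                public byte[] run() throws IOException {
                    // example.txt is a placeholder path for illustration only
                    return Files.readAllBytes(Paths.get("example.txt"));
                }
            });
            System.out.println(data.length + " bytes read");
        } catch (PrivilegedActionException e) {
            // Unwrap and rethrow the original checked exception thrown by run().
            throw (IOException) e.getException();
        }
    }
}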

Usage

From source file:com.trendmicro.hdfs.webdav.HDFSResource.java
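
This example moves a WebDAV resource by calling FileSystem.rename inside user.doAs(new PrivilegedExceptionAction<Void>()), so the HDFS operation runs under that user's identity; checked exceptions from the action are rethrown as RuntimeException.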

@Override
public void move(final DavResource resource) throws DavException {
    final HDFSResource dfsResource = (HDFSResource) resource;
    final Path destPath = dfsResource.getPath();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Moving '" + path.toUri().getPath() + "' to '" + destPath.toUri().getPath() + "'");
    }
    try {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                FileSystem.get(conf).rename(path, destPath);
                return null;
            }
        });
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.axis2.jaxws.util.WSDL4JWrapper.java
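
Here the WSDL reader is constructed inside AccessController.doPrivileged(...); the PrivilegedActionException is caught and the wrapped WSDLException is unwrapped and rethrown.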

private static WSDLReader getWSDLReader() throws WSDLException {
    // Keep this method private
    WSDLReader reader;
    try {
        reader = (WSDLReader) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws WSDLException {
                WSDLFactory factory = WSDLFactory.newInstance();
                return factory.newWSDLReader();
            }
        });
    } catch (PrivilegedActionException e) {
        throw (WSDLException) e.getException();
    }
    WSDLReaderConfigurator configurator = (WSDLReaderConfigurator) MetadataFactoryRegistry
            .getFactory(WSDLReaderConfigurator.class);
    if (configurator != null) {
        if (log.isDebugEnabled()) {
            log.debug("Calling configureReaderInstance with: " + configurator.getClass().getName());
        }
        configurator.configureReaderInstance(reader);
    }
    return reader;
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java
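
In this HBase test, WAL splitting and region reopening are performed as a different user via user.runAs(new PrivilegedExceptionAction()), to verify that bulk-loaded, compacted data is still visible after replay.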

/**
 * HRegion test case that is made of a major compacted HFile (created with three bulk loaded
 * files) and an edit in the memstore.
 * This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries
 * from being replayed"
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testCompactedBulkLoadedFiles() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testCompactedBulkLoadedFiles");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    HLog wal = createWAL(this.conf);
    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);

    // Add an edit so there is something in the WAL
    byte[] row = tableName.getName();
    byte[] family = htd.getFamilies().iterator().next().getName();
    region.put((new Put(row)).add(family, family, family));
    wal.sync();

    List<Pair<byte[], String>> hfs = new ArrayList<Pair<byte[], String>>(1);
    for (int i = 0; i < 3; i++) {
        Path f = new Path(basedir, "hfile" + i);
        HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(i + "00"),
                Bytes.toBytes(i + "50"), 10);
        hfs.add(Pair.newPair(family, f.toString()));
    }
    region.bulkLoadHFiles(hfs, true);
    final int rowsInsertedCount = 31;
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));

    // major compact to turn all the bulk loaded files into one normal file
    region.compactStores(true);
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));

    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            HLog wal2 = createWAL(newConf);

            HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd,
                    wal2);
            long seqid2 = region2.getOpenSeqNum();
            assertTrue(seqid2 > -1);
            assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));

            // I can't close wal1.  It's been appropriated when we split.
            region2.close();
            wal2.closeAndDelete();
            return null;
        }
    });
}

From source file:org.apache.hadoop.mapred.JobInProgress.java
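
The JobInProgress constructor resolves the job submission directory's FileSystem inside userUGI.doAs(new PrivilegedExceptionAction<FileSystem>()), so the lookup runs with the submitting user's credentials.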

JobInProgress(JobTracker jobtracker, final JobConf default_conf, JobInfo jobInfo, int rCount, Credentials ts)
        throws IOException, InterruptedException {
    try {
        this.restartCount = rCount;
        this.jobId = JobID.downgrade(jobInfo.getJobID());
        String url = "http://" + jobtracker.getJobTrackerMachine() + ":" + jobtracker.getInfoPort()
                + "/jobdetails.jsp?jobid=" + jobId;
        this.jobtracker = jobtracker;
        this.status = new JobStatus(jobId, 0.0f, 0.0f, JobStatus.PREP);
        this.status.setUsername(jobInfo.getUser().toString());
        this.jobtracker.getInstrumentation().addPrepJob(conf, jobId);
        // Add the queue-level metric below (after the profile has been initialized)
        this.startTime = jobtracker.getClock().getTime();
        status.setStartTime(startTime);
        this.localFs = jobtracker.getLocalFileSystem();

        this.tokenStorage = ts;
        // use the user supplied token to add user credentials to the conf
        jobSubmitDir = jobInfo.getJobSubmitDir();
        user = jobInfo.getUser().toString();
        userUGI = UserGroupInformation.createRemoteUser(user);
        if (ts != null) {
            for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) {
                userUGI.addToken(token);
            }
        }

        fs = userUGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws IOException {
                return jobSubmitDir.getFileSystem(default_conf);
            }
        });

        // check the size of the jobconf
        Path submitJobFile = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
        FileStatus fstatus = fs.getFileStatus(submitJobFile);
        if (fstatus.getLen() > jobtracker.MAX_JOBCONF_SIZE) {
            throw new IOException("Exceeded max jobconf size: " + fstatus.getLen() + " limit: "
                    + jobtracker.MAX_JOBCONF_SIZE);
        }
        this.localJobFile = default_conf.getLocalPath(JobTracker.SUBDIR + "/" + jobId + ".xml");
        Path jobFilePath = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
        jobFile = jobFilePath.toString();
        fs.copyToLocalFile(jobFilePath, localJobFile);
        conf = new JobConf(localJobFile);
        if (conf.getUser() == null) {
            this.conf.setUser(user);
        }
        if (!conf.getUser().equals(user)) {
            String desc = "The username " + conf.getUser() + " obtained from the "
                    + "conf doesn't match the username " + user + " the user " + "authenticated as";
            AuditLogger.logFailure(user, Operation.SUBMIT_JOB.name(), conf.getUser(), jobId.toString(), desc);
            throw new IOException(desc);
        }

        this.priority = conf.getJobPriority();
        this.status.setJobPriority(this.priority);
        String queueName = conf.getQueueName();
        this.profile = new JobProfile(user, jobId, jobFile, url, conf.getJobName(), queueName);

        Queue queue = this.jobtracker.getQueueManager().getQueue(queueName);
        if (queue == null) {
            throw new IOException("Queue \"" + queueName + "\" does not exist");
        }
        this.queueMetrics = queue.getMetrics();
        this.queueMetrics.addPrepJob(conf, jobId);

        this.submitHostName = conf.getJobSubmitHostName();
        this.submitHostAddress = conf.getJobSubmitHostAddress();
        this.numMapTasks = conf.getNumMapTasks();
        this.numReduceTasks = conf.getNumReduceTasks();

        this.memoryPerMap = conf.getMemoryForMapTask();
        this.memoryPerReduce = conf.getMemoryForReduceTask();

        this.taskCompletionEvents = new ArrayList<TaskCompletionEvent>(numMapTasks + numReduceTasks + 10);

        // Construct the jobACLs
        status.setJobACLs(jobtracker.getJobACLsManager().constructJobACLs(conf));

        this.mapFailuresPercent = conf.getMaxMapTaskFailuresPercent();
        this.reduceFailuresPercent = conf.getMaxReduceTaskFailuresPercent();

        this.maxTaskFailuresPerTracker = conf.getMaxTaskFailuresPerTracker();

        hasSpeculativeMaps = conf.getMapSpeculativeExecution();
        hasSpeculativeReduces = conf.getReduceSpeculativeExecution();
        // A limit on the input size of the reduce.
        // We check whether the estimated input size of each reduce
        // is less than this value. If not, we fail the job.
        // A value of -1 means no limit is set.
        reduce_input_limit = -1L;
        this.maxLevel = jobtracker.getNumTaskCacheLevels();
        this.anyCacheLevel = this.maxLevel + 1;
        this.nonLocalMaps = new LinkedList<TaskInProgress>();
        this.failedMaps = new TreeSet<TaskInProgress>(failComparator);
        this.nonLocalRunningMaps = new LinkedHashSet<TaskInProgress>();
        this.runningMapCache = new IdentityHashMap<Node, Set<TaskInProgress>>();
        this.nonRunningReduces = new TreeSet<TaskInProgress>(failComparator);
        this.runningReduces = new LinkedHashSet<TaskInProgress>();
        this.resourceEstimator = new ResourceEstimator(this);
        this.reduce_input_limit = conf.getLong("mapreduce.reduce.input.limit", DEFAULT_REDUCE_INPUT_LIMIT);
        // register job's tokens for renewal
        DelegationTokenRenewal.registerDelegationTokensForRenewal(jobInfo.getJobID(), ts, jobtracker.getConf());

        // Check task limits
        checkTaskLimits();
    } finally {
        //close all FileSystems that were created above for the current user
        //At this point, this constructor is called in the context of an RPC, and
        //hence the "current user" is actually referring to the kerberos
        //authenticated user (if security is ON).
        FileSystem.closeAllForUGI(UserGroupInformation.getCurrentUser());
    }
}

From source file:org.apache.hadoop.fs.TestFileSystem.java
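
This test obtains FileSystem instances for different users via ugi.doAs(new PrivilegedExceptionAction<FileSystem>()) and asserts that the FileSystem cache returns a distinct instance per user.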

public void testFsCache() throws Exception {
    {
        long now = System.currentTimeMillis();
        String[] users = new String[] { "foo", "bar" };
        final Configuration conf = new Configuration();
        FileSystem[] fs = new FileSystem[users.length];

        for (int i = 0; i < users.length; i++) {
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
            fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws IOException {
                    return FileSystem.get(conf);
                }
            });
            for (int j = 0; j < i; j++) {
                assertFalse(fs[j] == fs[i]);
            }
        }
        FileSystem.closeAll();
    }

    {
        try {
            runTestCache(NameNode.DEFAULT_PORT);
        } catch (java.net.BindException be) {
            LOG.warn("Cannot test NameNode.DEFAULT_PORT (=" + NameNode.DEFAULT_PORT + ")", be);
        }

        runTestCache(0);
    }
}

From source file:org.apache.bsf.BSFManager.java
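
BSFManager delegates script execution to the engine inside AccessController.doPrivileged(...); the BSFException wrapped in the PrivilegedActionException is unwrapped and rethrown.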

/**
 * Execute the given script of the given language.
 *
 * @param lang     language identifier
 * @param source   (context info) the source of this expression
 *                 (e.g., filename)
 * @param lineNo   (context info) the line number in source for expr
 * @param columnNo (context info) the column number in source for expr
 * @param script   the script to execute
 *
 * @exception BSFException if anything goes wrong while running the script
 */
public void exec(String lang, String source, int lineNo, int columnNo, Object script) throws BSFException {
    logger.debug("BSFManager:exec");

    final BSFEngine e = loadScriptingEngine(lang);
    final String sourcef = source;
    final int lineNof = lineNo, columnNof = columnNo;
    final Object scriptf = script;

    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws Exception {
                e.exec(sourcef, lineNof, columnNof, scriptf);
                return null;
            }
        });
    } catch (PrivilegedActionException prive) {

        logger.error("Exception :", prive);
        throw (BSFException) prive.getException();
    }
}

From source file:org.apache.hadoop.crypto.key.kms.server.KMS.java
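
This KMS REST endpoint generates encrypted keys by wrapping the provider.generateEncryptedKey(name) calls in user.doAs(new PrivilegedExceptionAction<Void>()), so key generation runs as the authenticated HTTP user.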

@SuppressWarnings({ "rawtypes", "unchecked" })
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response generateEncryptedKeys(@PathParam("name") final String name,
        @QueryParam(KMSRESTConstants.EEK_OP) String edekOp,
        @DefaultValue("1") @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys) throws Exception {
    try {
        LOG.trace("Entering generateEncryptedKeys method.");
        UserGroupInformation user = HttpUserGroupInformation.get();
        KMSClientProvider.checkNotEmpty(name, "name");
        KMSClientProvider.checkNotNull(edekOp, "eekOp");
        LOG.debug("Generating encrypted key with name {}," + " the edek Operation is {}.", name, edekOp);

        Object retJSON;
        if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
            LOG.debug("edek Operation is Generate.");
            assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);

            final List<EncryptedKeyVersion> retEdeks = new LinkedList<EncryptedKeyVersion>();
            try {

                user.doAs(new PrivilegedExceptionAction<Void>() {
                    @Override
                    public Void run() throws Exception {
                        LOG.debug("Generated Encrypted key for {} number of " + "keys.", numKeys);
                        for (int i = 0; i < numKeys; i++) {
                            retEdeks.add(provider.generateEncryptedKey(name));
                        }
                        return null;
                    }
                });

            } catch (Exception e) {
                LOG.error("Exception in generateEncryptedKeys:", e);
                throw new IOException(e);
            }
            kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
            retJSON = new ArrayList();
            for (EncryptedKeyVersion edek : retEdeks) {
                ((ArrayList) retJSON).add(KMSServerJSONUtils.toJSON(edek));
            }
        } else {
            StringBuilder error;
            error = new StringBuilder("IllegalArgumentException Wrong ");
            error.append(KMSRESTConstants.EEK_OP);
            error.append(" value, it must be ");
            error.append(KMSRESTConstants.EEK_GENERATE);
            error.append(" or ");
            error.append(KMSRESTConstants.EEK_DECRYPT);
            LOG.error(error.toString());
            throw new IllegalArgumentException(error.toString());
        }
        KMSWebApp.getGenerateEEKCallsMeter().mark();
        LOG.trace("Exiting generateEncryptedKeys method.");
        return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON).build();
    } catch (Exception e) {
        LOG.debug("Exception in generateEncryptedKeys.", e);
        throw e;
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.DisableTableProcedure.java
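
The pre- and post-disable coprocessor hooks are invoked inside user.doAs(new PrivilegedExceptionAction<Void>()), so they execute under the identity held in the user field.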

/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case DISABLE_TABLE_PRE_OPERATION:
                    cpHost.preDisableTableHandler(tableName);
                    break;
                case DISABLE_TABLE_POST_OPERATION:
                    cpHost.postDisableTableHandler(tableName);
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}

From source file:org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure.java
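
Same pattern as the previous example: the pre- and post-modify coprocessor hooks run inside user.doAs(new PrivilegedExceptionAction<Void>()).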

/**
 * Coprocessor Action.
 * @param env MasterProcedureEnv
 * @param state the procedure state
 * @throws IOException
 * @throws InterruptedException
 */
private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state)
        throws IOException, InterruptedException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                switch (state) {
                case MODIFY_TABLE_PRE_OPERATION:
                    cpHost.preModifyTableHandler(getTableName(), modifiedHTableDescriptor);
                    break;
                case MODIFY_TABLE_POST_OPERATION:
                    cpHost.postModifyTableHandler(getTableName(), modifiedHTableDescriptor);
                    break;
                default:
                    throw new UnsupportedOperationException(this + " unhandled state=" + state);
                }
                return null;
            }
        });
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java
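
This test runs its whole body as a test user via user.runAs(new PrivilegedExceptionAction<Object>()), with a faulty FileSystem injected, to verify memstore size accounting after a failed flush.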

/**
 * Test we do not lose data if we fail a flush and then close.
 * Part of HBase-10466.  Tests the following from the issue description:
 * "Bug 1: Wrong calculation of HRegion.memstoreSize: When a flush fails, data to be flushed is
 * kept in each MemStore's snapshot and wait for next flush attempt to continue on it. But when
 * the next flush succeeds, the counter of total memstore size in HRegion is always deduced by
 * the sum of current memstore sizes instead of snapshots left from previous failed flush. This
 * calculation is problematic that almost every time there is failed flush, HRegion.memstoreSize
 * gets reduced by a wrong value. If region flush could not proceed for a couple cycles, the size
 * in current memstore could be much larger than the snapshot. It's likely to drift memstoreSize
 * much smaller than expected. In extreme case, if the error accumulates to even bigger than
 * HRegion's memstore size limit, any further flush is skipped because flush does not do anything
 * if memstoreSize is not larger than 0."
 * @throws Exception
 */
@Test(timeout = 60000)
public void testFlushSizeAccounting() throws Exception {
    final Configuration conf = HBaseConfiguration.create(CONF);
    // Only retry once.
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    final User user = User.createUserForTesting(conf, this.name.getMethodName(), new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
            FaultyFileSystem ffs = (FaultyFileSystem) fs;
            HRegion region = null;
            try {
                // Initialize region
                region = initHRegion(tableName, name.getMethodName(), conf, COLUMN_FAMILY_BYTES);
                long size = region.getMemstoreSize().get();
                Assert.assertEquals(0, size);
                // Put one item into memstore.  Measure the size of one item in memstore.
                Put p1 = new Put(row);
                p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
                region.put(p1);
                final long sizeOfOnePut = region.getMemstoreSize().get();
                // Fail a flush which means the current memstore will hang out as memstore 'snapshot'.
                try {
                    LOG.info("Flushing");
                    region.flushcache();
                    Assert.fail("Didn't bubble up IOE!");
                } catch (DroppedSnapshotException dse) {
                    // What we are expecting
                }
                // Make it so all writes succeed from here on out
                ffs.fault.set(false);
                // Check sizes.  Should still be the one entry.
                Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize().get());
                // Now add two entries so that on this next flush that fails, we can see if we
                // subtract the right amount, the snapshot size only.
                Put p2 = new Put(row);
                p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[]) null));
                p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[]) null));
                region.put(p2);
                Assert.assertEquals(sizeOfOnePut * 3, region.getMemstoreSize().get());
                // Do a successful flush.  It will clear the snapshot only.  That's how flushes work:
                // if there is already a snapshot we clear it, else we move the memstore to be the
                // snapshot and flush it.
                region.flushcache();
                // Make sure our memory accounting is right.
                Assert.assertEquals(sizeOfOnePut * 2, region.getMemstoreSize().get());
            } finally {
                HRegion.closeHRegion(region);
            }
            return null;
        }
    });
    FileSystem.closeAllForUGI(user.getUGI());
}