Example usage for java.security.PrivilegedExceptionAction

Introduction

This page collects usage examples of java.security.PrivilegedExceptionAction. In each example the interface is implemented as an anonymous class whose run() method performs the privileged work, and the instance is passed either to AccessController.doPrivileged or to a Hadoop-style doAs/runAs method so that the work executes with specific privileges or as a specific user.

Prototype

public interface PrivilegedExceptionAction<T> {
    T run() throws Exception;
}

Usage
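
Before the project-specific examples below, here is a minimal, self-contained sketch of the basic pattern: an anonymous PrivilegedExceptionAction is passed to AccessController.doPrivileged, and any checked exception thrown from run() comes back wrapped in a PrivilegedActionException. The class name and the use of FileInputStream are illustrative only.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReadSketch {
    public static FileInputStream openPrivileged(final String name) throws FileNotFoundException {
        try {
            // run() may throw a checked exception; doPrivileged wraps it
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                @Override
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream(name);
                }
            });
        } catch (PrivilegedActionException e) {
            // getException() returns the original checked exception thrown by run()
            throw (FileNotFoundException) e.getException();
        }
    }
}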

From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java

@Test
public void testVisibilityLabelsWithDeleteColumnsWithMultipleVersionsNoTimestamp() throws Exception {
    setAuths();
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = doPuts(tableName)) {
        TEST_UTIL.getAdmin().flush(tableName);
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Delete d1 = new Delete(row1);
                    d1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
                    d1.addColumns(fam, qual);

                    table.delete(d1);

                    Delete d2 = new Delete(row1);
                    d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET));
                    d2.addColumns(fam, qual);
                    table.delete(d2);

                    Delete d3 = new Delete(row1);
                    d3.setCellVisibility(new CellVisibility(
                            "(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")"));
                    d3.addColumns(fam, qual);
                    table.delete(d3);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);
        Scan s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(SECRET, PRIVATE, CONFIDENTIAL, TOPSECRET));
        ResultScanner scanner = table.getScanner(s);
        Result[] next = scanner.next(3);
        assertEquals(1, next.length);
        CellScanner cellScanner = next[0].cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0,
                row2.length));
    }
}

From source file:com.trendmicro.hdfs.webdav.HDFSResource.java

@Override
public void removeMember(final DavResource resource) throws DavException {
    final HDFSResource dfsResource = (HDFSResource) resource;
    final Path destPath = dfsResource.getPath();
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleting '" + destPath.toUri().getPath() + "'");
        }
        boolean success = user.doAs(new PrivilegedExceptionAction<Boolean>() {
            public Boolean run() throws Exception {
                return FileSystem.get(conf).delete(destPath, true);
            }
        });
        if (!success) {
            throw new DavException(DavServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
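
Several of the examples on this page wrap Hadoop FileSystem calls in a doAs call on a UserGroupInformation instance, as the WebDAV resource above does. The following is a minimal sketch of that shape, assuming Hadoop's UserGroupInformation and FileSystem APIs; the class and method names in the sketch are illustrative.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsDeleteSketch {
    // Deletes 'path' recursively with the credentials of 'user'.
    static boolean deleteAs(UserGroupInformation user, final Configuration conf, final Path path)
            throws IOException, InterruptedException {
        return user.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                // Everything inside run() executes as 'user'
                return FileSystem.get(conf).delete(path, true);
            }
        });
    }
}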

From source file:at.irian.myfaces.wscope.renderkit.html.WsServerSideStateCacheImpl.java

protected Object deserializeView(Object state) {
    if (log.isLoggable(Level.FINEST)) {
        log.finest("Entering deserializeView");
    }

    if (state instanceof byte[]) {
        if (log.isLoggable(Level.FINEST)) {
            log.finest("Processing deserializeView - deserializing serialized state. Bytes : "
                    + ((byte[]) state).length);
        }

        try {
            ByteArrayInputStream bais = new ByteArrayInputStream((byte[]) state);
            InputStream is = bais;
            if (is.read() == COMPRESSED_FLAG) {
                is = new GZIPInputStream(is);
            }
            ObjectInputStream ois = null;
            try {
                final ObjectInputStream in = new MyFacesObjectInputStream(is);
                ois = in;
                Object object = null;
                if (System.getSecurityManager() != null) {
                    object = AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                        public Object run()
                                throws PrivilegedActionException, IOException, ClassNotFoundException {
                            //return new Object[] {in.readObject(), in.readObject()};
                            return in.readObject();
                        }
                    });
                } else {
                    //object = new Object[] {in.readObject(), in.readObject()};
                    object = in.readObject();
                }
                return object;
            } finally {
                if (ois != null) {
                    ois.close();
                    ois = null;
                }
            }
        } catch (PrivilegedActionException e) {
            log.log(Level.SEVERE, "Exiting deserializeView - Could not deserialize state: " + e.getMessage(),
                    e);
            return null;
        } catch (IOException e) {
            log.log(Level.SEVERE, "Exiting deserializeView - Could not deserialize state: " + e.getMessage(),
                    e);
            return null;
        } catch (ClassNotFoundException e) {
            log.log(Level.SEVERE, "Exiting deserializeView - Could not deserialize state: " + e.getMessage(),
                    e);
            return null;
        }
    } else if (state instanceof Object[]) {
        if (log.isLoggable(Level.FINEST)) {
            log.finest("Exiting deserializeView - state not serialized.");
        }

        return state;
    } else if (state == null) {
        log.severe("Exiting deserializeView - this method should not be called with a null-state.");
        return null;
    } else {
        log.severe("Exiting deserializeView - this method should not be called with a state of type : "
                + state.getClass());
        return null;
    }
}
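
The MyFaces state deserializer above only wraps the read in doPrivileged when a SecurityManager is installed; otherwise it reads directly. A condensed sketch of that conditional pattern, with the surrounding stream handling omitted:

import java.io.IOException;
import java.io.ObjectInputStream;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class ConditionalPrivilegedReadSketch {
    static Object readState(final ObjectInputStream in)
            throws IOException, ClassNotFoundException, PrivilegedActionException {
        if (System.getSecurityManager() != null) {
            // Run the deserialization with this code's own privileges
            return AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                public Object run() throws IOException, ClassNotFoundException {
                    return in.readObject();
                }
            });
        }
        // No SecurityManager installed: call directly
        return in.readObject();
    }
}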

From source file:org.apache.hadoop.hdfs.server.namenode.ImageServlet.java

@Override
protected void doPut(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    try {
        ServletContext context = getServletContext();
        final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
        final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
        final PutImageParams parsedParams = new PutImageParams(request, response, conf);
        final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();

        validateRequest(context, conf, request, response, nnImage, parsedParams.getStorageInfoString());

        UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {

                final long txid = parsedParams.getTxId();

                final NameNodeFile nnf = parsedParams.getNameNodeFile();

                if (!nnImage.addToCheckpointing(txid)) {
                    response.sendError(HttpServletResponse.SC_CONFLICT,
                            "Either current namenode is checkpointing or another"
                                    + " checkpointer is already in the process of "
                                    + "uploading a checkpoint made at transaction ID " + txid);
                    return null;
                }
                try {
                    if (nnImage.getStorage().findImageFile(nnf, txid) != null) {
                        response.sendError(HttpServletResponse.SC_CONFLICT,
                                "Either current namenode has checkpointed or "
                                        + "another checkpointer already uploaded an " + "checkpoint for txid "
                                        + txid);
                        return null;
                    }

                    InputStream stream = request.getInputStream();
                    try {
                        long start = monotonicNow();
                        MD5Hash downloadImageDigest = TransferFsImage.handleUploadImageRequest(request, txid,
                                nnImage.getStorage(), stream, parsedParams.getFileSize(), getThrottler(conf));
                        nnImage.saveDigestAndRenameCheckpointImage(nnf, txid, downloadImageDigest);
                        // Metrics non-null only when used inside name node
                        if (metrics != null) {
                            long elapsed = monotonicNow() - start;
                            metrics.addPutImage(elapsed);
                        }
                        // Now that we have a new checkpoint, we might be able to
                        // remove some old ones.
                        nnImage.purgeOldStorage(nnf);
                    } finally {
                        stream.close();
                    }
                } finally {
                    nnImage.removeFromCheckpointing(txid);
                }
                return null;
            }

        });
    } catch (Throwable t) {
        String errMsg = "PutImage failed. " + StringUtils.stringifyException(t);
        response.sendError(HttpServletResponse.SC_GONE, errMsg);
        throw new IOException(errMsg);
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods.java

/** Handle HTTP POST request. */
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({ "*/*" })
@Produces({ MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON })
public Response post(@Context final UserGroupInformation ugi,
        @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) final DelegationParam delegation,
        @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT) final UserParam username,
        @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) final DoAsParam doAsUser,
        @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
        @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT) final PostOpParam op,
        @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) final BufferSizeParam bufferSize)
        throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op, bufferSize);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
        @Override
        public Response run() throws IOException, URISyntaxException {
            REMOTE_ADDRESS.set(request.getRemoteAddr());
            try {

                final String fullpath = path.getAbsolutePath();
                final NameNode namenode = (NameNode) context.getAttribute("name.node");

                switch (op.getValue()) {
                case APPEND: {
                    final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath,
                            op.getValue(), -1L, bufferSize);
                    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                default:
                    throw new UnsupportedOperationException(op + " is not supported");
                }

            } finally {
                REMOTE_ADDRESS.set(null);
            }
        }
    });
}

From source file:org.apache.bsf.BSFManager.java

/**
 * Execute the given script of the given language, attempting to
 * emulate an interactive session w/ the language.
 *
 * @param lang     language identifier
 * @param source   (context info) the source of this expression
 *                 (e.g., filename)
 * @param lineNo   (context info) the line number in source for expr
 * @param columnNo (context info) the column number in source for expr
 * @param script   the script to execute
 *
 * @exception BSFException if anything goes wrong while running the script
 */
public void iexec(String lang, String source, int lineNo, int columnNo, Object script) throws BSFException {
    logger.debug("BSFManager:iexec");

    final BSFEngine e = loadScriptingEngine(lang);
    final String sourcef = source;
    final int lineNof = lineNo, columnNof = columnNo;
    final Object scriptf = script;

    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws Exception {
                e.iexec(sourcef, lineNof, columnNof, scriptf);
                return null;
            }
        });
    } catch (PrivilegedActionException prive) {

        logger.error("Exception :", prive);
        throw (BSFException) prive.getException();
    }
}

From source file:org.apache.hadoop.hive.common.FileUtils.java

public static boolean isOwnerOfFileHierarchy(final FileSystem fs, final FileStatus fileStatus,
        final String userName, final boolean recurse) throws IOException, InterruptedException {
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(userName,
            UserGroupInformation.getLoginUser());
    try {
        boolean isOwner = proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
                return checkIsOwnerOfFileHierarchy(fsAsUser, fileStatus, userName, recurse);
            }
        });
        return isOwner;
    } finally {
        FileSystem.closeAllForUGI(proxyUser);
    }
}
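
The Hive utility above combines PrivilegedExceptionAction with a proxy user created from the login user, and closes the cached FileSystem instances for that UGI afterwards. Below is a simplified, non-recursive sketch of the same shape, assuming proxy-user support is configured for the login user; the ownership check on a single path stands in for the hierarchy walk in the real code.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserOwnerSketch {
    // Returns true if 'userName' owns 'path', evaluated with that user's credentials.
    static boolean isOwner(final FileSystem fs, final Path path, final String userName)
            throws IOException, InterruptedException {
        UserGroupInformation proxyUser =
                UserGroupInformation.createProxyUser(userName, UserGroupInformation.getLoginUser());
        try {
            return proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() {
                @Override
                public Boolean run() throws Exception {
                    FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
                    FileStatus status = fsAsUser.getFileStatus(path);
                    return userName.equals(status.getOwner());
                }
            });
        } finally {
            // Release cached FileSystem instances created for the proxy UGI
            FileSystem.closeAllForUGI(proxyUser);
        }
    }
}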

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java

public void init(Stage.Context context, List<Stage.ConfigIssue> issues) {
    boolean hadoopFSValidated = validateHadoopFS(context, issues);

    lateRecordsLimitSecs = initTimeConfigs(context, "lateRecordsLimit", lateRecordsLimit, Groups.LATE_RECORDS,
            issues);
    if (idleTimeout != null && !idleTimeout.isEmpty()) {
        idleTimeSecs = initTimeConfigs(context, "idleTimeout", idleTimeout, Groups.OUTPUT_FILES, issues);
    }
    if (maxFileSize < 0) {
        issues.add(context.createConfigIssue(Groups.LATE_RECORDS.name(),
                HDFS_TARGET_CONFIG_BEAN_PREFIX + "maxFileSize", Errors.HADOOPFS_08));
    }

    if (maxRecordsPerFile < 0) {
        issues.add(context.createConfigIssue(Groups.LATE_RECORDS.name(),
                HDFS_TARGET_CONFIG_BEAN_PREFIX + "maxRecordsPerFile", Errors.HADOOPFS_09));
    }

    if (uniquePrefix == null) {
        uniquePrefix = "";
    }

    dataGeneratorFormatConfig.init(context, dataFormat, Groups.OUTPUT_FILES.name(),
            HDFS_TARGET_CONFIG_BEAN_PREFIX + "dataGeneratorFormatConfig", issues);

    SequenceFile.CompressionType compressionType = (seqFileCompressionType != null)
            ? seqFileCompressionType.getType()
            : null;
    try {
        switch (compression) {
        case OTHER:
            try {
                Class klass = Thread.currentThread().getContextClassLoader().loadClass(otherCompression);
                if (CompressionCodec.class.isAssignableFrom(klass)) {
                    compressionCodec = ((Class<? extends CompressionCodec>) klass).newInstance();
                } else {
                    throw new StageException(Errors.HADOOPFS_04, otherCompression);
                }
            } catch (Exception ex1) {
                throw new StageException(Errors.HADOOPFS_05, otherCompression, ex1.toString(), ex1);
            }
            break;
        case NONE:
            break;
        default:
            try {
                compressionCodec = compression.getCodec().newInstance();
            } catch (IllegalAccessException | InstantiationException ex) {
                LOG.info("Error: " + ex.getMessage(), ex.toString(), ex);
                issues.add(context.createConfigIssue(Groups.OUTPUT_FILES.name(), null, Errors.HADOOPFS_48,
                        ex.toString(), ex));
            }
            break;
        }
        if (compressionCodec != null) {
            if (compressionCodec instanceof Configurable) {
                ((Configurable) compressionCodec).setConf(hdfsConfiguration);
            }
        }
    } catch (StageException ex) {
        LOG.info("Validation Error: " + ex.getMessage(), ex.toString(), ex);
        issues.add(context.createConfigIssue(Groups.OUTPUT_FILES.name(), null, ex.getErrorCode(), ex.toString(),
                ex));
    }

    if (hadoopFSValidated) {
        try {
            // Creating RecordWriterManager for dirPathTemplate
            RecordWriterManager mgr = new RecordWriterManager(new URI(hdfsUri), hdfsConfiguration, uniquePrefix,
                    dirPathTemplate, TimeZone.getTimeZone(timeZoneID), lateRecordsLimitSecs,
                    maxFileSize * MEGA_BYTE, maxRecordsPerFile, fileType, compressionCodec, compressionType,
                    keyEl, dataGeneratorFormatConfig.getDataGeneratorFactory(), (Target.Context) context,
                    "dirPathTemplate");

            if (idleTimeSecs > 0) {
                mgr.setIdleTimeoutSeconds(idleTimeSecs);
            }

            // validate if the dirPathTemplate can be resolved by Els constants
            if (mgr.validateDirTemplate(Groups.OUTPUT_FILES.name(), "dirPathTemplate",
                    HDFS_TARGET_CONFIG_BEAN_PREFIX + "dirPathTemplate", issues)) {
                String newDirPath = mgr.getDirPath(new Date()).toString();
                if (validateHadoopDir( // permission check on the output directory
                        context, HDFS_TARGET_CONFIG_BEAN_PREFIX + "dirPathTemplate", Groups.OUTPUT_FILES.name(),
                        newDirPath, issues)) {
                    currentWriters = new ActiveRecordWriters(mgr);
                }
            }
        } catch (Exception ex) {
            LOG.info("Validation Error: " + Errors.HADOOPFS_11.getMessage(), ex.toString(), ex);
            issues.add(context.createConfigIssue(Groups.OUTPUT_FILES.name(), null, Errors.HADOOPFS_11,
                    ex.toString(), ex));
        }

        // Creating RecordWriterManager for Late Records
        if (lateRecordsDirPathTemplate != null && !lateRecordsDirPathTemplate.isEmpty()) {
            try {
                RecordWriterManager mgr = new RecordWriterManager(new URI(hdfsUri), hdfsConfiguration,
                        uniquePrefix, lateRecordsDirPathTemplate, TimeZone.getTimeZone(timeZoneID),
                        lateRecordsLimitSecs, maxFileSize * MEGA_BYTE, maxRecordsPerFile, fileType,
                        compressionCodec, compressionType, keyEl,
                        dataGeneratorFormatConfig.getDataGeneratorFactory(), (Target.Context) context,
                        "lateRecordsDirPathTemplate");

                if (idleTimeSecs > 0) {
                    mgr.setIdleTimeoutSeconds(idleTimeSecs);
                }

                // validate if the lateRecordsDirPathTemplate can be resolved by Els constants
                if (mgr.validateDirTemplate(Groups.OUTPUT_FILES.name(), "lateRecordsDirPathTemplate",
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "lateRecordsDirPathTemplate", issues)) {
                    String newLateRecordPath = mgr.getDirPath(new Date()).toString();
                    if (lateRecordsAction == LateRecordsAction.SEND_TO_LATE_RECORDS_FILE
                            && lateRecordsDirPathTemplate != null && !lateRecordsDirPathTemplate.isEmpty()
                            && validateHadoopDir( // permission check on the late record directory
                                    context, HDFS_TARGET_CONFIG_BEAN_PREFIX + "lateRecordsDirPathTemplate",
                                    Groups.LATE_RECORDS.name(), newLateRecordPath, issues)) {
                        lateWriters = new ActiveRecordWriters(mgr);
                    }
                }
            } catch (Exception ex) {
                issues.add(context.createConfigIssue(Groups.LATE_RECORDS.name(), null, Errors.HADOOPFS_17,
                        ex.toString(), ex));
            }
        }
    }

    timeDriverElEval = context.createELEval("timeDriver");
    try {
        ELVars variables = context.createELVars();
        RecordEL.setRecordInContext(variables, context.createRecord("validationConfigs"));
        TimeNowEL.setTimeNowInContext(variables, new Date());
        context.parseEL(timeDriver);
        timeDriverElEval.eval(variables, timeDriver, Date.class);
    } catch (ELEvalException ex) {
        issues.add(context.createConfigIssue(Groups.OUTPUT_FILES.name(),
                HDFS_TARGET_CONFIG_BEAN_PREFIX + "timeDriver", Errors.HADOOPFS_19, ex.toString(), ex));
    }

    if (issues.isEmpty()) {

        try {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    FileSystem fs = getFileSystemForInitDestroy();
                    getCurrentWriters().commitOldFiles(fs);
                    if (getLateWriters() != null) {
                        getLateWriters().commitOldFiles(fs);
                    }
                    return null;
                }
            });
        } catch (Exception ex) {
            issues.add(context.createConfigIssue(null, null, Errors.HADOOPFS_23, ex.toString(), ex));
        }
        toHdfsRecordsCounter = context.createCounter("toHdfsRecords");
        toHdfsRecordsMeter = context.createMeter("toHdfsRecords");
        lateRecordsCounter = context.createCounter("lateRecords");
        lateRecordsMeter = context.createMeter("lateRecords");
    }
}

From source file:com.trendmicro.hdfs.webdav.HDFSResource.java

@Override
public void spool(final OutputContext context) throws IOException {
    if (!isCollection())
        try {
            user.doAs(new PrivilegedExceptionAction<Void>() {
                public Void run() throws Exception {
                    InputStream input = FileSystem.get(conf).open(path);
                    try {
                        IOUtils.copyBytes(input, context.getOutputStream(), conf, false);
                    } finally {
                        input.close();
                    }
                    return null;
                }
            });
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * HRegion test case that is made of a major compacted HFile (created with three bulk loaded
 * files) and an edit in the memstore.
 * This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries
 * from being replayed"
 * @throws IOException
 * @throws IllegalAccessException
 * @throws NoSuchFieldException
 * @throws IllegalArgumentException
 * @throws SecurityException
 */
@Test
public void testCompactedBulkLoadedFiles() throws IOException, SecurityException, IllegalArgumentException,
        NoSuchFieldException, IllegalAccessException, InterruptedException {
    final TableName tableName = TableName.valueOf("testCompactedBulkLoadedFiles");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
    deleteDir(basedir);
    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);

    // Add an edit so something in the WAL
    byte[] row = tableName.getName();
    byte[] family = htd.getFamilies().iterator().next().getName();
    region.put((new Put(row)).addColumn(family, family, family));
    wal.sync();

    List<Pair<byte[], String>> hfs = new ArrayList<Pair<byte[], String>>(1);
    for (int i = 0; i < 3; i++) {
        Path f = new Path(basedir, "hfile" + i);
        HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(i + "00"),
                Bytes.toBytes(i + "50"), 10);
        hfs.add(Pair.newPair(family, f.toString()));
    }
    region.bulkLoadHFiles(hfs, true, null);
    final int rowsInsertedCount = 31;
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));

    // major compact to turn all the bulk loaded files into one normal file
    region.compact(true);
    assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));

    // Now 'crash' the region by stealing its wal
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, tableName.getNameAsString());
    user.runAs(new PrivilegedExceptionAction() {
        @Override
        public Object run() throws Exception {
            runWALSplit(newConf);
            WAL wal2 = createWAL(newConf, hbaseRootDir, logName);

            HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri, htd,
                    wal2);
            long seqid2 = region2.getOpenSeqNum();
            assertTrue(seqid2 > -1);
            assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));

            // I can't close wal1.  Its been appropriated when we split.
            region2.close();
            wal2.close();
            return null;
        }
    });
}