Example usage for org.apache.hadoop.fs FileSystem setPermission

List of usage examples for org.apache.hadoop.fs FileSystem setPermission

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#setPermission.

Prototype

public void setPermission(Path p, FsPermission permission) throws IOException 

Document

Set permission of a path.
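
Before the project examples below, here is a minimal, self-contained sketch of the call. The path and the mode are illustrative assumptions, not taken from any of the projects listed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetPermissionExample {
    public static void main(String[] args) throws Exception {
        // Obtain the file system named by fs.defaultFS in the Configuration.
        FileSystem fs = FileSystem.get(new Configuration());
        // Equivalent of "chmod 750" on the path; 0750 is a Java octal literal.
        fs.setPermission(new Path("/tmp/example"), new FsPermission((short) 0750));
        fs.close();
    }
}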

Usage

From source file: com.inmobi.conduit.distcp.tools.util.DistCpUtils.java

License: Apache License

/**
 * Preserve attribute on file matching that of the file status being sent
 * as argument. Barring the block size, all the other attributes are preserved
 * by this function.
 *
 * @param targetFS - File system
 * @param path - Path that needs to preserve original file status
 * @param srcFileStatus - Original file status
 * @param attributes - Attribute set that need to be preserved
 * @throws IOException - Exception if any (particularly relating to group/owner
 *                       change or any transient error)
 */
public static void preserve(FileSystem targetFS, Path path, FileStatus srcFileStatus,
        EnumSet<FileAttribute> attributes) throws IOException {

    FileStatus targetFileStatus = targetFS.getFileStatus(path);
    String group = targetFileStatus.getGroup();
    String user = targetFileStatus.getOwner();
    boolean chown = false;

    if (attributes.contains(FileAttribute.PERMISSION)
            && !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
        targetFS.setPermission(path, srcFileStatus.getPermission());
    }

    if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDir()
            && srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
        targetFS.setReplication(path, srcFileStatus.getReplication());
    }

    if (attributes.contains(FileAttribute.GROUP) && !group.equals(srcFileStatus.getGroup())) {
        group = srcFileStatus.getGroup();
        chown = true;
    }

    if (attributes.contains(FileAttribute.USER) && !user.equals(srcFileStatus.getOwner())) {
        user = srcFileStatus.getOwner();
        chown = true;
    }

    if (chown) {
        targetFS.setOwner(path, user, group);
    }
}

From source file: com.inmobi.conduit.distcp.tools.util.TestDistCpUtils.java

License: Apache License

@Test
public void testPreserve() {
    try {
        FileSystem fs = FileSystem.get(config);
        EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

        Path path = new Path("/tmp/abc");
        Path src = new Path("/tmp/src");
        fs.mkdirs(path);
        fs.mkdirs(src);
        FileStatus srcStatus = fs.getFileStatus(src);

        FsPermission noPerm = new FsPermission((short) 0);
        fs.setPermission(path, noPerm);
        fs.setOwner(path, "nobody", "nobody");

        DistCpUtils.preserve(fs, path, srcStatus, attributes);
        FileStatus target = fs.getFileStatus(path);
        Assert.assertEquals(target.getPermission(), noPerm);
        Assert.assertEquals(target.getOwner(), "nobody");
        Assert.assertEquals(target.getGroup(), "nobody");

        attributes.add(FileAttribute.PERMISSION);
        DistCpUtils.preserve(fs, path, srcStatus, attributes);
        target = fs.getFileStatus(path);
        Assert.assertEquals(target.getPermission(), srcStatus.getPermission());
        Assert.assertEquals(target.getOwner(), "nobody");
        Assert.assertEquals(target.getGroup(), "nobody");

        attributes.add(FileAttribute.GROUP);
        attributes.add(FileAttribute.USER);
        DistCpUtils.preserve(fs, path, srcStatus, attributes);
        target = fs.getFileStatus(path);
        Assert.assertEquals(target.getPermission(), srcStatus.getPermission());
        Assert.assertEquals(target.getOwner(), srcStatus.getOwner());
        Assert.assertEquals(target.getGroup(), srcStatus.getGroup());

        fs.delete(path, true);
        fs.delete(src, true);
    } catch (IOException e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Preserve test failure");
    }
}

From source file: com.inmobi.conduit.distcp.tools.util.TestDistCpUtils.java

License: Apache License

public static String createTestSetup(String baseDir, FileSystem fs, FsPermission perm) throws IOException {
    String base = getBase(baseDir);
    fs.mkdirs(new Path(base + "/newTest/hello/world1"));
    fs.mkdirs(new Path(base + "/newTest/hello/world2/newworld"));
    fs.mkdirs(new Path(base + "/newTest/hello/world3/oldworld"));
    fs.setPermission(new Path(base + "/newTest"), perm);
    fs.setPermission(new Path(base + "/newTest/hello"), perm);
    fs.setPermission(new Path(base + "/newTest/hello/world1"), perm);
    fs.setPermission(new Path(base + "/newTest/hello/world2"), perm);
    fs.setPermission(new Path(base + "/newTest/hello/world2/newworld"), perm);
    fs.setPermission(new Path(base + "/newTest/hello/world3"), perm);
    fs.setPermission(new Path(base + "/newTest/hello/world3/oldworld"), perm);
    createFile(fs, base + "/newTest/1");
    createFile(fs, base + "/newTest/hello/2");
    createFile(fs, base + "/newTest/hello/world3/oldworld/3");
    createFile(fs, base + "/newTest/hello/world2/4");
    return base;
}

From source file: com.inmobi.conduit.purge.DataPurgerServiceTest.java

License: Apache License

public void testDataPurger() throws Exception {
    AbstractService.clearHCatInMemoryMaps();
    LOG.info("Check data purger does not stop when unable to delete a path");
    ConduitConfigParser configparser = new ConduitConfigParser("test-dps-conduit_X_5.xml");
    ConduitConfig config = configparser.getConfig();

    for (Cluster cluster : config.getClusters().values()) {

        FileSystem fs = FileSystem.getLocal(new Configuration());
        fs.delete(new Path(cluster.getRootDir()), true);

        Calendar date1 = new GregorianCalendar(Calendar.getInstance().getTimeZone());
        date1.add(Calendar.HOUR, -7);
        createTestPurgefiles(fs, cluster, date1, false);
        Calendar date2 = new GregorianCalendar(Calendar.getInstance().getTimeZone());
        date2.add(Calendar.HOUR, -6);
        createTestPurgefiles(fs, cluster, date2, false);
        ArrayList<Path> pathsToProcess = new ArrayList<Path>();
        Path[] paths = getLocalCommitPath(fs, cluster, date2);
        for (Path path : paths) {
            fs.setPermission(path, new FsPermission("000"));
            pathsToProcess.add(path);
        }
        paths = getMergeCommitPath(fs, cluster, date2);
        for (Path path : paths) {
            fs.setPermission(path, new FsPermission("000"));
            pathsToProcess.add(path);
        }
        Calendar date3 = new GregorianCalendar(Calendar.getInstance().getTimeZone());
        date3.add(Calendar.HOUR, -5);
        createTestPurgefiles(fs, cluster, date3, false);

        TestDataPurgerService service = new TestDataPurgerService(config, cluster);

        service.runOnce();

        verifyPurgefiles(fs, cluster, date1, false, false);
        verifyPurgefiles(fs, cluster, date2, true, false);
        verifyPurgefiles(fs, cluster, date3, false, false);
        for (Path p : pathsToProcess) {
            fs.setPermission(p, new FsPermission("755"));
        }
        fs.delete(new Path(cluster.getRootDir()), true);
        fs.close();
    }

    Assert.assertEquals(ConduitMetrics.<SlidingTimeWindowGauge>getMetric("DataPurgerService",
            "purgePaths.count", DataPurgerService.class.getName()).getValue().longValue(), 9);
    Assert.assertEquals(ConduitMetrics.<SlidingTimeWindowGauge>getMetric("DataPurgerService",
            "deleteFailures.count", DataPurgerService.class.getName()).getValue().longValue(), 0);
}

From source file: com.kylinolap.job.hadoop.hbase.BulkLoadJob.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_INPUT_PATH);
        options.addOption(OPTION_HTABLE_NAME);
        options.addOption(OPTION_CUBE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
        // e.g. /tmp/kylin-3f150b00-3332-41ca-9d3d-652f67f044d7/test_kylin_cube_with_slr_ready_2_segments/hfile/
        // (the input path is expected to end with "/")
        String input = getOptionValue(OPTION_INPUT_PATH);

        Configuration conf = HBaseConfiguration.create(getConf());
        FileSystem fs = FileSystem.get(conf);

        String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
        KylinConfig config = KylinConfig.getInstanceFromEnv();
        CubeManager cubeMgr = CubeManager.getInstance(config);
        CubeInstance cube = cubeMgr.getCube(cubeName);
        CubeDesc cubeDesc = cube.getDescriptor();
        FsPermission permission = new FsPermission((short) 0777);
        for (HBaseColumnFamilyDesc cf : cubeDesc.getHBaseMapping().getColumnFamily()) {
            String cfName = cf.getName();
            fs.setPermission(new Path(input + cfName), permission);
        }

        String[] newArgs = new String[2];
        newArgs[0] = input;
        newArgs[1] = tableName;

        log.debug("Start to run LoadIncrementalHFiles");
        int ret = ToolRunner.run(new LoadIncrementalHFiles(conf), newArgs);
        log.debug("End to run LoadIncrementalHFiles");
        return ret;
    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        log.error(e.getLocalizedMessage(), e);
        return 2;
    }
}
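
A note on the permission literal above: 0777 is a Java octal literal, so new FsPermission((short) 0777) grants rwx to user, group, and other (511 decimal). The examples on this page also show FsPermission's other common constructors: an octal/symbolic string, as in new FsPermission("000") in the purger test above, and an FsAction triple, as in the chmodR helper further below.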

From source file: com.kylinolap.job.hadoop.invertedindex.IIBulkLoadJob.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Options options = new Options();

    try {
        options.addOption(OPTION_INPUT_PATH);
        options.addOption(OPTION_HTABLE_NAME);
        options.addOption(OPTION_CUBE_NAME);
        parseOptions(options, args);

        String tableName = getOptionValue(OPTION_HTABLE_NAME);
        String input = getOptionValue(OPTION_INPUT_PATH);
        String cubeName = getOptionValue(OPTION_CUBE_NAME);

        FileSystem fs = FileSystem.get(getConf());
        FsPermission permission = new FsPermission((short) 0777);
        fs.setPermission(new Path(input, InvertedIndexDesc.HBASE_FAMILY), permission);

        int hbaseExitCode = ToolRunner.run(new LoadIncrementalHFiles(getConf()),
                new String[] { input, tableName });

        CubeManager mgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());
        CubeInstance cube = mgr.getCube(cubeName);
        CubeSegment seg = cube.getFirstSegment();
        seg.setStorageLocationIdentifier(tableName);
        seg.setStatus(CubeSegmentStatusEnum.READY);
        mgr.updateCube(cube);

        return hbaseExitCode;

    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        return 2;
    }
}

From source file: com.lightboxtechnologies.spectrum.ExtractData.java

License: Apache License

protected static void chmodR(FileSystem fs, Path p) throws IOException {
    final FsPermission perm = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    final FileStatus[] list = fs.listStatus(p);
    for (FileStatus f : list) {
        if (f.isDir()) {
            chmodR(fs, f.getPath());
        }
        fs.setPermission(f.getPath(), perm);
    }
    fs.setPermission(p, perm);
}

From source file: com.mellanox.r4h.DistributedFileSystem.java

License: Apache License

@Override
public void setPermission(Path p, final FsPermission permission) throws IOException {
    statistics.incrementWriteOps(1);
    Path absF = fixRelativePart(p);
    new FileSystemLinkResolver<Void>() {
        @Override
        public Void doCall(final Path p) throws IOException, UnresolvedLinkException {
            dfs.setPermission(getPathName(p), permission);
            return null;
        }

        @Override
        public Void next(final FileSystem fs, final Path p) throws IOException {
            fs.setPermission(p, permission);
            return null;
        }
    }.resolve(this, absF);
}
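
The FileSystemLinkResolver idiom above first tries the operation directly against the wrapped DFS client (doCall); if the path crosses a symbolic link into a different file system, resolve() retries the operation through next() on the resolved file system, so the permission change follows symlinks across namespaces.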

From source file: com.moz.fiji.mapreduce.tools.FijiBulkLoad.java

License: Apache License

/**
 * Helper method used by recursiveGrantAllReadWritePermissions to actually grant the
 * additional read and write permissions to all.  It deals with FileStatus objects
 * since that is the object that supports listStatus.
 *
 * @param hdfs The FileSystem on which the file exists.
 * @param status The status of the file whose permissions are checked and on whose children
 *     this method is called recursively.
 * @throws IOException on IOException.
 */
private void recursiveGrantAllReadWritePermissions(FileSystem hdfs, FileStatus status) throws IOException {
    final FsPermission currentPermissions = status.getPermission();
    if (!currentPermissions.getOtherAction().implies(FsAction.READ_WRITE)) {
        LOG.info("Adding a+rw to permissions for {}: {}", status.getPath(), currentPermissions);
        hdfs.setPermission(status.getPath(),
                new FsPermission(currentPermissions.getUserAction(),
                        currentPermissions.getGroupAction().or(FsAction.READ_WRITE),
                        currentPermissions.getOtherAction().or(FsAction.READ_WRITE)));
    }
    // Recurse into any files and directories in the path.
    // We must use listStatus because listFiles does not list subdirectories.
    FileStatus[] subStatuses = hdfs.listStatus(status.getPath());
    for (FileStatus subStatus : subStatuses) {
        if (!subStatus.equals(status)) {
            recursiveGrantAllReadWritePermissions(hdfs, subStatus);
        }
    }
}

From source file: com.phantom.hadoop.examples.pi.Util.java

License: Apache License

/** Create a directory. */
static boolean createNonexistingDirectory(FileSystem fs, Path dir) throws IOException {
    if (fs.exists(dir)) {
        Util.err.println("dir (= " + dir + ") already exists.");
        return false;
    } else if (!fs.mkdirs(dir)) {
        throw new IOException("Cannot create working directory " + dir);
    }
    fs.setPermission(dir, new FsPermission((short) 0777));
    return true;
}