List of usage examples for org.apache.hadoop.fs FileSystem mkdirs
public abstract boolean mkdirs(Path f, FsPermission permission) throws IOException;
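Before the per-project examples below, a minimal standalone sketch of the call (assuming a default Configuration for the target cluster and an illustrative path /tmp/example; both are placeholders, not taken from the examples):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws IOException {
        // Placeholder configuration; in a real job this would point at the target cluster.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Create the directory (and any missing parents) with mode 755.
        Path dir = new Path("/tmp/example");
        FsPermission perm = new FsPermission((short) 0755);
        boolean created = fs.mkdirs(dir, perm);
        System.out.println("mkdirs returned " + created);
    }
}

Note that the permission actually applied is subject to the filesystem's configured umask, as the MapReduceTool example below points out.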
From source file:com.cloudera.hoop.fs.FSMkdirs.java
License:Open Source License
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 * @return <code>true</code> if the mkdirs operation was successful,
 *         <code>false</code> otherwise.
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public JSONObject execute(FileSystem fs) throws IOException {
    FsPermission fsPermission = FSUtils.getPermission(permission);
    boolean mkdirs = fs.mkdirs(path, fsPermission);
    return FSUtils.toJSON("mkdirs", mkdirs);
}
From source file:com.cloudera.recordbreaker.analyzer.DataQuery.java
License:Open Source License
String grabTable(DataDescriptor desc) throws SQLException, IOException {
    // Set up Hive table
    Path p = desc.getFilename();
    String tablename = tableCache.get(p);
    if (tablename == null) {
        tablename = "datatable" + Math.abs(r.nextInt());
        Statement stmt = hiveCon.createStatement();
        try {
            String creatTxt = desc.getHiveCreateTableStatement(tablename);
            LOG.info("Create: " + creatTxt);
            stmt.execute(creatTxt);
            tables.put(p, tablename);
        } finally {
            stmt.close();
        }

        // Copy avro version of data into secret location prior to Hive import
        FileSystem fs = FileSystem.get(conf);
        Path tmpTables = new Path(tmpTablesDir);
        if (!fs.exists(tmpTables)) {
            fs.mkdirs(tmpTables, new FsPermission("-rwxrwxrwx"));
        }
        Path secretDst = new Path(tmpTables, "r" + r.nextInt());
        LOG.info("Preparing Avro data at " + secretDst);
        desc.prepareAvroFile(fs, fs, secretDst, conf);
        fs.setPermission(secretDst, new FsPermission("-rwxrwxrwx"));

        // Import data
        stmt = hiveCon.createStatement();
        try {
            LOG.info("Import data into Hive: " + desc.getHiveImportDataStatement(tablename, secretDst));
            stmt.execute(desc.getHiveImportDataStatement(tablename, secretDst));
            isLoaded.add(p);
        } finally {
            stmt.close();
        }

        // Refresh impala metadata
        stmt = impalaCon.createStatement();
        try {
            try {
                LOG.info("Rebuilding Impala metadata...");
                stmt.execute("INVALIDATE METADATA");
            } catch (Exception iex) {
                LOG.info("Impala metadata rebuild failed: " + iex.toString());
            }
        } finally {
            stmt.close();
        }

        // Insert into table cache
        tableCache.put(p, tablename);
    }
    return tablename;
}
From source file:com.ibm.bi.dml.runtime.util.MapReduceTool.java
License:Open Source License
/**
 * @param dir directory path to create
 * @param permissions three-character octal permission string
 * @throws IOException
 */
public static void createDirIfNotExistOnHDFS(String dir, String permissions) throws IOException {
    Path path = new Path(dir);
    try {
        FileSystem fs = FileSystem.get(_rJob);
        if (!fs.exists(path)) {
            // convert the octal permission string (e.g. "700") into a short mode
            char[] c = permissions.toCharArray();
            short sU = (short) ((c[0] - 48) * 64);
            short sG = (short) ((c[1] - 48) * 8);
            short sO = (short) ((c[2] - 48));
            short mode = (short) (sU + sG + sO);
            FsPermission perm = new FsPermission(mode);
            fs.mkdirs(path, perm);
        }
    } catch (Exception ex) {
        throw new IOException("Failed in creating a non existing dir on HDFS", ex);
    }
    // NOTE: we depend on the configured umask; setting the umask in the job or FsPermission has no effect,
    // and setting dfs.datanode.data.dir.perm has no effect either.
}
From source file:com.liferay.hadoop.store.HDFSStore.java
License:Open Source License
@Override
public void addDirectory(long companyId, long repositoryId, String dirName)
        throws PortalException, SystemException {

    Path fullPath = HadoopManager.getFullDirPath(companyId, repositoryId, dirName);

    try {
        FileSystem fileSystem = HadoopManager.getFileSystem();

        fileSystem.mkdirs(fullPath, FsPermission.getDefault());
    } catch (IOException ioe) {
        throw new SystemException(ioe);
    }
}
From source file:com.mellanox.r4h.DistributedFileSystem.java
License:Apache License
private boolean mkdirsInternal(Path f, final FsPermission permission, final boolean createParent)
        throws IOException {
    statistics.incrementWriteOps(1);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<Boolean>() {
        @Override
        public Boolean doCall(final Path p) throws IOException, UnresolvedLinkException {
            return dfs.mkdirs(getPathName(p), permission, createParent);
        }

        @Override
        public Boolean next(final FileSystem fs, final Path p) throws IOException {
            // FileSystem doesn't have a non-recursive mkdir() method
            // Best we can do is error out
            if (!createParent) {
                throw new IOException("FileSystem does not support non-recursive mkdir");
            }
            return fs.mkdirs(p, permission);
        }
    }.resolve(this, absF);
}
From source file:com.trendmicro.hdfs.webdav.test.TestCopySimple.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/rw"),
                    new FsPermission(FsAction.ALL, FsAction.WRITE_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/ro"),
                    new FsPermission(FsAction.READ_EXECUTE, FsAction.NONE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/rw/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestDeleteSimple.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/private/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/private/file2"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file3"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file4"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.READ), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestGetSimple.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            FSDataOutputStream os;
            os = fs.create(new Path("/test/pubdata"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testPublicData.getBytes());
            os.close();
            os = fs.create(new Path("/test/privdata"),
                    new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testPrivateData.getBytes());
            os.close();
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestMkcolSimple.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/private"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            return null;
        }
    });
}
From source file:com.trendmicro.hdfs.webdav.test.TestMoveSimple.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    Configuration conf = minicluster.getConfiguration();
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".groups",
            "users");
    conf.set("hadoop.proxyuser." + UserGroupInformation.getCurrentUser().getShortUserName() + ".hosts",
            "localhost");
    conf.set("hadoop.webdav.authentication.type", "simple");
    conf.setBoolean("hadoop.webdav.authentication.simple.anonymous.allowed", true);

    minicluster.startMiniCluster(gatewayUser);
    LOG.info("Gateway started on port " + minicluster.getGatewayPort());

    FsPermission.setUMask(conf, new FsPermission((short) 0));

    FileSystem fs = minicluster.getTestFileSystem();
    Path path = new Path("/test");
    assertTrue(fs.mkdirs(path, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
    fs.setOwner(path, ownerUser.getShortUserName(), ownerUser.getGroupNames()[0]);

    ownerUser.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            FileSystem fs = minicluster.getTestFileSystem();
            assertTrue(fs.mkdirs(new Path("/test/owner"),
                    new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE)));
            assertTrue(fs.mkdirs(new Path("/test/public"),
                    new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)));
            FSDataOutputStream os = fs.create(new Path("/test/owner/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            os = fs.create(new Path("/test/public/file1"),
                    new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE), true, 4096, (short) 1,
                    65536, null);
            assertNotNull(os);
            os.write(testData.getBytes());
            os.close();
            return null;
        }
    });
}