Example usage for org.apache.hadoop.fs FileSystem getWorkingDirectory

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem getWorkingDirectory.

Prototype

public abstract Path getWorkingDirectory();

Document

Get the current working directory for the given FileSystem.
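
Before the collected examples, here is a minimal, self-contained sketch of the call (not taken from the projects listed under Usage; the class name and the /tmp path are illustrative). It uses only the local file system, so it should run anywhere Hadoop is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        // The working directory is the base against which relative Paths are resolved.
        System.out.println("Working directory: " + fs.getWorkingDirectory());

        fs.setWorkingDirectory(new Path("/tmp"));
        // A relative Path is now qualified against /tmp, e.g. file:/tmp/data.txt.
        System.out.println(fs.makeQualified(new Path("data.txt")));
        fs.close();
    }
}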

Usage

From source file: com.cloudera.hoop.client.fs.TestHoopFileSystem.java

License: Open Source License

private void testWorkingdirectory() throws Exception {
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path workingDir = fs.getWorkingDirectory();
    fs.close();

    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    Path hoopWorkingDir = fs.getWorkingDirectory();
    fs.close();
    Assert.assertEquals(hoopWorkingDir.toUri().getPath(), workingDir.toUri().getPath());

    conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setWorkingDirectory(new Path("/tmp"));
    workingDir = fs.getWorkingDirectory();
    fs.close();
    Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}

From source file: com.datasalt.utils.mapred.joiner.MultiJoiner.java

License: Apache License

private void addChanneledInputInner(Integer channel, Path location, Class<? extends Object> channelClass,
        Class<? extends InputFormat> inputFormat, Class<? extends MultiJoinChanneledMapper> mapper)
        throws IOException {

    FileSystem fS = location.getFileSystem(getJob().getConfiguration());
    if (!location.toString().startsWith("/")) {
        // relative path
        location = new Path(fS.getWorkingDirectory(), location);
    } else {
        // absolute path
        location = new Path(fS.getUri() + location.toString());
    }
    addInOrder(channel + "", MultiJoinChanneledMapper.MULTIJOINER_CHANNELED_CHANNELS,
            getJob().getConfiguration());
    addInOrder(location.toString(), MultiJoinChanneledMapper.MULTIJOINER_CHANNELED_FILES,
            getJob().getConfiguration());
    System.out.println("Adding file " + location + " with mapper " + mapper.getName());
    MultipleInputs.addInputPath(getJob(), location, inputFormat, mapper);
}
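
Note that the relative/absolute branching above can usually be collapsed into a single call, since FileSystem#makeQualified resolves a relative Path against getWorkingDirectory() and prefixes the file-system URI otherwise. A minimal sketch of that alternative (the helper class and method names are illustrative, not from the project):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class PathQualifier {
    // Qualify a possibly-relative Path the way the branch above does, in one call.
    public static Path qualify(Path location, Configuration conf) throws IOException {
        FileSystem fs = location.getFileSystem(conf);
        return fs.makeQualified(location);
    }
}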

From source file: com.google.mr4c.sources.MapFileSourceLocalTest.java

License: Open Source License

@Before
public void setUp() throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    m_src = new MapFileSource(fs, new Path(fs.getWorkingDirectory(), m_dir));
    m_tester = new ArchiveSourceTester();
}

From source file: com.ibm.jaql.JaqlScriptTestCase.java

License: Apache License

protected void runScript(Mode mode) throws Exception {
    String testLabel = script + "." + mode;
    try {
        String runMode = System.getProperty("test." + mode, "true");
        if (!runMode.equals("true")) {
            System.err.println("\nSkipping disabled jaql test " + testLabel + " (test." + mode + "=" + runMode
                    + " != true)\n");
            return;
        }

        String jaqlHome = System.getProperty("jaql.home", ".");
        jaqlHome = new File(jaqlHome).getAbsolutePath().toString().replace('\\', '/') + "/";

        String scriptDir = jaqlHome + getScriptDir();
        String[] moduleDirs = getModuleDirs();
        String queriesName = scriptDir + script + "Queries.txt";
        String goldName = scriptDir + testLabel + ".gold";

        if (!new File(goldName).exists()) {
            // look for the mode-independent gold file
            if (mode == Mode.COUNT) {
                System.err.println("\nSkipping jaql count test " + testLabel + " (no gold file)\n");
                return;
            }
            goldName = scriptDir + script + ".gold";
            if (!new File(goldName).exists()) {
                Assert.fail("\nNo gold file for jaql test " + testLabel + "at path: " + goldName);
                return;
            }
        }

        System.err.println("\nRunning jaql test " + testLabel + "\n");

        String outDir = jaqlHome + "build/test/";
        String workDir = outDir + "run." + testLabel + "/";
        String outName = workDir + testLabel + ".out";
        new File(workDir).mkdirs();

        // Set the default directories
        System.setProperty("jaql.local.dir", workDir);
        Configuration conf = new Configuration();
        LocalFileSystem lfs = FileSystem.getLocal(conf);
        lfs.setWorkingDirectory(new Path(workDir));
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof LocalFileSystem)) {
            String user = UnixUserGroupInformation.login(conf).getUserName();
            fs.setWorkingDirectory(new Path("/temp/" + user + "/com.ibm.jaql/test/" + script));
        }
        // mapred.working.dir is automatically set from the fs, but only once. 
        // When running multiple tests in the same JVM, it only picks up the first setting.
        if (Globals.getJobConf() != null) {
            Globals.getJobConf().setWorkingDirectory(fs.getWorkingDirectory());
        }

        // make tests work the same on windows as unix.
        System.setProperty("line.separator", "\n");
        final FastPrintWriter resultStream = new FastPrintWriter(new FileWriter(outName));
        Reader queryReader = new InputStreamReader(new FileInputStream(queriesName), "UTF-8");
        queryReader = new EchoedReader(queryReader, new FastPrintStream(System.err));
        queryReader = new EchoedReader(queryReader, resultStream);

        ClassLoaderMgr.reset();

        Jaql jaql = new Jaql(queriesName, queryReader);
        jaql.setModulePath(moduleDirs);

        if (mode == Mode.COUNT) {
            final Class<?>[] exprsToCount = new Class<?>[] { AbstractReadExpr.class, AbstractWriteExpr.class,
                    MapReduceBaseExpr.class, };
            jaql.setExplainHandler(new CountExplainHandler(resultStream, exprsToCount));
            jaql.setExplainOnly(true);
        } else if (mode == Mode.DECOMPILE) {
            jaql.setExplainHandler(new DecompileExplainHandler(System.err));
            jaql.setExplainOnly(true);
        }

        jaql.setExceptionHandler(new TestExceptionHandler(resultStream, jaqlHome));
        jaql.enableRewrite(mode != Mode.NO_REWRITE);
        boolean schemaPrinting = "schemaPrinting".equals(script);
        jaql.setJaqlPrinter(new TestPrinter(resultStream, schemaPrinting));

        String extJar = getExtensionJar();
        if (extJar != null)
            jaql.addJar(jaqlHome + extJar);
        jaql.setVar(DATADIR_NAME, DATADIR_VALUE);

        // run the script
        jaql.run();

        // finish up
        jaql.close();
        queryReader.close();
        resultStream.close();

        // compare with expected output
        boolean diff = compareResults(outName, goldName);
        if (diff) {
            String msg = "Found differences during jaql test " + testLabel;
            System.err.println("\n" + msg);
            Assert.fail(msg);
        }

        System.err.println("\nSuccessfully ran jaql test " + testLabel + "\n");
    } catch (Exception e) {
        e.printStackTrace(System.err);
        System.err.println("\n\nFailure of jaql test " + testLabel);
        Assert.fail(e.getMessage());
    }
}

From source file: com.inmobi.conduit.local.LocalStreamServiceTest.java

License: Apache License

private void createMockForFileSystem(FileSystem fs, Cluster cluster) throws Exception {
    FileStatus[] files = createTestData(2, "/conduit/data/stream", true);

    FileStatus[] stream1 = createTestData(2, "/conduit/data/stream1/collector", true);

    FileStatus[] stream3 = createTestData(NUMBER_OF_FILES, "/conduit/data/stream1/collector1/file", true);

    FileStatus[] stream4 = createTestData(NUMBER_OF_FILES, "/conduit/data/stream1/collector2/file", true);

    FileStatus[] stream2 = createTestData(2, "/conduit/data/stream2/collector", true);

    FileStatus[] stream5 = createTestData(NUMBER_OF_FILES, "/conduit/data/stream2/collector1/file", true);

    FileStatus[] stream6 = createTestData(NUMBER_OF_FILES, "/conduit/data/stream2/collector2/file", true);

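    // Stub the working directory so any path qualification in the service resolves against /tmp/.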
    when(fs.getWorkingDirectory()).thenReturn(new Path("/tmp/"));
    when(fs.getUri()).thenReturn(new URI("localhost"));
    when(fs.listStatus(cluster.getDataDir())).thenReturn(files);
    when(fs.listStatus(new Path("/conduit/data/stream1"))).thenReturn(stream1);

    when(fs.listStatus(new Path("/conduit/data/stream1/collector1"),
            any(LocalStreamService.CollectorPathFilter.class))).thenReturn(stream3);
    when(fs.listStatus(new Path("/conduit/data/stream2"))).thenReturn(stream2);
    when(fs.listStatus(new Path("/conduit/data/stream1/collector2"),
            any(LocalStreamService.CollectorPathFilter.class))).thenReturn(stream4);
    when(fs.listStatus(new Path("/conduit/data/stream2/collector1"),
            any(LocalStreamService.CollectorPathFilter.class))).thenReturn(stream5);
    when(fs.listStatus(new Path("/conduit/data/stream2/collector2"),
            any(LocalStreamService.CollectorPathFilter.class))).thenReturn(stream6);

    Path file = mock(Path.class);
    when(file.makeQualified(any(FileSystem.class))).thenReturn(new Path("/conduit/data/stream1/collector1/"));
}

From source file: com.inmobi.databus.local.LocalStreamServiceTest.java

License: Apache License

private void createMockForFileSystem(FileSystem fs, Cluster cluster) throws Exception {
    FileStatus[] files = createTestData(2, "/databus/data/stream", true);

    FileStatus[] stream1 = createTestData(2, "/databus/data/stream1/collector", true);

    FileStatus[] stream3 = createTestData(number_files, "/databus/data/stream1/collector1/file", true);

    FileStatus[] stream4 = createTestData(number_files, "/databus/data/stream1/collector2/file", true);

    FileStatus[] stream2 = createTestData(2, "/databus/data/stream2/collector", true);

    FileStatus[] stream5 = createTestData(number_files, "/databus/data/stream2/collector1/file", true);

    FileStatus[] stream6 = createTestData(number_files, "/databus/data/stream2/collector2/file", true);

    when(fs.getWorkingDirectory()).thenReturn(new Path("/tmp/"));
    when(fs.getUri()).thenReturn(new URI("localhost"));
    when(fs.listStatus(cluster.getDataDir())).thenReturn(files);
    when(fs.listStatus(new Path("/databus/data/stream1"))).thenReturn(stream1);

    when(fs.listStatus(new Path("/databus/data/stream1/collector1"), any(CollectorPathFilter.class)))
            .thenReturn(stream3);
    when(fs.listStatus(new Path("/databus/data/stream2"))).thenReturn(stream2);
    when(fs.listStatus(new Path("/databus/data/stream1/collector2"), any(CollectorPathFilter.class)))
            .thenReturn(stream4);
    when(fs.listStatus(new Path("/databus/data/stream2/collector1"), any(CollectorPathFilter.class)))
            .thenReturn(stream5);
    when(fs.listStatus(new Path("/databus/data/stream2/collector2"), any(CollectorPathFilter.class)))
            .thenReturn(stream6);

    Path file = mock(Path.class);
    when(file.makeQualified(any(FileSystem.class))).thenReturn(new Path("/databus/data/stream1/collector1/"));
}

From source file: com.inmobi.grid.fs.s4fs.NativeS4FileSystem.java

License: Apache License

/**
 * fs.default.name in conf is the HDFS store that holds the credential file for
 * s3n under /user/<name>, named <bucket>.crd.
 * The .crd file contains access:secret on a single line.
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {

    this.uri = uri;

    if (new Path(conf.get("fs.default.name")).toUri().getScheme().equals("s4")) {
        // currently illegal to set fs.default.name to s4;
        // without this, below code causes recursive call.
        return;
    }

    FileSystem fs = FileSystem.get(conf);
    Path nnWorkingDir = fs.getHomeDirectory();

    if (!fs.exists(nnWorkingDir)) {
        throw new IOException("Users home directory does not exist: " + fs.getWorkingDirectory());
    }

    String scheme = uri.getScheme();
    String bucket = uri.getAuthority();

    Path credFile = new Path(nnWorkingDir, bucket + ".crd");
    if (!fs.exists(credFile)) {
        throw new IOException(credFile.toString() + " does not exist");
    }

    StringBuilder sb = new StringBuilder(getCredentialFromFile(fs, credFile)).append("@").append(bucket);
    String bucketWithAccess = uri.toString().replaceFirst(scheme, "s3n");
    bucketWithAccess = bucketWithAccess.replaceFirst(bucket, sb.toString());

    super.initialize(new Path(bucketWithAccess).toUri(), conf);

}

From source file: com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java

License: Apache License

/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    // If we are not the 0th worker, wait for the 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    // CODE CHANGE FROM ORIGINAL FILE:
    // We need to remove the jar-file resources, since they are distributed through the IG.
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.

    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.containsKey(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    // If we are the 0th worker, signal that the action is complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}

From source file: com.splout.db.dnode.TestFetcher.java

License: Open Source License

@Test
public void testHdfsFetching() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    FileSystem fS = FileSystem.getLocal(conf);

    SploutConfiguration testConfig = SploutConfiguration.getTestConfig();
    testConfig.setProperty(FetcherProperties.TEMP_DIR, "tmp-dir-" + TestFetcher.class.getName());
    Fetcher fetcher = new Fetcher(testConfig);

    Path path = new Path("tmp-" + TestFetcher.class.getName());
    OutputStream oS = fS.create(path);
    oS.write("This is what happens when you don't know what to write".getBytes());
    oS.close();

    File f = fetcher.fetch(new Path(fS.getWorkingDirectory(), path.getName()).toUri().toString());

    assertTrue(f.exists());
    assertTrue(f.isDirectory());

    File file = new File(f, "tmp-" + TestFetcher.class.getName());
    assertTrue(file.exists());

    assertEquals("This is what happens when you don't know what to write",
            Files.toString(file, Charset.defaultCharset()));

    fS.delete(path, true);
    FileUtils.deleteDirectory(f);
}

From source file: com.splout.db.dnode.TestFetcher.java

License: Open Source License

@Test
public void testHdfsFetchingInterrupted() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    final FileSystem fS = FileSystem.getLocal(conf);

    SploutConfiguration testConfig = SploutConfiguration.getTestConfig();
    testConfig.setProperty(FetcherProperties.TEMP_DIR, "tmp-dir-" + TestFetcher.class.getName());
    final Fetcher fetcher = new Fetcher(testConfig);

    final Path path = new Path("tmp-" + TestFetcher.class.getName());
    OutputStream oS = fS.create(path);
    oS.write("This is what happens when you don't know what to write".getBytes());
    oS.close();

    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                try {
                    File f = fetcher
                            .fetch(new Path(fS.getWorkingDirectory(), path.getName()).toUri().toString());
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (URISyntaxException e) {
                    e.printStackTrace();
                }
                fail("An InterruptedException was expected.");
            } catch (InterruptedException e) {
                // Everything good.
            }
        }
    };
    // We interrupt the thread before starting so we are sure that the interruption check
    // will be seen even if the file to copy is very small.
    t.interrupt();
    t.start();
}