Example usage for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY

List of usage examples for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY.

Prototype

String FS_DEFAULT_NAME_KEY

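FS_DEFAULT_NAME_KEY holds the name of the configuration property for the default filesystem URI ("fs.default.name" in older Hadoop releases; Hadoop 2 and later resolve it to "fs.defaultFS"). As a minimal, self-contained sketch of the pattern shared by the examples below, the following snippet sets the key on a fresh Configuration and obtains the matching FileSystem. The class name FsDefaultNameExample and the "file:///" URI are illustrative choices, not taken from any of the listed sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsDefaultNameExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point the default filesystem at the local FS; a real deployment
        // would use an HDFS URI such as "hdfs://namenode:8020" instead.
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
        try (FileSystem fs = FileSystem.get(conf)) {
            System.out.println("Default filesystem URI: " + fs.getUri());
        }
    }
}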

Usage

From source file: org.apache.accumulo.start.classloader.vfs.providers.HdfsFileSystem.java

License: Apache License

/**
 * @see org.apache.commons.vfs2.provider.AbstractFileSystem#resolveFile(org.apache.commons.vfs2.FileName)
 */
@Override
public FileObject resolveFile(final FileName name) throws FileSystemException {

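    // Lazily open the Hadoop filesystem on the first resolve, pointing
    // FS_DEFAULT_NAME_KEY at the root URI of the requested file name.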
    synchronized (this) {
        if (null == this.fs) {
            final String hdfsUri = name.getRootURI();
            final Configuration conf = new Configuration(true);
            conf.set(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY, hdfsUri);
            this.fs = null;
            try {
                fs = org.apache.hadoop.fs.FileSystem.get(conf);
            } catch (final IOException e) {
                log.error("Error connecting to filesystem " + hdfsUri, e);
                throw new FileSystemException("Error connecting to filesystem " + hdfsUri, e);
            }
        }
    }

    boolean useCache = (null != getContext().getFileSystemManager().getFilesCache());
    FileObject file;
    if (useCache) {
        file = this.getFileFromCache(name);
    } else {
        file = null;
    }
    if (null == file) {
        String path = null;
        try {
            path = URLDecoder.decode(name.getPath(), "UTF-8");
        } catch (final UnsupportedEncodingException e) {
            path = name.getPath();
        }
        final Path filePath = new Path(path);
        file = new HdfsFileObject((AbstractFileName) name, this, fs, filePath);
        if (useCache) {
            this.putFileToCache(file);
        }

    }

    // resync the file information if requested
    if (getFileSystemManager().getCacheStrategy().equals(CacheStrategy.ON_RESOLVE)) {
        file.refresh();
    }

    return file;
}

From source file: org.apache.ambari.servicemonitor.jobs.BulkFileJobSubmitter.java

License: Apache License

private int exec() throws Exception {
    CommandLine commandLine = getCommandLine();
    Configuration conf = getConf();

    String outputdir = OptionHelper.getStringOption(commandLine, "o", "bulkjob");
    outputPath = new Path(outputdir);

    if (commandLine.hasOption('x')) {
        // delete the filesystem dir
        deleteOutputDirectories = true;
    }
    jobs = OptionHelper.getIntOption(commandLine, "j", 1);
    int delay = OptionHelper.getIntOption(commandLine, "l", 1000);
    doneSignal = new CountDownLatch(jobs);

    templateConf = new JobConf(conf);

    String jtURI = MonitorUtils.extractJobTrackerParameter(templateConf);
    LOG.info("Submitting " + (jobs >= 0 ? jobs : "unlimited") + " jobs with a delay of " + delay + " millis"
            + " to JT " + jtURI + " and filesystem " + templateConf.get(FileSystem.FS_DEFAULT_NAME_KEY));

    int jobCount = 0;
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(POOL_SIZE);
    int toSubmit = jobs;
    long started, finished;
    started = System.currentTimeMillis();

    while (toSubmit > 0) {
        scheduler.submit(new JobWorker("instance-" + (++jobCount)));
        Thread.sleep(delay);
        toSubmit--;
    }
    LOG.info("All jobs scheduled in local queue");
    //here all the jobs are submitted, await their completion.
    doneSignal.await();
    finished = System.currentTimeMillis();
    int s = successes.get();
    int f = failures.get();
    long execDuration = totalExecDuration.get();
    long elapsedTime = finished - started;
    LOG.info("Completed. Successes = " + s + " out of " + jobs + " success rate= " + (s * 100) / (jobs) + "% "
            + " total execTime " + MonitorUtils.millisToHumanTime(execDuration) + " " + " elapsed Time "
            + MonitorUtils.millisToHumanTime(elapsedTime));

    return f == 0 ? 0 : 1;
}

From source file: org.apache.ambari.servicemonitor.unit.BaseLocalClusterTestCase.java

License: Apache License

/**
 * Create a Conf bonded to the DFS entry
 * @return a new conf file with fs.default.name set to the mini DFS cluster
 */
protected Configuration createDFSBondedConfiguration() {
    assertDFSCluster();
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, getDFSClusterURI());
    return conf;
}

From source file: org.apache.ambari.servicemonitor.unit.NonBlockingLsDirNoClusterTest.java

License: Apache License

/**
 * Verify that the lsdir command with non-blocking set will fail after the first attempt
 * if there is no cluster up
 * @throws Throwable
 */
@Test
public void testClientRunNoCluster() throws Throwable {
    LsDir lsDir = new LsDir();
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:8020");
    int outcome = ToolRunnerPlus.exec(conf, lsDir, "-d", "/", "--attempts", "1");
    LOG.info(lsDir.toString());
    assertTrue("operation returned success", 0 != outcome);
}

From source file: org.apache.ambari.servicemonitor.unit.TestGenericParser.java

License: Apache License

@Test
public void testCreate() throws Throwable {
    GenericOptionsParser parser = createParser("-jt", JT, "-fs", FS);
    Configuration configuration = parser.getConfiguration();
    assertConfigHas(configuration, HadoopKeys.MAPRED_JOB_TRACKER, JT);
    assertConfigHas(configuration, FileSystem.FS_DEFAULT_NAME_KEY, FS);
}

From source file: org.apache.ambari.servicemonitor.unit.TestGenericParser.java

License: Apache License

@Test
public void testToolRunnerArgs() throws Throwable {
    TestTool tool = runTool("-jt", JT, "-fs", FS, "-D", "a=b");
    Configuration configuration = tool.getConf();
    assertConfigHas(configuration, JobKeys.MAPRED_JOB_TRACKER, JT);
    assertConfigHas(configuration, FileSystem.FS_DEFAULT_NAME_KEY, FS);
    assertConfigHas(configuration, "a", "b");
}

From source file: org.apache.ambari.servicemonitor.utils.DFSUtils.java

License: Apache License

public static MiniMRCluster createMRCluster(JobConf conf, String fsURI) throws IOException {
    String logdir = System.getProperty("java.io.tmpdir") + "/mrcluster/logs";
    System.setProperty("hadoop.log.dir", logdir);
    conf.set("hadoop.job.history.location", "file:///" + logdir + "/history");
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fsURI);
    return new MiniMRCluster(3, fsURI, 1, null, null, conf);
}

From source file: org.apache.commons.vfs2.provider.hdfs.test.HdfsFileProviderTest.java

License: Apache License

@BeforeClass
public static void setUp() throws Exception {
    Logger.getRootLogger().setLevel(Level.ERROR);

    // Put the MiniDFSCluster directory in the target directory
    File data = new File("target/test/hdfstestdata").getAbsoluteFile();
    data.mkdirs();
    System.setProperty("test.build.data", data.toString());
    FileUtils.cleanDirectory(data);

    // Setup HDFS
    conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, HDFS_URI);
    conf.set("hadoop.security.token.service.use_ip", "true");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1M blocksize

    setUmask(conf);

    cluster = new MiniDFSCluster(PORT, conf, 1, true, true, true, null, null, null, null);
    cluster.waitActive();

    // Set up the VFS
    manager = new DefaultFileSystemManager();
    manager.addProvider("hdfs", new HdfsFileProvider());
    manager.init();
    hdfs = cluster.getFileSystem();
}

From source file: org.apache.drill.exec.cache.TestWriteToDisk.java

License: Apache License

@Test
@SuppressWarnings("static-method")
public void test() throws Exception {
    final List<ValueVector> vectorList = Lists.newArrayList();
    final DrillConfig config = DrillConfig.create();
    try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
            final Drillbit bit = new Drillbit(config, serviceSet)) {
        bit.run();
        final DrillbitContext context = bit.getContext();

        final MaterializedField intField = MaterializedField.create("int",
                Types.required(TypeProtos.MinorType.INT));
        final MaterializedField binField = MaterializedField.create("binary",
                Types.required(TypeProtos.MinorType.VARBINARY));
        try (final IntVector intVector = (IntVector) TypeHelper.getNewVector(intField, context.getAllocator());
                final VarBinaryVector binVector = (VarBinaryVector) TypeHelper.getNewVector(binField,
                        context.getAllocator())) {
            AllocationHelper.allocate(intVector, 4, 4);
            AllocationHelper.allocate(binVector, 4, 5);
            vectorList.add(intVector);
            vectorList.add(binVector);

            intVector.getMutator().setSafe(0, 0);
            binVector.getMutator().setSafe(0, "ZERO".getBytes());
            intVector.getMutator().setSafe(1, 1);
            binVector.getMutator().setSafe(1, "ONE".getBytes());
            intVector.getMutator().setSafe(2, 2);
            binVector.getMutator().setSafe(2, "TWO".getBytes());
            intVector.getMutator().setSafe(3, 3);
            binVector.getMutator().setSafe(3, "THREE".getBytes());
            intVector.getMutator().setValueCount(4);
            binVector.getMutator().setValueCount(4);

            VectorContainer container = new VectorContainer();
            container.addCollection(vectorList);
            container.setRecordCount(4);
            WritableBatch batch = WritableBatch.getBatchNoHVWrap(container.getRecordCount(), container, false);
            VectorAccessibleSerializable wrap = new VectorAccessibleSerializable(batch, context.getAllocator());

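            // With "file:///" as the default filesystem, FileSystem.get(conf)
            // below returns the local implementation, so the serialized batch
            // round-trips through a temp file on local disk.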
            Configuration conf = new Configuration();
            conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");

            final VectorAccessibleSerializable newWrap = new VectorAccessibleSerializable(
                    context.getAllocator());
            try (final FileSystem fs = FileSystem.get(conf)) {
                final File tempDir = Files.createTempDir();
                tempDir.deleteOnExit();
                final Path path = new Path(tempDir.getAbsolutePath(), "drillSerializable");
                try (final FSDataOutputStream out = fs.create(path)) {
                    wrap.writeToStream(out);
                }

                try (final FSDataInputStream in = fs.open(path)) {
                    newWrap.readFromStream(in);
                }
            }

            final VectorAccessible newContainer = newWrap.get();
            for (VectorWrapper<?> w : newContainer) {
                try (ValueVector vv = w.getValueVector()) {
                    int values = vv.getAccessor().getValueCount();
                    for (int i = 0; i < values; i++) {
                        final Object o = vv.getAccessor().getObject(i);
                        if (o instanceof byte[]) {
                            System.out.println(new String((byte[]) o));
                        } else {
                            System.out.println(o);
                        }
                    }
                }
            }
        }
    }
}

From source file: org.apache.drill.exec.client.DumpCatTest.java

License: Apache License

@Test
public void testDumpCat(@Injectable final DrillbitContext bitContext,
        @Injectable UserClientConnection connection) throws Throwable {

    mockDrillbitContext(bitContext);

    final PhysicalPlanReader reader = defaultPhysicalPlanReader(c);
    final PhysicalPlan plan = reader.readPhysicalPlan(
            Files.toString(FileUtils.getResourceAsFile("/trace/simple_trace.json"), Charsets.UTF_8));
    final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
    final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(),
            connection, registry);
    final SimpleRootExec exec = new SimpleRootExec(
            ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));

    while (exec.next()) {
    }

    if (context.getFailureCause() != null) {
        throw context.getFailureCause();
    }
    assertTrue(!context.isFailed());

    exec.close();

    FragmentHandle handle = context.getHandle();

    /* Form the file name to which the trace output will dump the record batches */
    String qid = QueryIdHelper.getQueryId(handle.getQueryId());

    final int majorFragmentId = handle.getMajorFragmentId();
    final int minorFragmentId = handle.getMinorFragmentId();

    final String logLocation = c.getString(ExecConstants.TRACE_DUMP_DIRECTORY);

    System.out.println("Found log location: " + logLocation);

    final String filename = String.format("%s//%s_%d_%d_mock-scan", logLocation, qid, majorFragmentId,
            minorFragmentId);

    System.out.println("File Name: " + filename);

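    // Install Drill's configured trace-dump filesystem as the Hadoop default
    // so that FileSystem.get(conf) can open the dumped record batches.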
    final Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, c.getString(ExecConstants.TRACE_DUMP_FILESYSTEM));

    final FileSystem fs = FileSystem.get(conf);
    final Path path = new Path(filename);
    assertTrue("Trace file does not exist", fs.exists(path));

    final DumpCat dumpCat = new DumpCat();

    //Test Query mode
    try (final FileInputStream input = new FileInputStream(filename)) {
        dumpCat.doQuery(input);
    }

    //Test Batch mode
    try (final FileInputStream input = new FileInputStream(filename)) {
        dumpCat.doBatch(input, 0, true);
    }
}