Example usage for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY.

Prototype

String FS_DEFAULT_NAME_KEY

The constant is declared in org.apache.hadoop.fs.FileSystem. On recent Hadoop releases it resolves to the configuration key "fs.defaultFS"; the older "fs.default.name" spelling survives only as a deprecated alias.
Usage
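
Before the extracted examples, here is a minimal self-contained sketch of the typical pattern (the class name is illustrative; on recent Hadoop releases the constant resolves to the key "fs.defaultFS"):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultFsKeyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point the default filesystem at the local machine rather than HDFS.
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Default filesystem: " + fs.getUri()); // file:///
    }
}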

From source file: com.splicemachine.si.testsetup.HBaseSITestEnv.java

License: Apache License

private void startCluster(Configuration conf) throws Exception {
    int basePort = getNextBasePort();
    // -> MapR work-around
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.set("fs.default.name", "file:///");
    conf.set("fs.hdfs.client", "org.apache.hadoop.hdfs.DistributedFileSystem");
    System.setProperty("zookeeper.sasl.client", "false");
    System.setProperty("zookeeper.sasl.serverconfig", "fake");
    // <- MapR work-around
    conf.setInt("hbase.master.port", basePort);
    conf.setInt("hbase.master.info.port", basePort + 1);
    conf.setInt("hbase.regionserver.port", basePort + 2);
    conf.setInt("hbase.regionserver.info.port", basePort + 3);

    testUtility = new HBaseTestingUtility(conf);

    Configuration configuration = testUtility.getConfiguration();
    // -> MapR work-around
    configuration.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    configuration.set("fs.default.name", "file:///");
    configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    configuration.set("fs.hdfs.client", "org.apache.hadoop.hdfs.DistributedFileSystem");
    System.setProperty("zookeeper.sasl.client", "false");
    System.setProperty("zookeeper.sasl.serverconfig", "fake");
    // <- MapR work-around
    configuration.setInt("hbase.master.port", basePort);
    configuration.setInt("hbase.master.info.port", basePort + 1);
    configuration.setInt("hbase.regionserver.port", basePort + 2);
    configuration.setInt("hbase.regionserver.info.port", basePort + 3);
    if (FileSystem.class.getProtectionDomain().getCodeSource().getLocation().getPath().contains("mapr")) {
        testUtility.startMiniCluster(1);
    } else {
        testUtility.startMiniZKCluster();
        testUtility.startMiniHBaseCluster(1, 1, null, null, false);
    }
    ZkUtils.getZkManager().initialize(HConfiguration.getConfiguration());
    ZkUtils.initializeZookeeper();
}
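
Note that the work-around sets both FS_DEFAULT_NAME_KEY and the older "fs.default.name" spelling: on Hadoop 2.x the constant resolves to "fs.defaultFS", and "fs.default.name" is only a deprecated alias, so writing both keeps the override effective whichever name the bundled client reads.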

From source file: com.splicemachine.test.SpliceTestPlatformConfig.java

License: Apache License

public static Configuration create(String hbaseRootDirUri, Integer masterPort, Integer masterInfoPort,
        Integer regionServerPort, Integer regionServerInfoPort, Integer derbyPort, boolean failTasksRandomly) {

    Configuration config = HConfiguration.unwrapDelegate();

    config.set(SQLConfiguration.STORAGE_FACTORY_HOME, hbaseRootDirUri);

    //
    // Coprocessors
    //
    config.set("hbase.coprocessor.regionserver.classes", getRegionServerCoprocessorsAsString());
    config.set("hbase.coprocessor.region.classes", getRegionCoprocessorsAsString());
    config.set("hbase.coprocessor.master.classes", getMasterCoprocessorsAsString());

    //
    // Networking
    //
    config.set("hbase.zookeeper.quorum", "127.0.0.1:2181");
    config.setInt("hbase.master.port", masterPort);
    config.setInt("hbase.master.info.port", masterInfoPort);
    config.setInt("hbase.regionserver.port", regionServerPort);
    config.setInt("hbase.regionserver.info.port", regionServerInfoPort);
    config.setInt("hbase.master.jmx.port", HConfiguration.DEFAULT_JMX_BIND_PORT); // this is set because the HBase master and regionserver are running on the same machine and in the same JVM
    config.setInt(SQLConfiguration.NETWORK_BIND_PORT, derbyPort);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, SpliceDefaultCompactor.class,
            Compactor.class);
    // config.setClass(ConsistencyControlUtils.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, SpliceDefaultCompactionPolicy.class,
            CompactionPolicy.class);

    //
    // Networking -- interfaces
    //
    // force use of loop back interface on MacOSX, else don't set it
    //        if (System.getProperty("os.name").contains("Mac") ) {
    //            String interfaceName = "lo0";
    //            config.set("hbase.zookeeper.dns.interface", interfaceName);
    //            config.set("hbase.master.dns.interface", interfaceName);
    //            config.set("hbase.regionserver.dns.interface", interfaceName);
    //        }

    //
    // File System
    //
    config.set("fs.defaultFS", "file:///"); // MapR Hack, tells it local filesystem // fs.default.name is deprecated
    config.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    config.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    config.set("fs.default.name", "file:///");
    config.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");

    // Must allow Cygwin instance to config its own rootURI
    if (!"CYGWIN".equals(hbaseRootDirUri)) {
        config.set("hbase.rootdir", hbaseRootDirUri);
    }

    //
    // Threads, timeouts
    //
    config.setLong("hbase.rpc.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.client.scanner.timeout.period", MINUTES.toMillis(2)); // hbase.regionserver.lease.period is deprecated
    config.setLong("hbase.client.operation.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.regionserver.handler.count", 200);
    config.setLong("hbase.regionserver.msginterval", 1000);
    config.setLong("hbase.master.event.waiting.time", 20);
    config.setLong("hbase.master.lease.thread.wakefrequency", SECONDS.toMillis(3));
    //        config.setBoolean("hbase.master.loadbalance.bytable",true);
    config.setInt("hbase.balancer.period", 5000);

    config.setLong("hbase.server.thread.wakefrequency", SECONDS.toMillis(1));
    config.setLong("hbase.client.pause", 100);

    //
    // Compaction Controls
    //
    config.setLong("hbase.hstore.compaction.min", 5); // min number of eligible files before we compact
    config.setLong("hbase.hstore.compaction.max", 10); // max files to be selected for a single minor compaction
    config.setLong("hbase.hstore.compaction.min.size", 16 * MiB); // store files smaller than this will always be eligible for minor compaction.  HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if they are eligible
    config.setLong("hbase.hstore.compaction.max.size", 248 * MiB); // store files larger than this will be excluded from compaction
    config.setFloat("hbase.hstore.compaction.ratio", 1.25f); // default is 1.2f, at one point we had this set to 0.25f and 25f (which was likely a typo)

    //
    // Memstore, store files, splits
    //
    config.setLong(HConstants.HREGION_MAX_FILESIZE, 32 * MiB); // hbase.hregion.max.filesize
    config.setLong("hbase.hregion.memstore.flush.size", 128 * MiB); // was 512 MiB
    config.setLong("hbase.hregion.memstore.block.multiplier", 4);
    config.setFloat("hbase.regionserver.global.memstore.size", 0.25f); // set mem store to 25% of heap
    config.setLong("hbase.hstore.blockingStoreFiles", 20);
    //        config.set("hbase.regionserver.region.split.policy", "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy"); // change default split policy.  this makes more sense for a standalone/single regionserver

    // Support SI
    //config.setClass(HConstants.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);

    //
    // HFile
    //
    config.setInt("hfile.index.block.max.size", 16 * 1024); // 16KiB
    config.setFloat("hfile.block.cache.size", 0.25f); // set block cache to 25% of heap
    config.setFloat("io.hfile.bloom.error.rate", (float) 0.005);
    config.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); // hfile.block.bloom.cacheonwrite
    //config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    //
    // Misc
    //
    config.set("hbase.cluster.distributed", "true"); // don't start zookeeper for us
    config.set("hbase.master.distributed.log.splitting", "false"); // TODO: explain why we are setting this

    // AWS Credentials for test...
    //

    config.set(ACCESS_KEY, "AKIAJ6HBMCK5ALHVBFPQ");
    config.set(SECRET_KEY, "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");

    //
    // Splice
    //

    config.setLong("splice.ddl.drainingWait.maximum", SECONDS.toMillis(15)); // wait 15 seconds before bailing on bad ddl statements
    config.setLong("splice.ddl.maxWaitSeconds", 120000);
    //
    // Snapshots
    //
    config.setBoolean("hbase.snapshot.enabled", true);

    HConfiguration.reloadConfiguration(config);
    return HConfiguration.unwrapDelegate();
}

From source file: com.splicemachine.test.SpliceTestYarnPlatform.java

License: Apache License

private void configForTesting() throws URISyntaxException {
    yarnSiteConfigURL = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (yarnSiteConfigURL == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' file in classpath");
    } else {
        LOG.info("Found 'yarn-site.xml' at " + yarnSiteConfigURL.toURI().toString());
    }

    conf = new YarnConfiguration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    conf.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    conf.set("fs.default.name", "file:///");
    conf.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");
    System.setProperty("zookeeper.sasl.client", "false");
    System.setProperty("zookeeper.sasl.serverconfig", "fake");

    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, DEFAULT_HEARTBEAT_INTERVAL);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set("yarn.application.classpath", new File(yarnSiteConfigURL.getPath()).getParent());
}

From source file: com.thinkbiganalytics.kylo.catalog.file.DefaultCatalogFileManager.java

License: Apache License

/**
 * Constructs a {@code CatalogFileManager} using the specified Kylo data directory.
 */
@Autowired
public DefaultCatalogFileManager(@Nonnull final PathValidator pathValidator) {
    this.pathValidator = pathValidator;

    defaultConf = new Configuration();
    defaultConf.size(); // causes defaults to be loaded
    defaultConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///"); // Spark uses file:/// as default FileSystem
}
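
A side note on the constructor above: Configuration loads its default resources (core-default.xml, core-site.xml) lazily on first read, so the otherwise pointless defaultConf.size() call forces those defaults in before the local-filesystem override is applied.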

From source file: common.NameNode.java

License: Apache License

public static InetSocketAddress getAddress(Configuration conf) {
    URI filesystemURI = FileSystem.getDefaultUri(conf);
    String authority = filesystemURI.getAuthority();
    if (authority == null) {
        throw new IllegalArgumentException(
                String.format("Invalid URI for NameNode address (check %s): %s has no authority.",
                        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
    }
    if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(filesystemURI.getScheme())) {
        throw new IllegalArgumentException(
                String.format("Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
                        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(), FSConstants.HDFS_URI_SCHEME));
    }
    return getAddress(authority);
}
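
A hypothetical caller of the method above, assuming fs.defaultFS has been pointed at an HDFS NameNode (host and port are illustrative):

    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://namenode.example.com:8020");
    // The RPC address is recovered from the authority of the default FS URI.
    InetSocketAddress address = NameNode.getAddress(conf);
    System.out.println(address.getHostName() + ":" + address.getPort());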

From source file: eu.edisonproject.classification.tfidf.mapreduce.CompetencesDistanceDriver.java

License: Apache License

@Override
public int run(String[] args) {
    try {
        Configuration conf = HBaseConfiguration.create();
        //additional output using TextOutputFormat.
        conf.set("file.names", args[3]);

        Job job = Job.getInstance(conf);
        //TableMapReduceUtil.addDependencyJars(job); 
        job.setJarByClass(CompetencesDistanceDriver.class);
        //This row must be changed
        job.setJobName("Words Group By Title Driver");

        Path inPath = new Path(args[0]);
        Path outPath = new Path(args[1]);

        Path competencesPath = new Path(args[2]);
        Path competencesPathHDFS = competencesPath;
        FileSystem fs = FileSystem.get(conf);

        if (!conf.get(FileSystem.FS_DEFAULT_NAME_KEY).startsWith("file")) {
            competencesPathHDFS = new Path(competencesPath.getName());
            if (!fs.exists(competencesPathHDFS)) {
                fs.mkdirs(competencesPathHDFS);
                File[] stats = new File(competencesPath.toString()).listFiles();
                for (File stat : stats) {
                    Path filePath = new Path(stat.getAbsolutePath());
                    if (FilenameUtils.getExtension(filePath.getName()).endsWith("csv")) {
                        Path dest = new Path(competencesPathHDFS.toUri() + "/" + filePath.getName());
                        fs.copyFromLocalFile(filePath, dest);
                    }
                }
            }
        }
        job.addCacheFile(competencesPathHDFS.toUri());

        FileInputFormat.setInputPaths(job, inPath);

        FileOutputFormat.setOutputPath(job, outPath);
        fs.delete(outPath, true);

        job.setMapperClass(CompetencesDistanceMapper.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(CompetencesDistanceReducer.class);
        //            job.setOutputFormatClass(TableOutputFormat.class);
        //            job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "jobpostcompetence");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        String[] fileNames = args[3].split(",");
        for (String n : fileNames) {
            MultipleOutputs.addNamedOutput(job, n, TextOutputFormat.class, Text.class, Text.class);
        }

        return (job.waitForCompletion(true) ? 0 : 1);
    } catch (IOException | IllegalStateException | IllegalArgumentException | InterruptedException
            | ClassNotFoundException ex) {
        Logger.getLogger(CompetencesDistanceDriver.class.getName()).log(Level.SEVERE, null, ex);
    }
    return 0;
}
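
The FS_DEFAULT_NAME_KEY check above recurs in several of these drivers: side inputs are staged into the cluster only when the default filesystem is not the local one. A condensed sketch of the pattern, with an illustrative path:

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path sideInput = new Path("/local/data/competences.csv"); // illustrative
    if (!conf.get(FileSystem.FS_DEFAULT_NAME_KEY).startsWith("file")) {
        // Default FS is HDFS (or similar), so copy the local file up first.
        Path staged = new Path(sideInput.getName());
        fs.copyFromLocalFile(sideInput, staged);
        sideInput = staged;
    }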

From source file: eu.edisonproject.classification.tfidf.mapreduce.TermWordFrequency.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Configuration jobconf = getConf();
    Job job = Job.getInstance(jobconf);
    FileSystem fs = FileSystem.get(jobconf);
    fs.delete(new Path(args[1]), true);
    Path dictionary = new Path(args[0]);
    Path dictionaryHdfs = dictionary;

    Path localDocs = new Path(args[2]);
    Path hdfsDocs = localDocs;

    Path stopwordsLocal = new Path(args[3]);
    Path stopwordsHDFS = stopwordsLocal;
    if (!jobconf.get(FileSystem.FS_DEFAULT_NAME_KEY).startsWith("file")) {
        dictionaryHdfs = new Path(dictionary.getName());
        if (!fs.exists(dictionaryHdfs)) {
            fs.copyFromLocalFile(dictionary, dictionaryHdfs);
        }
        hdfsDocs = new Path(localDocs.getName());
        fs.mkdirs(hdfsDocs);
        fs.deleteOnExit(hdfsDocs);

        File[] stats = new File(localDocs.toString()).listFiles();

        for (File stat : stats) {
            Path filePath = new Path(stat.getAbsolutePath());
            if (FilenameUtils.getExtension(filePath.getName()).endsWith("txt")) {
                Path dest = new Path(hdfsDocs.toUri() + "/" + filePath.getName());
                fs.copyFromLocalFile(filePath, dest);
            }
        }
        stopwordsHDFS = new Path(stopwordsLocal.getName());
        if (!fs.exists(stopwordsHDFS)) {
            fs.copyFromLocalFile(stopwordsLocal, stopwordsHDFS);
        }
    }

    FileStatus stopwordsStatus = fs.getFileStatus(stopwordsHDFS);
    stopwordsHDFS = stopwordsStatus.getPath();
    job.addCacheFile(stopwordsHDFS.toUri());

    job.addCacheFile(hdfsDocs.toUri());

    job.setJarByClass(TermWordFrequency.class);
    job.setJobName("Word Frequency Term Driver");

    FileInputFormat.setInputPaths(job, dictionaryHdfs);
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    //        job.setInputFormatClass(TextInputFormat.class);
    job.setInputFormatClass(NLineInputFormat.class);
    NLineInputFormat.addInputPath(job, dictionaryHdfs);
    NLineInputFormat.setNumLinesPerSplit(job, Integer.valueOf(args[4]));
    NLineInputFormat.setMaxInputSplitSize(job, 500);

    job.setMapperClass(TermWordFrequencyMapper.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Integer.class);
    job.setReducerClass(TermWordFrequencyReducer.class);

    return (job.waitForCompletion(true) ? 0 : 1);

}

From source file: eu.edisonproject.classification.tfidf.mapreduce.WordFrequencyInDocDriver.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    //        itemset = new LinkedList<String>();
    //        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(args[2])));
    //        String line;
    //        while ((line = br.readLine()) != null) {
    //            String[] components = line.split("/");
    //            itemset.add(components[0]);
    //        }
    Configuration conf = getConf();

    Job job = Job.getInstance(conf);
    job.setJarByClass(WordFrequencyInDocDriver.class);
    job.setJobName("Word Frequency In Doc Driver");

    FileSystem fs = FileSystem.get(conf);
    fs.delete(new Path(args[1]), true);
    Path in = new Path(args[0]);
    Path inHdfs = in;

    Path dictionaryLocal = new Path(args[2]);
    Path dictionaryHDFS = dictionaryLocal;

    Path stopwordsLocal = new Path(args[3]);
    Path stopwordsHDFS = stopwordsLocal;

    if (!conf.get(FileSystem.FS_DEFAULT_NAME_KEY).startsWith("file")) {
        inHdfs = new Path(in.getName());
        fs.delete(inHdfs, true);
        fs.copyFromLocalFile(in, inHdfs);
        fs.deleteOnExit(inHdfs);

        dictionaryHDFS = new Path(dictionaryLocal.getName());
        if (!fs.exists(dictionaryHDFS)) {
            fs.copyFromLocalFile(dictionaryLocal, dictionaryHDFS);
        }
        stopwordsHDFS = new Path(stopwordsLocal.getName());
        if (!fs.exists(stopwordsHDFS)) {
            fs.copyFromLocalFile(stopwordsLocal, stopwordsHDFS);
        }
    }

    FileStatus dictionaryStatus = fs.getFileStatus(dictionaryHDFS);
    dictionaryHDFS = dictionaryStatus.getPath();
    job.addCacheFile(dictionaryHDFS.toUri());

    FileStatus stopwordsStatus = fs.getFileStatus(stopwordsHDFS);
    stopwordsHDFS = stopwordsStatus.getPath();
    job.addCacheFile(stopwordsHDFS.toUri());

    FileInputFormat.setInputPaths(job, inHdfs);
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setInputFormatClass(AvroKeyInputFormat.class);
    job.setMapperClass(WordFrequencyInDocMapper.class);
    AvroJob.setInputKeySchema(job, Document.getClassSchema());
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Integer.class);
    job.setReducerClass(WordFrequencyInDocReducer.class);
    return (job.waitForCompletion(true) ? 0 : 1);
}

From source file: eu.edisonproject.training.tfidf.mapreduce.TermWordFrequency.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Configuration jobconf = getConf();

    FileSystem fs = FileSystem.get(jobconf);
    fs.delete(new Path(args[1]), true);
    Path in = new Path(args[0]);
    Path inHdfs = in;
    if (!jobconf.get(FileSystem.FS_DEFAULT_NAME_KEY).startsWith("file")) {
        inHdfs = new Path(in.getName());
        fs.delete(inHdfs, true);
        fs.copyFromLocalFile(in, inHdfs);
        fs.deleteOnExit(inHdfs);
        FileStatus inHdfsStatus = fs.getFileStatus(inHdfs);
        //            Logger.getLogger(TermWordFrequency.class.getName()).log(Level.INFO, "Copied: {0} to: {1}", new Object[]{in.toUri(), inHdfsStatus.getPath().toUri()});
    }

    Job job = Job.getInstance(jobconf);
    Path stopwordsLocal = new Path(args[3]);
    stopwords = new Path(stopwordsLocal.getName());
    fs.delete(stopwords, true);
    fs.copyFromLocalFile(stopwordsLocal, stopwords);
    fs.deleteOnExit(stopwords);

    FileStatus stopwordsStatus = fs.getFileStatus(stopwords);
    stopwords = stopwordsStatus.getPath();
    job.addCacheFile(stopwords.toUri());

    Path localDocs = new Path(args[2]);
    Path hdfsDocs = new Path(localDocs.getName());
    fs.mkdirs(hdfsDocs);
    hdfsDocs = fs.getFileStatus(hdfsDocs).getPath();
    fs.delete(hdfsDocs, true);
    //        FileStatus[] stats = fs.listStatus(localDocs);
    File[] stats = new File(localDocs.toString()).listFiles();

    for (File stat : stats) {
        //        for (FileStatus stat : stats) {
        Path filePath = new Path(stat.getAbsolutePath());
        if (FilenameUtils.getExtension(filePath.getName()).endsWith("txt")) {
            Path dest = new Path(hdfsDocs.toUri() + "/" + filePath.getName());
            fs.copyFromLocalFile(filePath, dest);
        }
    }

    job.addCacheFile(hdfsDocs.toUri());

    job.setJarByClass(TermWordFrequency.class);
    job.setJobName("Word Frequency Term Driver");

    FileInputFormat.setInputPaths(job, inHdfs);
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    //         job.setInputFormatClass(TextInputFormat.class);
    job.setInputFormatClass(NLineInputFormat.class);
    NLineInputFormat.addInputPath(job, inHdfs);
    NLineInputFormat.setNumLinesPerSplit(job, Integer.valueOf(args[4]));
    NLineInputFormat.setMaxInputSplitSize(job, 500);
    Logger.getLogger(TermWordFrequency.class.getName()).log(Level.INFO, "Num. of lines: {0}",
            NLineInputFormat.getNumLinesPerSplit(job));

    job.setMapperClass(TermWordFrequencyMapper.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Integer.class);
    job.setReducerClass(TermWordFrequencyReducer.class);

    return (job.waitForCompletion(true) ? 0 : 1);

}

From source file: org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader.java

License: Apache License

public static ClassLoader getClassLoader() throws IOException {
    ReloadingClassLoader localLoader = loader;
    while (null == localLoader) {
        synchronized (lock) {
            if (null == loader) {

                FileSystemManager vfs = generateVfs();

                // Set up the 2nd tier class loader
                if (null == parent) {
                    parent = AccumuloClassLoader.getClassLoader();
                }

                FileObject[] vfsCP = resolve(vfs,
                        AccumuloClassLoader.getAccumuloString(VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, ""));

                if (vfsCP.length == 0) {
                    localLoader = createDynamicClassloader(parent);
                    loader = localLoader;
                    return localLoader.getClassLoader();
                }

                // Create the Accumulo Context ClassLoader using the DEFAULT_CONTEXT
                localLoader = createDynamicClassloader(new VFSClassLoader(vfsCP, vfs, parent));
                loader = localLoader;

                // An HDFS FileSystem and Configuration object were created for each unique HDFS namespace in the call to resolve above.
                // The HDFS Client did us a favor and cached these objects so that the next time someone calls FileSystem.get(uri), they
                // get the cached object. However, these objects were created not with the system VFS classloader, but the classloader above
                // it. We need to override the classloader on the Configuration objects. Ran into an issue where log recovery was being attempted
                // and SequenceFile$Reader was trying to instantiate the key class via WritableName.getClass(String, Configuration)
                for (FileObject fo : vfsCP) {
                    if (fo instanceof HdfsFileObject) {
                        String uri = fo.getName().getRootURI();
                        Configuration c = new Configuration(true);
                        c.set(FileSystem.FS_DEFAULT_NAME_KEY, uri);
                        FileSystem fs = FileSystem.get(c);
                        fs.getConf().setClassLoader(loader.getClassLoader());
                    }
                }

            }
        }
    }

    return localLoader.getClassLoader();
}
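
Because FileSystem.get(Configuration) caches one instance per filesystem URI, rebuilding a Configuration whose FS_DEFAULT_NAME_KEY points at each HDFS root URI returns the FileSystem created earlier during resolve(), which is what lets the loop retrofit the VFS classloader onto the cached objects.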