Example usage for org.apache.hadoop.conf Configuration addDefaultResource

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration#addDefaultResource from open-source projects.

Prototype

public static synchronized void addDefaultResource(String name) 

Document

Add a default resource.
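
The method is static and synchronized, so a registered resource takes effect process-wide: it is appended to the list of default resources after the built-in ones (core-default.xml, core-site.xml), and Configuration instances created afterwards load it automatically. A minimal sketch, assuming a my-app-site.xml file on the classpath that defines a my.app.property key (both names are hypothetical):

import org.apache.hadoop.conf.Configuration;

public class AddDefaultResourceSketch {
    public static void main(String[] args) {
        // Register a classpath resource as a process-wide default; it is
        // loaded after the built-in defaults such as core-site.xml.
        Configuration.addDefaultResource("my-app-site.xml");

        // Any Configuration created from here on picks up my-app-site.xml.
        Configuration conf = new Configuration();
        System.out.println(conf.get("my.app.property"));
    }
}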

Usage

From source file: de.zib.sfs.StatisticsFileSystemContractTest.java

License: BSD License

protected StatisticsFileSystemContractTest(String fileSystemUri) {
    this.fileSystemPath = new Path(fileSystemUri);

    Configuration.addDefaultResource("hadoop/core-site.xml");
    this.conf = new HdfsConfiguration();
    this.conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, FileSystemContractBaseTest.TEST_UMASK);

    // most tests use tiny block sizes, so disable the minimum block size
    this.conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, "0");

    // set NameNode and DataNode directories
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
}

From source file: gobblin.util.HadoopUtils.java

License: Apache License

/**
 * Add "gobblin-site.xml" as a {@link Configuration} default resource.
 */
public static void addGobblinSite() {
    Configuration.addDefaultResource("gobblin-site.xml");
}
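
Because addDefaultResource is static, a single call to addGobblinSite() early in the process is enough to make gobblin-site.xml visible to every Configuration the JVM creates afterwards.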

From source file: io.apigee.lembos.node.types.ConfigurationWrap.java

License: Apache License

/**
 * Wraps {@link Configuration#addDefaultResource(String)}.
 *
 * @param ctx the JavaScript context (unused)
 * @param thisObj the 'this' object of the caller
 * @param args the arguments for the call
 * @param func the function called (unused)
 */
@JSStaticFunction
public static void addDefaultResource(final Context ctx, final Scriptable thisObj, final Object[] args,
        final Function func) {
    if (args.length == 0 || !JavaScriptUtils.isDefined(args[0])) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.ONE_ARG_EXPECTED);
    }

    Configuration.addDefaultResource(args[0].toString());
}
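
Note that the wrapper enforces the Java signature at the JavaScript boundary: it validates that a defined first argument was supplied and converts it to a string before delegating to the static Java method.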

From source file: org.apache.eagle.storage.hbase.tools.CoprocessorTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Options cmdOptions = new Options();
    cmdOptions.addOption(new Option("register", false, "Register coprocessor"));
    cmdOptions.addOption(new Option("unregister", false, "Unregister coprocessor"));

    cmdOptions.addOption("table", true,
            "HBase table name, separated with comma, for example, table1,table2,..");
    cmdOptions.addOption("jar", true, "Coprocessor target jar path");
    cmdOptions.addOption("localJar", true, "Coprocessor local source jar path");
    cmdOptions.addOption("config", true, "Configuration file");

    cmdOptions.getOption("table").setType(String.class);
    cmdOptions.getOption("table").setRequired(true);
    cmdOptions.getOption("jar").setType(String.class);
    cmdOptions.getOption("jar").setRequired(false);
    cmdOptions.getOption("localJar").setType(String.class);
    cmdOptions.getOption("localJar").setRequired(false);
    cmdOptions.getOption("config").setType(String.class);
    cmdOptions.getOption("config").setRequired(false);

    GnuParser parser = new GnuParser();
    CommandLine cmdCli = parser.parse(cmdOptions, args);
    String tableName = cmdCli.getOptionValue("table");
    String configFile = cmdCli.getOptionValue("config");

    if (configFile != null) {
        Configuration.addDefaultResource(configFile);
    }

    if (cmdCli.hasOption("register")) {
        if (args.length < 3) {
            System.err.println("Error: coprocessor jar path is missing");
            System.err.println("Usage: java " + CoprocessorTool.class.getName() + " enable " + tableName
                    + " [jarOnHdfs] [jarOnLocal]");
            return 1;
        }
        String jarPath = cmdCli.getOptionValue("jar");
        LOGGER.info("Table name: {}", tableName);
        LOGGER.info("Coprocessor jar on hdfs: {}", jarPath);
        String localJarPath = cmdCli.getOptionValue("localJar");
        LOGGER.info("Coprocessor jar on local: {}", localJarPath);

        String[] tableNames = tableName.split(",\\s*");
        for (String table : tableNames) {
            LOGGER.info("Registering coprocessor for table {}", table);
            registerCoprocessor(jarPath, table, localJarPath);
        }
    } else if (cmdCli.hasOption("unregister")) {
        unregisterCoprocessor(tableName);
    } else {
        System.err.println("command is required, --register/--unregister");
        printHelpMessage(cmdOptions);
    }
    return 0;
}

From source file: org.apache.giraph.comm.TestMessageStores.java

License: Apache License

@Before
public void prepare() throws IOException {
    directory = Files.createTempDir();

    Configuration.addDefaultResource("giraph-site.xml");
    GiraphConfiguration initConfig = new GiraphConfiguration();
    initConfig.setComputationClass(IntNoOpComputation.class);
    GiraphConstants.MESSAGES_DIRECTORY.set(initConfig, new File(directory, "giraph_messages").toString());
    config = new ImmutableClassesGiraphConfiguration<IntWritable, IntWritable, IntWritable>(initConfig);

    testData = new TestData();
    testData.maxId = 1000000;
    testData.maxMessage = 1000000;
    testData.maxNumberOfMessages = 100;
    testData.numVertices = 50;
    testData.numTimes = 10;
    testData.numOfPartitions = 5;
    testData.maxMessagesInMemory = 20;

    service = MockUtils.mockServiceGetVertexPartitionOwner(testData.numOfPartitions);
}

From source file: org.apache.hcatalog.hbase.snapshot.RevisionManagerConfiguration.java

License: Apache License

public static Configuration addResources(Configuration conf) {
    conf.addDefaultResource("revision-manager-default.xml");
    conf.addResource("revision-manager-site.xml");
    return conf;
}
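
Note the contrast inside this method: addDefaultResource is static (merely invoked through the conf instance here), so revision-manager-default.xml is registered process-wide for all Configuration objects, whereas addResource attaches revision-manager-site.xml only to the conf passed in.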

From source file: org.apache.hoya.tools.ConfigHelper.java

License: Apache License

/**
 * Register a resource as a default resource.
 * Do not attempt to use this unless you understand that the
 * order in which default resources are loaded affects the outcome,
 * and that subclasses of Configuration often register new default
 * resources.
 * @param resource the resource name
 * @return the resource URL, or null if the resource is not on the classpath
 */
public static URL registerDefaultResource(String resource) {
    URL resURL = ConfigHelper.class.getClassLoader().getResource(resource);
    if (resURL != null) {
        Configuration.addDefaultResource(resource);
    }
    return resURL;
}
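
The ordering caveat in the Javadoc matters because default resources are loaded in registration order, and a resource loaded later overrides keys that an earlier one also sets (unless the earlier value is marked final). A short sketch of that behavior, with both resource names and the demo.key property invented for illustration:

// Assume cluster-defaults.xml sets demo.key=cluster and app-defaults.xml
// sets demo.key=app, and both files are on the classpath.
Configuration.addDefaultResource("cluster-defaults.xml");
Configuration.addDefaultResource("app-defaults.xml");

Configuration conf = new Configuration();
// Prints "app": the resource registered later wins for duplicate keys.
System.out.println(conf.get("demo.key"));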

From source file: org.apache.oozie.action.hadoop.LauncherMapper.java

License: Apache License

/**
 * Pushes all important configuration properties into the Hadoop
 * configuration for the action.
 */
private void propagateToHadoopConf() throws IOException {
    Configuration propagationConf = new Configuration(false);
    if (System.getProperty(OOZIE_ACTION_ID) != null) {
        propagationConf.set(OOZIE_ACTION_ID, System.getProperty(OOZIE_ACTION_ID));
    }
    if (System.getProperty(OOZIE_JOB_ID) != null) {
        propagationConf.set(OOZIE_JOB_ID, System.getProperty(OOZIE_JOB_ID));
    }
    if (System.getProperty(OOZIE_LAUNCHER_JOB_ID) != null) {
        propagationConf.set(OOZIE_LAUNCHER_JOB_ID, System.getProperty(OOZIE_LAUNCHER_JOB_ID));
    }

    // loading action conf prepared by Oozie
    Configuration actionConf = LauncherMain.loadActionConf();

    if (actionConf.get(LauncherMainHadoopUtils.CHILD_MAPREDUCE_JOB_TAGS) != null) {
        propagationConf.set(LauncherMain.MAPREDUCE_JOB_TAGS,
                actionConf.get(LauncherMainHadoopUtils.CHILD_MAPREDUCE_JOB_TAGS));
    }

    propagationConf.writeXml(new FileWriter(PROPAGATION_CONF_XML));
    Configuration.dumpConfiguration(propagationConf, new OutputStreamWriter(System.out));
    Configuration.addDefaultResource(PROPAGATION_CONF_XML);
}
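
Writing the merged properties out to PROPAGATION_CONF_XML and then registering that file as a default resource is what makes them propagate: every Configuration the launched action code creates afterwards loads the file automatically, with no need to hand the properties over explicitly.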

From source file: org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License: Apache License

private static void startDFSandYARN() throws IOException, InterruptedException {
    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            hadoopConf = new HdfsConfiguration();
            hadoopConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    SentryINodeAttributesProvider.class.getName());
            hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
            hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
            hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
            Configuration.addDefaultResource("test.xml");

            hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", MANAGED_PREFIXES);
            hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
            hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms",
                    String.valueOf(CACHE_REFRESH));

            hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms",
                    String.valueOf(STALE_THRESHOLD));

            hadoopConf.set("sentry.hdfs.service.security.mode", "none");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-addresses", "localhost");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
            Path tmpPath = new Path("/tmp");
            Path hivePath = new Path("/user/hive");
            Path warehousePath = new Path(hivePath, "warehouse");
            miniDFS.getFileSystem().mkdirs(warehousePath);
            boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
            LOGGER.info("\n\n Is dir :" + directory + "\n\n");
            LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
            fsURI = miniDFS.getFileSystem().getUri().toString();
            hadoopConf.set("fs.defaultFS", fsURI);

            // Create Yarn cluster
            // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

            miniDFS.getFileSystem().mkdirs(tmpPath);
            miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
            miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
            miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
            LOGGER.info("\n\n Owner :" + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup() + "\n\n");
            LOGGER.info("\n\n Owner tmp :" + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", " + "\n\n");

            int dfsSafeCheckRetry = 30;
            boolean hasStarted = false;
            for (int i = dfsSafeCheckRetry; i > 0; i--) {
                if (!miniDFS.getFileSystem().isInSafeMode()) {
                    hasStarted = true;
                    LOGGER.info("HDFS safemode check num times : " + (31 - i));
                    break;
                }
                // poll instead of busy-spinning while HDFS leaves safe mode
                Thread.sleep(1000);
            }
            if (!hasStarted) {
                throw new RuntimeException("HDFS hasn't exited safe mode yet.");
            }

            return null;
        }
    });
}

From source file: org.apache.sentry.tests.e2e.hive.fs.MiniDFS.java

License: Apache License

private void createMiniDFSCluster(File baseDir, String serverType, boolean enableHDFSAcls) throws Exception {
    Configuration conf = new Configuration();
    if (HiveServer2Type.InternalMetastore.name().equalsIgnoreCase(serverType)) {
        // set the test group mapping that maps each user to a group of the same name
        conf.set("hadoop.security.group.mapping",
                "org.apache.sentry.tests.e2e.hive.fs.MiniDFS$PseudoGroupMappingService");
        // set umask so the metastore test client can create tables in the warehouse dir
        conf.set("fs.permissions.umask-mode", "000");
        Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(conf);
    }
    File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
    conf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
    if (enableHDFSAcls) {
        conf.set("dfs.namenode.acls.enabled", "true");
    }
    Configuration.addDefaultResource("test.xml");
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fileSystem = dfsCluster.getFileSystem();
    String policyDir = System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry");
    sentryDir = super.assertCreateDfsDir(new Path(fileSystem.getUri() + policyDir));
    dfsBaseDir = assertCreateDfsDir(new Path(new Path(fileSystem.getUri()), "/base"));
}