Example usage for org.apache.hadoop.conf Configuration setClassLoader

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setClassLoader.

Prototype

public void setClassLoader(ClassLoader classLoader) 

Document

Set the class loader that will be used to load the various objects.
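
For orientation, here is a minimal, self-contained sketch of the method's effect. The jar path and class name are placeholders invented for this illustration, not taken from the examples below.

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;

import org.apache.hadoop.conf.Configuration;

public class SetClassLoaderExample {
    public static void main(String[] args) throws Exception {
        // Class loader that can also see an extra jar (the path is illustrative).
        ClassLoader pluginLoader = new URLClassLoader(
                new URL[] { new File("/path/to/plugin.jar").toURI().toURL() },
                Thread.currentThread().getContextClassLoader());

        Configuration conf = new Configuration();
        // Classes referenced by name through this Configuration will now be
        // resolved with pluginLoader instead of the default class loader.
        conf.setClassLoader(pluginLoader);

        // getClassByName, getClass and getInstances all use the configured loader.
        Class<?> clazz = conf.getClassByName("com.example.MyPlugin"); // hypothetical class name
        System.out.println("Loaded " + clazz.getName());
    }
}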

Usage

From source file: org.apache.lens.driver.hive.HiveDriver.java

License: Apache License

@Override
public HiveQueryPlan explain(AbstractQueryContext explainCtx) throws LensException {
    if (explainCtx.getDriverQuery(this) == null) {
        throw new NullPointerException("Null driver query for " + explainCtx.getUserQuery());
    }
    if (explainCtx.getDriverContext().getDriverQueryPlan(this) != null) {
        // explain called again and again
        return (HiveQueryPlan) explainCtx.getDriverContext().getDriverQueryPlan(this);
    }
    log.info("{} Explain: {}", getFullyQualifiedName(), explainCtx.getDriverQuery(this));
    Configuration explainConf = new Configuration(explainCtx.getDriverConf(this));
    explainConf.setClassLoader(explainCtx.getConf().getClassLoader());
    explainConf.setBoolean(LensConfConstants.QUERY_PERSISTENT_RESULT_INDRIVER, false);
    final String explainQuery = "EXPLAIN EXTENDED " + explainCtx.getDriverQuery(this);

    QueryContext explainQueryCtx = QueryContext.createContextWithSingleDriver(explainQuery,
            explainCtx.getSubmittedUser(), new LensConf(), explainConf, this,
            explainCtx.getLensSessionIdentifier(), false);

    // Get result set of explain
    InMemoryResultSet inMemoryResultSet = (InMemoryResultSet) execute(explainQueryCtx);
    List<String> explainOutput = new ArrayList<>();
    while (inMemoryResultSet.hasNext()) {
        explainOutput.add((String) inMemoryResultSet.next().getValues().get(0));
    }
    closeQuery(explainQueryCtx.getQueryHandle());
    try {
        hiveConf.setClassLoader(explainCtx.getConf().getClassLoader());
        HiveQueryPlan hqp = new HiveQueryPlan(explainOutput, null, hiveConf, calculateQueryCost(explainCtx));
        explainCtx.getDriverContext().setDriverQueryPlan(this, hqp);
        return hqp;
    } catch (HiveException e) {
        throw new LensException("Unable to create hive query plan", e);
    }
}

From source file: org.apache.lens.server.api.query.DriverSelectorQueryContext.java

License: Apache License

/**
 * Gets the driver query conf.
 *
 * @param driver    the driver
 * @param queryConf the query conf
 * @return the final query conf
 */
private Configuration mergeConf(LensDriver driver, Configuration queryConf) {
    Configuration conf = new Configuration(driver.getConf());
    for (Map.Entry<String, String> entry : queryConf) {
        conf.set(entry.getKey(), entry.getValue());
    }
    conf.setClassLoader(queryConf.getClassLoader());
    return conf;
}
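
The explicit setClassLoader call matters because iterating a Configuration yields only its key/value properties; the class loader does not travel with them and has to be carried over separately. A minimal sketch of that pattern (the class and method names here are illustrative, not part of Lens):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class ConfCopyExample {
    public static Configuration copy(Configuration source) {
        Configuration target = new Configuration(false);
        for (Map.Entry<String, String> entry : source) {
            target.set(entry.getKey(), entry.getValue()); // copies properties only
        }
        // The class loader is not a property, so transfer it explicitly,
        // just as mergeConf() does above.
        target.setClassLoader(source.getClassLoader());
        return target;
    }
}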

From source file: org.apache.lens.server.BaseLensService.java

License: Apache License

/**
 * Gets the lens conf.
 *
 * @param sessionHandle the session handle
 * @param conf          the conf
 * @return the lens conf
 * @throws LensException the lens exception
 */
public Configuration getLensConf(LensSessionHandle sessionHandle, LensConf conf) throws LensException {
    Configuration qconf = new Configuration(false);
    for (Map.Entry<String, String> entry : getSession(sessionHandle).getSessionConf()) {
        qconf.set(entry.getKey(), entry.getValue());
    }

    if (conf != null && !conf.getProperties().isEmpty()) {
        for (Map.Entry<String, String> entry : conf.getProperties().entrySet()) {
            qconf.set(entry.getKey(), entry.getValue());
        }
    }
    qconf.setClassLoader(getSession(sessionHandle).getClassLoader());
    return qconf;
}

From source file: org.apache.nifi.processors.hadoop.AbstractHadoopProcessor.java

License: Apache License

@Override
protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
    final String configResources = validationContext.getProperty(HADOOP_CONFIGURATION_RESOURCES)
            .evaluateAttributeExpressions().getValue();
    final String principal = validationContext.getProperty(kerberosProperties.getKerberosPrincipal())
            .evaluateAttributeExpressions().getValue();
    final String keytab = validationContext.getProperty(kerberosProperties.getKerberosKeytab())
            .evaluateAttributeExpressions().getValue();

    final List<ValidationResult> results = new ArrayList<>();

    if (!StringUtils.isBlank(configResources)) {
        try {
            ValidationResources resources = validationResourceHolder.get();

            // if no resources in the holder, or if the holder has different resources loaded,
            // then load the Configuration and set the new resources in the holder
            if (resources == null || !configResources.equals(resources.getConfigResources())) {
                getLogger().debug("Reloading validation resources");
                final Configuration config = new ExtendedConfiguration(getLogger());
                config.setClassLoader(Thread.currentThread().getContextClassLoader());
                resources = new ValidationResources(configResources,
                        getConfigurationFromResources(config, configResources));
                validationResourceHolder.set(resources);
            }

            final Configuration conf = resources.getConfiguration();
            results.addAll(KerberosProperties.validatePrincipalAndKeytab(this.getClass().getSimpleName(), conf,
                    principal, keytab, getLogger()));

        } catch (IOException e) {
            results.add(new ValidationResult.Builder().valid(false).subject(this.getClass().getSimpleName())
                    .explanation("Could not load Hadoop Configuration resources").build());
        }
    }

    return results;
}

From source file: org.apache.nifi.processors.hadoop.AbstractHadoopProcessor.java

License: Apache License

HdfsResources resetHDFSResources(String configResources, ProcessContext context) throws IOException {
    Configuration config = new ExtendedConfiguration(getLogger());
    config.setClassLoader(Thread.currentThread().getContextClassLoader());

    getConfigurationFromResources(config, configResources);

    // give sub-classes a chance to process configuration
    preProcessConfiguration(config, context);

    // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
    checkHdfsUriForTimeout(config);

    // disable caching of Configuration and FileSystem objects, else we cannot reconfigure the processor without a complete
    // restart
    String disableCacheName = String.format("fs.%s.impl.disable.cache",
            FileSystem.getDefaultUri(config).getScheme());
    config.set(disableCacheName, "true");

    // If kerberos is enabled, create the file system as the kerberos principal
    // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
    FileSystem fs;
    UserGroupInformation ugi;
    synchronized (RESOURCES_LOCK) {
        if (SecurityUtil.isSecurityEnabled(config)) {
            String principal = context.getProperty(kerberosProperties.getKerberosPrincipal())
                    .evaluateAttributeExpressions().getValue();
            String keyTab = context.getProperty(kerberosProperties.getKerberosKeytab())
                    .evaluateAttributeExpressions().getValue();
            ugi = SecurityUtil.loginKerberos(config, principal, keyTab);
            fs = getFileSystemAsUser(config, ugi);
        } else {
            config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
            config.set("hadoop.security.authentication", "simple");
            ugi = SecurityUtil.loginSimple(config);
            fs = getFileSystemAsUser(config, ugi);
        }
    }
    getLogger().debug("resetHDFSResources UGI {}", new Object[] { ugi });

    final Path workingDir = fs.getWorkingDirectory();
    getLogger().info(
            "Initialized a new HDFS File System with working dir: {} default block size: {} default replication: {} config: {}",
            new Object[] { workingDir, fs.getDefaultBlockSize(workingDir), fs.getDefaultReplication(workingDir),
                    config.toString() });

    return new HdfsResources(config, fs, ugi);
}

From source file: org.apache.nutch.hostdb.UpdateHostDb.java

License: Apache License

private void updateHostDb(Path hostDb, Path crawlDb, Path topHosts, boolean checkFailed, boolean checkNew,
        boolean checkKnown, boolean force, boolean filter, boolean normalize) throws Exception {

    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    long start = System.currentTimeMillis();
    LOG.info("UpdateHostDb: starting at " + sdf.format(start));

    Job job = NutchJob.getInstance(getConf());
    Configuration conf = job.getConfiguration();
    boolean preserveBackup = conf.getBoolean("db.preserve.backup", true);
    job.setJarByClass(UpdateHostDb.class);
    job.setJobName("UpdateHostDb");

    FileSystem fs = hostDb.getFileSystem(conf);
    Path old = new Path(hostDb, "old");
    Path current = new Path(hostDb, "current");
    Path tempHostDb = new Path(hostDb, "hostdb-" + Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));

    // lock an existing hostdb to prevent multiple simultaneous updates
    Path lock = new Path(hostDb, LOCK_NAME);
    if (!fs.exists(current)) {
        fs.mkdirs(current);
    }
    LockUtil.createLockFile(fs, lock, false);

    MultipleInputs.addInputPath(job, current, SequenceFileInputFormat.class);

    if (topHosts != null) {
        MultipleInputs.addInputPath(job, topHosts, KeyValueTextInputFormat.class);
    }
    if (crawlDb != null) {
        // Tell the job we read from CrawlDB
        conf.setBoolean("hostdb.reading.crawldb", true);
        MultipleInputs.addInputPath(job, new Path(crawlDb, CrawlDb.CURRENT_NAME),
                SequenceFileInputFormat.class);
    }

    FileOutputFormat.setOutputPath(job, tempHostDb);

    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(NutchWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(HostDatum.class);
    job.setMapperClass(UpdateHostDbMapper.class);
    job.setReducerClass(UpdateHostDbReducer.class);
    job.setSpeculativeExecution(false);

    conf.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
    conf.setBoolean(HOSTDB_CHECK_FAILED, checkFailed);
    conf.setBoolean(HOSTDB_CHECK_NEW, checkNew);
    conf.setBoolean(HOSTDB_CHECK_KNOWN, checkKnown);
    conf.setBoolean(HOSTDB_FORCE_CHECK, force);
    conf.setBoolean(HOSTDB_URL_FILTERING, filter);
    conf.setBoolean(HOSTDB_URL_NORMALIZING, normalize);
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());

    try {
        boolean success = job.waitForCompletion(true);
        if (!success) {
            String message = "UpdateHostDb job did not succeed, job status:" + job.getStatus().getState()
                    + ", reason: " + job.getStatus().getFailureInfo();
            LOG.error(message);
            NutchJob.cleanupAfterFailure(tempHostDb, lock, fs);
            throw new RuntimeException(message);
        }

        FSUtils.replace(fs, old, current, true);
        FSUtils.replace(fs, current, tempHostDb, true);

        if (!preserveBackup && fs.exists(old))
            fs.delete(old, true);
    } catch (Exception e) {
        LOG.error("UpdateHostDb job failed: {}", e.getMessage());
        NutchJob.cleanupAfterFailure(tempHostDb, lock, fs);
        throw e;
    }

    LockUtil.removeLockFile(fs, lock);
    long end = System.currentTimeMillis();
    LOG.info("UpdateHostDb: finished at " + sdf.format(end) + ", elapsed: "
            + TimingUtil.elapsedTime(start, end));
}

From source file: org.apache.pulsar.io.hdfs.AbstractHdfsConnector.java

License: Apache License

protected HdfsResources resetHDFSResources(HdfsSinkConfig hdfsSinkConfig) throws IOException {
    Configuration config = new ExtendedConfiguration();
    config.setClassLoader(Thread.currentThread().getContextClassLoader());

    getConfig(config, connectorConfig.getHdfsConfigResources());

    // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
    checkHdfsUriForTimeout(config);

    /* Disable caching of Configuration and FileSystem objects, else we cannot reconfigure
     * the processor without a complete restart
     */
    String disableCacheName = String.format("fs.%s.impl.disable.cache",
            FileSystem.getDefaultUri(config).getScheme());
    config.set(disableCacheName, "true");

    // If kerberos is enabled, create the file system as the kerberos principal
    // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
    FileSystem fs;
    UserGroupInformation ugi;
    synchronized (RESOURCES_LOCK) {
        if (SecurityUtil.isSecurityEnabled(config)) {
            ugi = SecurityUtil.loginKerberos(config, connectorConfig.getKerberosUserPrincipal(),
                    connectorConfig.getKeytab());
            fs = getFileSystemAsUser(config, ugi);
        } else {
            config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
            config.set("hadoop.security.authentication", "simple");
            ugi = SecurityUtil.loginSimple(config);
            fs = getFileSystemAsUser(config, ugi);
        }
    }
    return new HdfsResources(config, fs, ugi);
}

From source file: org.apache.sqoop.ConnFactory.java

License: Apache License

/**
 * If $SQOOP_CONF_DIR/managers.d/ exists and sqoop.connection.factories is
 * not set, then we look through the files in that directory; they should
 * contain lines of the form mgr.class.name[=/path/to/containing.jar].
 *
 * <p>
 * Put all mgr.class.names into the Configuration, and load any specified
 * jars into the ClassLoader.
 * </p>
 *
 * @param conf the current configuration to populate with class names.
 * @return conf again, after possibly populating sqoop.connection.factories.
 */
private Configuration loadManagersFromConfDir(Configuration conf) {
    if (conf.get(FACTORY_CLASS_NAMES_KEY) != null) {
        LOG.debug(FACTORY_CLASS_NAMES_KEY + " is set; ignoring managers.d");
        return conf;
    }

    String confDirName = System.getenv("SQOOP_CONF_DIR");
    if (null == confDirName) {
        LOG.warn("$SQOOP_CONF_DIR has not been set in the environment. "
                + "Cannot check for additional configuration.");
        return conf;
    }

    File confDir = new File(confDirName);
    File mgrDir = new File(confDir, "managers.d");

    if (mgrDir.exists() && mgrDir.isDirectory()) {
        // We have a managers.d subdirectory. Get the file list, sort it,
        // and process them in order.
        String[] fileNames = mgrDir.list();
        Arrays.sort(fileNames);

        for (String fileName : fileNames) {
            File f = new File(mgrDir, fileName);
            if (f.isFile()) {
                addManagersFromFile(conf, f);
            }
        }

        // Add the default MF.
        addManager(conf, DEFAULT_FACTORY_CLASS_NAMES);
    }

    // Set the classloader in this configuration so that it will use
    // the jars we just loaded in.
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());
    return conf;
}
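
The pattern described in the javadoc above can be sketched in isolation: read a "class.name[=/path/to/jar]" line, build a class loader that can see the jar, and hand that loader to the Configuration so the class resolves later. The entry and class names below are hypothetical and the parsing is deliberately simplified; this is not Sqoop's actual implementation.

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;

import org.apache.hadoop.conf.Configuration;

public class ManagerEntryExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical managers.d entry of the form class.name[=/path/to/jar].
        String line = "com.example.MyManagerFactory=/opt/sqoop/lib/my-manager.jar";
        String[] parts = line.split("=", 2);

        ClassLoader parent = Thread.currentThread().getContextClassLoader();
        ClassLoader loader = (parts.length == 2)
                ? new URLClassLoader(new URL[] { new File(parts[1].trim()).toURI().toURL() }, parent)
                : parent;

        Configuration conf = new Configuration();
        conf.setClassLoader(loader);

        // Resolves through the loader that was just set (this only succeeds if
        // the jar and class actually exist at runtime).
        Class<?> factory = conf.getClassByName(parts[0].trim());
        System.out.println("Resolved " + factory.getName());
    }
}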

From source file: org.apache.sqoop.tool.SqoopTool.java

License: Apache License

/**
 * If $SQOOP_CONF_DIR/tools.d/ exists and sqoop.tool.plugins is not set,
 * then we look through the files in that directory; they should contain
 * lines of the form 'plugin.class.name[=/path/to/containing.jar]'.
 *
 * <p>Put all plugin.class.names into the Configuration, and load any
 * specified jars into the ClassLoader.
 * </p>
 *
 * @param conf the current configuration to populate with class names.
 * @return conf again, after possibly populating sqoop.tool.plugins.
 */
private static Configuration loadPluginsFromConfDir(Configuration conf) {
    if (conf.get(TOOL_PLUGINS_KEY) != null) {
        LOG.debug(TOOL_PLUGINS_KEY + " is set; ignoring tools.d");
        return conf;
    }

    String confDirName = System.getenv("SQOOP_CONF_DIR");
    if (null == confDirName) {
        LOG.warn("$SQOOP_CONF_DIR has not been set in the environment. "
                + "Cannot check for additional configuration.");
        return conf;
    }

    File confDir = new File(confDirName);
    File toolsDir = new File(confDir, "tools.d");

    if (toolsDir.exists() && toolsDir.isDirectory()) {
        // We have a tools.d subdirectory. Get the file list, sort it,
        // and process them in order.
        String[] fileNames = toolsDir.list();
        Arrays.sort(fileNames);

        for (String fileName : fileNames) {
            File f = new File(toolsDir, fileName);
            if (f.isFile()) {
                loadPluginsFromFile(conf, f);
            }
        }
    }

    // Set the classloader in this configuration so that it will use
    // the jars we just loaded in.
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());
    return conf;
}

From source file: org.deeplearning4j.hadoop.util.HdfsUtils.java

License: Apache License

public static void setJarFileFor(Configuration conf, Class<?> jarClass) {
    String jar = findJar(jarClass);
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());
    conf.set("mapred.jar", jar);
}