Example usage for org.apache.hadoop.conf.Configuration#getBoolean

List of usage examples for org.apache.hadoop.conf.Configuration#getBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration#getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
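
A minimal usage sketch (the property names are hypothetical, chosen only to illustrate the default-value fallback):

Configuration conf = new Configuration();
conf.set("example.feature.enabled", "true");

// Property present and parseable: the stored value wins.
boolean enabled = conf.getBoolean("example.feature.enabled", false); // true

// Property absent: the supplied default is returned.
boolean fallback = conf.getBoolean("example.other.flag", true); // true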

Usage

From source file: org.apache.hive.http.HttpServer.java

License: Apache License

/**
 * Does the user sending the HttpServletRequest have the administrator ACLs? If
 * not, the response is modified to send an error to the user.
 *
 * @param servletContext
 * @param request
 * @param response used to send the error response if user does not have admin access.
 * @return true if admin-authorized, false otherwise
 * @throws IOException
 */
static boolean hasAdministratorAccess(ServletContext servletContext, HttpServletRequest request,
        HttpServletResponse response) throws IOException {
    Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
    // If there is no authorization, anybody has administrator access.
    if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
        return true;
    }

    String remoteUser = request.getRemoteUser();
    if (remoteUser == null) {
        response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
                "Unauthenticated users are not " + "authorized to access this page.");
        return false;
    }

    if (servletContext.getAttribute(ADMINS_ACL) != null
            && !userHasAdministratorAccess(servletContext, remoteUser)) {
        response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
                "User " + remoteUser + " is unauthorized to access this page.");
        return false;
    }

    return true;
}
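
A hedged sketch of calling this helper from a servlet in the same package (the servlet class is an assumption for illustration; only hasAdministratorAccess comes from the snippet above):

// Hypothetical admin-only servlet, not part of HttpServer itself.
public class AdminOnlyServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
        // The helper already sends the error response when access is denied.
        if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) {
            return;
        }
        response.getWriter().println("admin-only content");
    }
}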

From source file: org.apache.hoya.tools.ConfigHelperTest.java

License: Apache License

@Test
public void testHBaseProviderTemplates() throws IOException {
    Configuration conf = loadConf(HBaseKeys.HBASE_TEMPLATE_RESOURCE);
    Assert.assertTrue(conf.getBoolean("hbase.cluster.distributed", false));
}

From source file: org.apache.hoya.tools.HoyaUtils.java

License: Apache License

/**
 * Flag to indicate whether the cluster is in secure mode
 * @param conf configuration to look at
 * @return true if the hoya client/service should be in secure mode
 */
public static boolean isClusterSecure(Configuration conf) {
    return conf.getBoolean(HoyaXmlConfKeys.KEY_SECURITY_ENABLED, false);
}
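
A sketch of branching on the flag (only the key constant and isClusterSecure come from the snippet; the setup is illustrative):

Configuration conf = new Configuration();
// Normally loaded from the cluster configuration; set here only for illustration.
conf.setBoolean(HoyaXmlConfKeys.KEY_SECURITY_ENABLED, true);

if (HoyaUtils.isClusterSecure(conf)) {
    // e.g. log in from a keytab before contacting the cluster
}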

From source file: org.apache.http.examples.client.TestFetcher.java

License: Apache License

public static void main(String[] args) throws Exception {
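    // Test URLs tried during development; each assignment overwrites the previous,
    // so only the final URL is actually fetched.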

    String url = "http://m.58.com/cd/zufang/";
    url = "http://i.m.58.com/cd/zufang/15538653692039x.shtml";
    url = "http://i.m.58.com/cd/zufang/15403127032966x.shtml";
    url = "http://m.58.com/wuhou/qiuzu/?from=list_select_quyu";
    url = "http://i.m.58.com/cd/qiuzu/15691792568835x.shtml";
    url = "http://i.m.58.com/cd/qiuzu/15514728510981x.shtml";

    url = "http://m.58.com/cd/ershoufang/";
    url = "http://i.m.58.com/cd/ershoufang/15660173611521x.shtml";
    // url = "http://i.m.58.com/cd/ershoufang/15692610703237x.shtml";
    // url = "http://i.m.58.com/cd/ershoufang/15646523265417x.shtml";
    // url = "http://i.m.58.com/cd/ershoufang/15682093896709x.shtml";
    url = "http://m.58.com/cd/hezu";
    url = "http://i.m.58.com/cd/hezu/11632175277065x.shtml";
    // url = "http://i.m.58.com/cd/hezu/15568727765129x.shtml";
    // url = "http://i.m.58.com/cd/hezu/15568727765129x.shtml";
    url = "http://wap.ganji.com/cd/fang1/445542193x";

    Pattern pattern = Pattern.compile("((.*?)\\?device=wap$)|((.*?)device=wap&(.*))|((.*?)&device=wap$)");
    System.out.println(pattern.matcher(url).replaceAll("$2$4$5$7"));
    pattern = Pattern.compile("(device=wap)");
    System.out.println(pattern.matcher(url).replaceAll(""));

    Configuration conf = NutchConfiguration.create();
    conf.set(Nutch.CRAWL_ID_KEY, "ea");
    NutchConstant.setUrlConfig(conf, 3);
    NutchConstant.setSegmentParseRules(conf);
    NutchConstant.getSegmentParseRules(conf);

    SegMentParsers parses = new SegMentParsers(conf);
    // Result<String, WebPage> rs = query.execute();
    long curTime = System.currentTimeMillis();
    UrlPathMatch urlcfg = NutchConstant.getUrlConfig(conf);
    boolean filter = conf.getBoolean(GeneratorJob.GENERATOR_FILTER, true);
    boolean normalise = conf.getBoolean(GeneratorJob.GENERATOR_NORMALISE, true);
    long limit = conf.getLong(GeneratorJob.GENERATOR_TOP_N, Long.MAX_VALUE);
    if (limit < 5) {
        limit = Long.MAX_VALUE;
    }
    int retryMax = conf.getInt("db.fetch.retry.max", 3);

    limit = Integer.MAX_VALUE;

    curTime = conf.getLong(GeneratorJob.GENERATOR_CUR_TIME, System.currentTimeMillis());

    ProtocolFactory protocolFactory = new ProtocolFactory(conf);

    int rowCount = 0;
    HttpComponent httpComponent = new HttpComponent();
    httpComponent.setConf(conf);
    long l = System.currentTimeMillis();

    try {
        l = System.currentTimeMillis();
        HttpClient httpClient = httpComponent.getClient();
        HttpParams httpParams = httpClient.getParams();
        httpClient.getParams().setParameter("http.protocol.cookie-policy", CookiePolicy.BROWSER_COMPATIBILITY);
        httpClient.getParams().setParameter("http.protocol.content-charset", HTTP.UTF_8);
        String userAgent = getAgentString("NutchCVS", null, "Nutch", "http://lucene.apache.org/nutch/bot.html",
                "nutch-agent@lucene.apache.org");
        userAgent = "Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1";
        // userAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36";
        // userAgent = "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1";
        // userAgent = "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.77 Safari/537.1";
        String acceptLanguage = "en-us,en-gb,en;q=0.7,*;q=0.3";
        String accept = "text/html,application/xml;q=0.9,application/xhtml+xml,text/xml;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5";
        String acceptCharset = "utf-8,ISO-8859-1;q=0.7,*;q=0.7";
        System.out.println("userAgent=" + userAgent);
        // Set up an HTTPS socket factory that accepts self-signed certs.
        ArrayList<BasicHeader> headers = new ArrayList<BasicHeader>();
        // Set the User Agent in the header
        headers.add(new BasicHeader("User-Agent", userAgent));
        // prefer English
        // headers.add(new BasicHeader("Accept-Language", acceptLanguage));
        // // prefer UTF-8
        // headers.add(new BasicHeader("Accept-Charset", acceptCharset));
        // // prefer understandable formats
        // headers.add(new BasicHeader("Accept", accept));
        // accept gzipped content
        headers.add(new BasicHeader("Accept-Encoding", "x-gzip, gzip, deflate"));
        httpParams.setParameter(ClientPNames.DEFAULT_HEADERS, headers);

        org.apache.nutch.net.protocols.Response response = new HttpComponentResponse(httpComponent,
                new URL(url), null, true);
        System.out.println("==========================================================");
        System.out.println(new String(response.getContent()).replace("\"utf-8\"", "\"GB2312\""));
        System.out.println("==========================================================");
        int code = response.getCode();
        System.out.println((new Date().toLocaleString()) + " num:" + rowCount + " code:" + code + " time:"
                + (System.currentTimeMillis() - l) + "  url:" + url);
        l = System.currentTimeMillis();
    } catch (Exception e) {
        e.printStackTrace(System.out);
    }
}

From source file: org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java

License: Apache License

/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override
public void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        super.initialize(name, cfg);

        setConf(cfg);

        String disableCacheName = String.format("fs.%s.impl.disable.cache", name.getScheme());

        cacheEnabled = !cfg.getBoolean(disableCacheName, false);

        mgmt = cfg.getBoolean(IGFS_MANAGEMENT, false);

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uri = name;

        uriAuthority = uri.getAuthority();

        setUser(cfg.get(MRJobConfig.USER_NAME, DFLT_USER_NAME));

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        igfsGrpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        // Initialize client logger.
        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (!initSecondary && paths.pathModes() != null && !paths.pathModes().isEmpty()) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                if (mode == PROXY) {
                    initSecondary = true;

                    break;
                }
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(SECONDARY_FS_URI);
            String secConfPath = props.get(SECONDARY_FS_CONFIG_PATH);
            String secUserName = props.get(SECONDARY_FS_USER_NAME);

            try {
                SecondaryFileSystemProvider secProvider = new SecondaryFileSystemProvider(secUri, secConfPath,
                        secUserName);

                secondaryFs = secProvider.createFileSystem();
                secondaryUri = secProvider.uri();
            } catch (IOException e) {
                if (!mgmt)
                    throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
                else
                    LOG.warn(
                            "Visor failed to create secondary file system (operations on paths with PROXY mode "
                                    + "will have no effect): " + e.getMessage());
            }
        }
    } finally {
        leaveBusy();
    }
}
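
The initializer honours Hadoop's standard per-scheme cache-disable flag, fs.<scheme>.impl.disable.cache. A client-side sketch of setting it (the URI and authority are illustrative):

Configuration cfg = new Configuration();

// Ask FileSystem.get() for a fresh instance instead of a cached one.
cfg.setBoolean("fs.igfs.impl.disable.cache", true);

FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost/"), cfg);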

From source file: org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

License: Apache License

/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (!initSecondary && paths.pathModes() != null) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                if (mode == PROXY) {
                    initSecondary = true;

                    break;
                }
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(SECONDARY_FS_URI);
            String secConfPath = props.get(SECONDARY_FS_CONFIG_PATH);
            String secUserName = props.get(SECONDARY_FS_USER_NAME);

            try {
                SecondaryFileSystemProvider secProvider = new SecondaryFileSystemProvider(secUri, secConfPath,
                        secUserName);

                secondaryFs = secProvider.createAbstractFileSystem();
                secondaryUri = secProvider.uri();
            } catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
            }
        }
    } finally {
        leaveBusy();
    }
}
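
Unlike the v1 class above, this variant backs Hadoop's FileContext API (AbstractFileSystem) rather than FileSystem. A hedged sketch of reaching it through FileContext (the URI is illustrative):

Configuration cfg = new Configuration();

// FileContext resolves the AbstractFileSystem registered for the igfs scheme.
FileContext fc = FileContext.getFileContext(URI.create("igfs://igfs@localhost/"), cfg);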

From source file: org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.java

License: Apache License

/** {@inheritDoc} */
@Override
public void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        super.initialize(name, cfg);

        setConf(cfg);

        String disableCacheName = String.format("fs.%s.impl.disable.cache", name.getScheme());

        cacheEnabled = !cfg.getBoolean(disableCacheName, false);

        mgmt = cfg.getBoolean(IGFS_MANAGEMENT, false);

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uri = name;

        uriAuthority = uri.getAuthority();

        setUser(cfg.get(MRJobConfig.USER_NAME, DFLT_USER_NAME));

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In GG replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        igfsGrpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        // Initialize client logger.
        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (paths.pathModes() != null && !paths.pathModes().isEmpty()) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                initSecondary |= mode == PROXY;
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_URI);
            String secConfPath = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_CONFIG_PATH);

            if (secConfPath == null)
                throw new IOException("Failed to connect to the secondary file system because configuration "
                        + "path is not provided.");

            if (secUri == null)
                throw new IOException(
                        "Failed to connect to the secondary file system because URI is not " + "provided.");

            try {
                secondaryUri = new URI(secUri);

                URL secondaryCfgUrl = U.resolveIgniteUrl(secConfPath);

                Configuration conf = new Configuration();

                if (secondaryCfgUrl != null)
                    conf.addResource(secondaryCfgUrl);

                String prop = String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme());

                conf.setBoolean(prop, true);

                secondaryFs = FileSystem.get(secondaryUri, conf);
            } catch (URISyntaxException ignore) {
                if (!mgmt)
                    throw new IOException("Failed to resolve secondary file system URI: " + secUri);
                else
                    LOG.warn(
                            "Visor failed to create secondary file system (operations on paths with PROXY mode "
                                    + "will have no effect).");
            } catch (IOException e) {
                if (!mgmt)
                    throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
                else
                    LOG.warn(
                            "Visor failed to create secondary file system (operations on paths with PROXY mode "
                                    + "will have no effect): " + e.getMessage());
            }
        }
    } finally {
        leaveBusy();
    }
}

From source file: org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.java

License: Apache License

/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In GG replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();

        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (paths.pathModes() != null) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                initSecondary |= mode == PROXY;
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_URI);
            String secConfPath = props.get(IgfsHadoopFileSystemWrapper.SECONDARY_FS_CONFIG_PATH);

            if (secConfPath == null)
                throw new IOException("Failed to connect to the secondary file system because configuration "
                        + "path is not provided.");

            if (secUri == null)
                throw new IOException(
                        "Failed to connect to the secondary file system because URI is not " + "provided.");

            try {
                secondaryUri = new URI(secUri);

                URL secondaryCfgUrl = U.resolveIgniteUrl(secConfPath);

                if (secondaryCfgUrl == null)
                    throw new IOException("Failed to resolve secondary file system config URL: " + secConfPath);

                Configuration conf = new Configuration();

                conf.addResource(secondaryCfgUrl);

                String prop = String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme());

                conf.setBoolean(prop, true);

                secondaryFs = AbstractFileSystem.get(secondaryUri, conf);
            } catch (URISyntaxException ignore) {
                throw new IOException("Failed to resolve secondary file system URI: " + secUri);
            } catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
            }
        }
    } finally {
        leaveBusy();
    }
}

From source file: org.apache.ignite.internal.igfs.hadoop.IgfsHadoopUtils.java

License: Apache License

/**
 * Get boolean parameter.
 *
 * @param cfg Configuration.
 * @param name Parameter name.
 * @param authority Authority.
 * @param dflt Default value.
 * @return Boolean value.
 */
public static boolean parameter(Configuration cfg, String name, String authority, boolean dflt) {
    return cfg.getBoolean(String.format(name, authority != null ? authority : ""), dflt);
}
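
The key is treated as a format string, with the file system's URI authority filling the %s slot, so one logical parameter can be scoped per authority. A sketch (the key literal is a hypothetical stand-in for constants such as PARAM_IGFS_LOG_ENABLED, whose exact values are not shown here):

Configuration cfg = new Configuration();
cfg.setBoolean("fs.igfs.myAuthority.log.enabled", true); // hypothetical authority-scoped key

// String.format("fs.igfs.%s.log.enabled", "myAuthority") -> "fs.igfs.myAuthority.log.enabled"
boolean enabled = parameter(cfg, "fs.igfs.%s.log.enabled", "myAuthority", false); // true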

From source file: org.apache.ignite.internal.processors.hadoop.impl.HadoopAbstractWordCountTest.java

License: Apache License

/**
 * Reads the whole text file into a String, sorting its lines.
 *
 * @param fileName Name of the file to read.
 * @param conf Configuration used to detect whether the output is compressed.
 * @return Content of the file as a String value.
 * @throws Exception If the file could not be read.
 */
protected String readAndSortFile(String fileName, Configuration conf) throws Exception {
    final List<String> list = new ArrayList<>();

    final boolean snappyDecode = conf != null && conf.getBoolean(FileOutputFormat.COMPRESS, false);

    if (snappyDecode) {
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
                SequenceFile.Reader.file(new Path(fileName)))) {
            Text key = new Text();

            IntWritable val = new IntWritable();

            while (reader.next(key, val))
                list.add(key + "\t" + val);
        }
    } else {
        try (InputStream is0 = igfs.open(new IgfsPath(fileName))) {
            BufferedReader reader = new BufferedReader(new InputStreamReader(is0));

            String line;

            while ((line = reader.readLine()) != null)
                list.add(line);
        }
    }

    Collections.sort(list);

    return Joiner.on('\n').join(list) + "\n";
}
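
The flag this method checks is the standard MapReduce output-compression switch. A sketch of the producing job's configuration that would make snappyDecode true (the codec choice is illustrative):

Configuration conf = new Configuration();
conf.setBoolean(FileOutputFormat.COMPRESS, true); // "mapreduce.output.fileoutputformat.compress"
conf.set(FileOutputFormat.COMPRESS_CODEC, SnappyCodec.class.getName());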