Example usage for org.apache.hadoop.conf Configuration setStrings

Introduction

This page shows example usages of org.apache.hadoop.conf.Configuration.setStrings, collected from open-source projects.

Prototype

public void setStrings(String name, String... values) 

Document

Set the array of string values for the name property as comma-delimited values.
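
Before the real-world examples below, here is a minimal sketch of the set/get round trip; the property name my.custom.hosts is made up for illustration. Because the values are stored as a single comma-delimited string, values that themselves contain commas will not survive the round trip intact.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

public class SetStringsExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Store three values; they are joined into the single string "alpha,beta,gamma".
        conf.setStrings("my.custom.hosts", "alpha", "beta", "gamma");

        // The raw property holds one comma-delimited value.
        System.out.println(conf.get("my.custom.hosts")); // alpha,beta,gamma

        // getStrings splits the stored value back into an array.
        System.out.println(Arrays.toString(conf.getStrings("my.custom.hosts"))); // [alpha, beta, gamma]
    }
}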

Usage

From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.IgniteHadoopFileSystemClientBasedOpenTest.java

License:Apache License

/**
 * Create configuration for test.
 *
 * @param idx Grid index.
 * @return Configuration.
 */
protected Configuration configuration(int idx) {
    Configuration cfg = new Configuration();

    cfg.set("fs.defaultFS", "igfs://" + authority(idx) + '/');
    cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
    cfg.set("fs.AbstractFileSystem.igfs.impl",
            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());

    cfg.setBoolean("fs.igfs.impl.disable.cache", true);

    cfg.setStrings(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_IGNITE_CFG_PATH, authority(idx)),
            cfgPath(idx));

    if (skipInProc)
        cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority(idx)), true);

    return cfg;
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.IgniteHadoopFileSystemClientBasedOpenTest.java

License:Apache License

/**
 * Create configuration for test.
 *
 * @return Configuration.
 */
protected Configuration configurationWrongIgfs() {
    Configuration cfg = new Configuration();

    cfg.set("fs.defaultFS", "igfs://igfs-wrong-name@/");
    cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName());
    cfg.set("fs.AbstractFileSystem.igfs.impl",
            org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName());

    cfg.setBoolean("fs.igfs.impl.disable.cache", true);

    cfg.setStrings(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_IGNITE_CFG_PATH, "igfs-wrong-name@"),
            cfgPath(0));

    return cfg;
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.IgniteHadoopFileSystemLoggerStateSelfTest.java

License:Apache License

/**
 * Instantiate new file system.
 *
 * @return New file system.
 * @throws Exception If failed.
 */
private IgniteHadoopFileSystem fileSystem() throws Exception {
    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    if (logging)
        fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs@"), logging);

    fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs@"), U.getIgniteHome());

    return (IgniteHadoopFileSystem) FileSystem.get(new URI("igfs://igfs@/"), fsCfg);
}

From source file:org.apache.kylin.storage.hbase.HBaseConnection.java

License:Apache License

public static void addHBaseClusterNNHAConfiguration(Configuration conf) {
    String hdfsConfigFile = KylinConfig.getInstanceFromEnv().getHBaseClusterHDFSConfigFile();
    if (hdfsConfigFile == null || hdfsConfigFile.isEmpty()) {
        return;
    }
    Configuration hdfsConf = new Configuration(false);
    hdfsConf.addResource(hdfsConfigFile);
    Collection<String> nameServices = hdfsConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
    Collection<String> mainNameServices = conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
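    // For each HA nameservice defined in the HBase cluster's HDFS config, copy its
    // namenode list, failover proxy provider and per-namenode RPC addresses into
    // the main configuration, so clients can resolve both clusters' nameservices.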
    for (String serviceId : nameServices) {
        mainNameServices.add(serviceId);

        String serviceConfKey = DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + serviceId;
        String proxyConfKey = DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + serviceId;
        conf.set(serviceConfKey, hdfsConf.get(serviceConfKey, ""));
        conf.set(proxyConfKey, hdfsConf.get(proxyConfKey, ""));

        Collection<String> nameNodes = hdfsConf.getTrimmedStringCollection(serviceConfKey);
        for (String nameNode : nameNodes) {
            String rpcConfKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + serviceId + "." + nameNode;
            conf.set(rpcConfKey, hdfsConf.get(rpcConfKey, ""));
        }
    }
    conf.setStrings(DFSConfigKeys.DFS_NAMESERVICES, mainNameServices.toArray(new String[0]));
    // See YARN-3021: instruct the RM to skip token renewal for the HBase cluster's name services
    conf.setStrings(JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE, nameServices.toArray(new String[0]));
}

From source file:org.apache.lens.cube.parse.TestCubeRewriter.java

License:Apache License

@Test
public void testQueryWithMeasureWithDataCompletenessTagWithNoFailureOnPartialData()
        throws ParseException, LensException {
    //In this query a measure is used for which dataCompletenessTag is set.
    Configuration conf = getConf();
    conf.setStrings(CubeQueryConfUtil.COMPLETENESS_CHECK_PART_COL, "dt");
    String hqlQuery = rewrite("select SUM(msr1) from basecube where " + TWO_DAYS_RANGE, conf);
    String expected = getExpectedQuery("basecube", "select sum(basecube.msr1) FROM ", null, null,
            getWhereForHourly2days("basecube", "c1_testfact1_raw_base"));
    compareQueries(hqlQuery, expected);
}

From source file:org.apache.lens.cube.parse.TestCubeRewriter.java

License:Apache License

@Test
public void testQueryWithMeasureWithDataCompletenessPresentInMultipleFacts()
        throws ParseException, LensException {
    /* In this query a measure is used that is present in two facts with different %completeness. While resolving
    the facts, the fact with the higher dataCompletenessFactor gets picked up. */
    Configuration conf = getConf();
    conf.setStrings(CubeQueryConfUtil.COMPLETENESS_CHECK_PART_COL, "dt");
    String hqlQuery = rewrite("select SUM(msr9) from basecube where " + TWO_DAYS_RANGE, conf);
    String expected = getExpectedQuery("basecube", "select sum(basecube.msr9) FROM ", null, null,
            getWhereForHourly2days("basecube", "c1_testfact5_raw_base"));
    compareQueries(hqlQuery, expected);
}

From source file:org.apache.lens.cube.parse.TestCubeRewriter.java

License:Apache License

@Test
public void testCubeWhereQueryWithMeasureWithDataCompletenessAndFailIfPartialDataFlagSet()
        throws ParseException, LensException {
    /* In this query a measure is used for which dataCompletenessTag is set and the flag FAIL_QUERY_ON_PARTIAL_DATA
    is set. The partitions for the queried range are present, but some of them have incomplete data, so the query
    throws a NO_CANDIDATE_FACT_AVAILABLE exception. */
    Configuration conf = getConf();
    conf.setStrings(CubeQueryConfUtil.COMPLETENESS_CHECK_PART_COL, "dt");
    conf.setBoolean(CubeQueryConfUtil.FAIL_QUERY_ON_PARTIAL_DATA, true);

    LensException e = getLensExceptionInRewrite("select SUM(msr9) from basecube where " + TWO_DAYS_RANGE, conf);
    assertEquals(e.getErrorCode(),
            LensCubeErrorCode.NO_CANDIDATE_FACT_AVAILABLE.getLensErrorInfo().getErrorCode());
    NoCandidateFactAvailableException ne = (NoCandidateFactAvailableException) e;
    PruneCauses.BriefAndDetailedError pruneCauses = ne.getJsonMessage();
    /* Since the flag FAIL_QUERY_ON_PARTIAL_DATA is set and the queried fact has incomplete data, we expect the
    prune cause to be INCOMPLETE_PARTITION. The check below validates this. */
    assertEquals(pruneCauses.getBrief(), String.format(INCOMPLETE_PARTITION.errorFormat, "[msr9]"));
}

From source file:org.apache.lens.driver.es.QueryTranslationTest.java

License:Apache License

@Override
protected void initializeConfig(Configuration config) {
    config.setInt(ESDriverConfig.TERM_FETCH_SIZE_KEY, 10000);
    config.setInt(ESDriverConfig.QUERY_TIME_OUT_LENS_KEY, 10000);
    config.setInt(ESDriverConfig.MAX_ROW_SIZE_KEY, -1);
    config.setInt(ESDriverConfig.AGGR_BUCKET_SIZE_LENS_KEY, 100);
    config.setStrings(ESDriverConfig.CLIENT_CLASS_KEY, MockClientES.class.getCanonicalName());
    config.setBoolean(CubeQueryConfUtil.FAIL_QUERY_ON_PARTIAL_DATA, false);
    config.setStrings(CubeQueryConfUtil.DRIVER_SUPPORTED_STORAGES, "es_storage");
    config.setStrings(CubeQueryConfUtil.TIME_RANGE_WRITER_CLASS,
            BetweenTimeRangeWriter.class.getCanonicalName());
    config.setStrings(CubeQueryConfUtil.PART_WHERE_CLAUSE_DATE_FORMAT, "yyyy-MM-dd'T'HH:mm:ss");
}

From source file:org.apache.lens.driver.es.ResultSetTransformationTest.java

License:Apache License

@Override
protected void initializeConfig(Configuration config) {
    config.setInt(ESDriverConfig.TERM_FETCH_SIZE_KEY, 10000);
    config.setInt(ESDriverConfig.QUERY_TIME_OUT_LENS_KEY, 10000);
    config.setInt(ESDriverConfig.MAX_ROW_SIZE_KEY, -1);
    config.setInt(ESDriverConfig.AGGR_BUCKET_SIZE_LENS_KEY, 100);
    config.setStrings(ESDriverConfig.CLIENT_CLASS_KEY, MockClientES.class.getCanonicalName());
    config.setBoolean(CubeQueryConfUtil.FAIL_QUERY_ON_PARTIAL_DATA, false);
    config.setStrings(CubeQueryConfUtil.DRIVER_SUPPORTED_STORAGES, "es_storage");
}

From source file:org.apache.lens.driver.es.ScrollingQueryTest.java

License:Apache License

@Override
protected void initializeConfig(Configuration config) {
    config.setInt(ESDriverConfig.TERM_FETCH_SIZE_KEY, 1);
    config.setInt(ESDriverConfig.QUERY_TIME_OUT_LENS_KEY, 10000);
    config.setInt(ESDriverConfig.MAX_ROW_SIZE_KEY, -1);
    config.setInt(ESDriverConfig.AGGR_BUCKET_SIZE_LENS_KEY, 100);
    config.setStrings(ESDriverConfig.CLIENT_CLASS_KEY, MockClientES.class.getCanonicalName());
    config.setBoolean(CubeQueryConfUtil.FAIL_QUERY_ON_PARTIAL_DATA, false);
    config.setStrings(CubeQueryConfUtil.DRIVER_SUPPORTED_STORAGES, "es_storage");
}