Example usage for org.apache.hadoop.conf Configuration setStrings

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setStrings from open source projects.

Prototype

public void setStrings(String name, String... values) 

Document

Set the array of string values for the name property as comma-delimited values.
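
For orientation, here is a minimal, self-contained sketch of the round trip; the property name example.servers and the host values are made up for illustration, and only hadoop-common is assumed on the classpath. setStrings joins the values into a single comma-delimited property, and getStrings splits that property back into an array.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

public class SetStringsExample {
    public static void main(String[] args) {
        // false = do not load the default resources (core-default.xml, core-site.xml)
        Configuration conf = new Configuration(false);

        // Store several values under one property; they are joined with commas.
        conf.setStrings("example.servers", "host1:2181", "host2:2181", "host3:2181");

        // The stored value is a single comma-delimited string ...
        System.out.println(conf.get("example.servers"));
        // ... and getStrings() splits it back into a String[].
        System.out.println(Arrays.toString(conf.getStrings("example.servers")));
    }
}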

Usage

From source file:org.apache.phoenix.end2end.index.IndexHandlerIT.java

License:Apache License

@Test
public void testClientWritesWithPriority() throws Exception {
    Configuration conf = new Configuration(UTIL.getConfiguration());
    // add the keys for our rpc factory
    conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, CountingIndexClientRpcFactory.class.getName());
    // and set the index table as the current table
    conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY, TestTable.getTableNameString());
    HTable table = new HTable(conf, TestTable.getTableName());

    // do a write to the table
    Put p = new Put(row);
    p.add(family, qual, new byte[] { 1, 0, 1, 0 });
    table.put(p);
    table.flushCommits();

    // check the counts on the rpc controller
    assertEquals("Didn't get the expected number of index priority writes!", 1,
            (int) CountingIndexClientRpcController.priorityCounts
                    .get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));

    table.close();
}

From source file:org.apache.phoenix.end2end.SecureQueryServerIT.java

License:Apache License

/**
 * Set up and start Kerberos and HBase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
    //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    //     (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();

    // Create a number of unprivileged users
    createUsers(3);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName());

    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.principal", SPNEGO_PRINCIPAL);
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.HTTP.groups", "*");
    conf.set("hadoop.proxyuser.HTTP.hosts", "*");

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();

    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong
    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
    //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
    Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();

    // Then fork a thread with PQS in it.
    startQueryServer();
}

From source file:org.apache.pig.tools.pigstats.mapreduce.MRScriptState.java

License:Apache License

public void addWorkflowAdjacenciesToConf(MROperPlan mrop, Configuration conf) {
    for (MapReduceOper source : mrop) {
        List<String> targets = new ArrayList<String>();
        if (mrop.getSuccessors(source) != null) {
            for (MapReduceOper target : mrop.getSuccessors(source)) {
                targets.add(target.getOperatorKey().toString());
            }
        }
        String[] s = new String[targets.size()];
        conf.setStrings("mapreduce.workflow.adjacency." + source.getOperatorKey().toString(),
                targets.toArray(s));
    }
}

From source file:org.apache.pig.tools.pigstats.tez.TezScriptState.java

License:Apache License

public void addWorkflowAdjacenciesToConf(TezOperPlan tezPlan, Configuration conf) {
    for (TezOperator source : tezPlan) {
        List<String> targets = new ArrayList<String>();
        if (tezPlan.getSuccessors(source) != null) {
            for (TezOperator target : tezPlan.getSuccessors(source)) {
                targets.add(target.getOperatorKey().toString());
            }
        }
        String[] s = new String[targets.size()];
        conf.setStrings("mapreduce.workflow.adjacency." + source.getOperatorKey().toString(),
                targets.toArray(s));
    }
}

From source file:org.apache.rya.accumulo.pig.IndexWritingTool.java

License:Apache License

public void setVarOrders(final String s, final Configuration conf) throws MalformedQueryException {

    final SPARQLParser parser = new SPARQLParser();
    final TupleExpr query = parser.parseQuery(s, null).getTupleExpr();

    final List<String> projList = Lists
            .newArrayList(((Projection) query).getProjectionElemList().getTargetNames());
    final String projElems = Joiner.on(";").join(projList);
    conf.set("projElems", projElems);

    final Pattern splitPattern1 = Pattern.compile("\n");
    final Pattern splitPattern2 = Pattern.compile(",");
    final String[] lines = splitPattern1.split(s);

    final List<String> varOrders = Lists.newArrayList();
    final List<String> varOrderPos = Lists.newArrayList();

    int orderNum = 0;
    final int projSizeSq = projList.size() * projList.size();

    for (String t : lines) {

        if (orderNum > projSizeSq) {
            break;
        }

        String[] order = null;
        if (t.startsWith("#prefix")) {
            t = t.substring(7).trim();
            order = splitPattern2.split(t, projList.size());
        }

        String tempVarOrder = "";
        String tempVarOrderPos = "";

        if (order != null) {
            for (final String u : order) {
                if (tempVarOrder.length() == 0) {
                    tempVarOrder = u.trim();
                } else {
                    tempVarOrder = tempVarOrder + ";" + u.trim();
                }
                final int pos = projList.indexOf(u.trim());
                if (pos < 0) {
                    throw new IllegalArgumentException("Invalid variable order!");
                } else {
                    if (tempVarOrderPos.length() == 0) {
                        tempVarOrderPos = tempVarOrderPos + pos;
                    } else {
                        tempVarOrderPos = tempVarOrderPos + ";" + pos;
                    }
                }
            }

            varOrders.add(tempVarOrder);
            varOrderPos.add(tempVarOrderPos);
        }

        if (tempVarOrder.length() > 0) {
            orderNum++;
        }

    }

    if (orderNum == 0) {
        varOrders.add(projElems);
        String tempVarPos = "";

        for (int i = 0; i < projList.size(); i++) {
            if (i == 0) {
                tempVarPos = Integer.toString(0);
            } else {
                tempVarPos = tempVarPos + ";" + i;
            }
        }
        varOrderPos.add(tempVarPos);

    }

    final String[] vOrders = varOrders.toArray(new String[varOrders.size()]);
    final String[] vOrderPos = varOrderPos.toArray(new String[varOrderPos.size()]);

    conf.setStrings("varOrders", vOrders);
    conf.setStrings("varOrderPos", vOrderPos);

}

From source file:org.apache.rya.indexing.geoExamples.RyaMongoGeoDirectExample.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = getConf();
    conf.setBoolean(ConfigUtils.DISPLAY_QUERY_PLAN, PRINT_QUERIES);
    conf.setBoolean(OptionalConfigUtils.USE_GEO, true); // Note also the use of "GeoRyaSailFactory" below.
    conf.setStrings(OptionalConfigUtils.GEO_PREDICATES_LIST, "http://www.opengis.net/ont/geosparql#asWKT"); // Note also the use of "GeoRyaSailFactory" below.

    SailRepository repository = null;
    SailRepositoryConnection conn = null;
    try {
        log.info("Connecting to Indexing Sail Repository.");
        Sail sail = GeoRyaSailFactory.getInstance(conf);
        repository = new SailRepository(sail);
        conn = repository.getConnection();

        long start = System.currentTimeMillis();
        testAddPointAndWithinSearch(conn); // uses geospatial features

        log.info("TIME: " + (System.currentTimeMillis() - start) / 1000.);
    } finally {
        log.info("Shutting down");
        closeQuietly(conn);
        closeQuietly(repository);
        if (mock != null) {
            mock.shutdown();
        }
        MongoConnectorFactory.closeMongoClient();
    }
}

From source file:org.apache.sentry.provider.db.generic.service.persistent.SentryStoreIntegrationBase.java

License:Apache License

private static void setup(Configuration conf) throws Exception {
    dataDir = new File(Files.createTempDir(), "sentry_policy_db");
    conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
    conf.set(ServerConfig.SENTRY_STORE_JDBC_URL,
            "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true");
    conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "dummy");
    conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups);
    conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);

    policyFilePath = new File(Files.createTempDir(), "local_policy_file.ini");
    conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath());
}

From source file:org.apache.sentry.provider.db.service.persistent.TestSentryStoreImportExport.java

License:Apache License

@BeforeClass
public static void setupEnv() throws Exception {
    dataDir = new File(Files.createTempDir(), "sentry_policy_db");
    Configuration conf = new Configuration(true);
    conf.set(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
    conf.set(ServerConfig.SENTRY_STORE_JDBC_URL,
            "jdbc:derby:;databaseName=" + dataDir.getPath() + ";create=true");
    conf.set(ServerConfig.SENTRY_STORE_JDBC_PASS, "sentry");
    conf.setStrings(ServerConfig.ADMIN_GROUPS, adminGroups);
    conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
    policyFilePath = new File(dataDir, "local_policy_file.ini");
    conf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFilePath.getPath());
    policyFile = new PolicyFile();
    boolean hdfsSyncEnabled = SentryServiceUtil.isHDFSSyncEnabled(conf);
    sentryStore = new SentryStore(conf);
    sentryStore.setPersistUpdateDeltas(hdfsSyncEnabled);

    String adminUser = "g1";
    addGroupsToUser(adminUser, adminGroups);
    writePolicyFile();
}

From source file:org.apache.solr.hadoop.hack.MiniMRYarnCluster.java

License:Apache License

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir/").getAbsolutePath());
    }

    // By default, VMEM monitoring disabled, PMEM monitoring enabled.
    if (!conf.getBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
            MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
        conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
        conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    }

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use a absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive where the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using absolute path can avoid this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        // mkdir the staging directory so that the right permissions are set while running as a proxy user
        fc.mkdir(stagingPath, null, true);
        // mkdir the done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test"); // The default is "local", in which case
                                               // the shuffle doesn't happen.
    // Configure the shuffle service in the NM.
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(String.format(Locale.ENGLISH, YarnConfiguration.NM_AUX_SERVICE_FMT,
            ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);

    super.serviceInit(conf);
}

From source file:org.apache.sqoop.mapreduce.MySQLDumpImportJob.java

License:Apache License

/**
 * Configure the inputformat to use for the job.
 */
protected void configureInputFormat(Job job, String tableName, String tableClassName, String splitByCol)
        throws ClassNotFoundException, IOException {

    if (null == tableName) {
        LOG.error("mysqldump-based import cannot support free-form query imports.");
        LOG.error("Do not use --direct and --query together for MySQL.");
        throw new IOException("null tableName for MySQLDumpImportJob.");
    }

    ConnManager mgr = getContext().getConnManager();
    String username = options.getUsername();
    if (null == username || username.length() == 0) {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString(),
                options.getConnectionParams());
    } else {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString(),
                username, options.getPassword(), options.getConnectionParams());
    }

    String[] colNames = options.getColumns();
    if (null == colNames) {
        colNames = mgr.getColumnNames(tableName);
    }

    String[] sqlColNames = null;
    if (null != colNames) {
        sqlColNames = new String[colNames.length];
        for (int i = 0; i < colNames.length; i++) {
            sqlColNames[i] = mgr.escapeColName(colNames[i]);
        }
    }

    // It's ok if the where clause is null in DBInputFormat.setInput.
    String whereClause = options.getWhereClause();

    // We can't set the class properly in here, because we may not have the
    // jar loaded in this JVM. So we start by calling setInput() with
    // DBWritable and then overriding the string manually.

    // Note that mysqldump also does *not* want a quoted table name.
    DataDrivenDBInputFormat.setInput(job, DBWritable.class, tableName, whereClause,
            mgr.escapeColName(splitByCol), sqlColNames);

    Configuration conf = job.getConfiguration();
    conf.setInt(MySQLUtils.OUTPUT_FIELD_DELIM_KEY, options.getOutputFieldDelim());
    conf.setInt(MySQLUtils.OUTPUT_RECORD_DELIM_KEY, options.getOutputRecordDelim());
    conf.setInt(MySQLUtils.OUTPUT_ENCLOSED_BY_KEY, options.getOutputEnclosedBy());
    conf.setInt(MySQLUtils.OUTPUT_ESCAPED_BY_KEY, options.getOutputEscapedBy());
    conf.setBoolean(MySQLUtils.OUTPUT_ENCLOSE_REQUIRED_KEY, options.isOutputEncloseRequired());
    String[] extraArgs = options.getExtraArgs();
    if (null != extraArgs) {
        conf.setStrings(MySQLUtils.EXTRA_ARGS_KEY, extraArgs);
    }

    LOG.debug("Using InputFormat: " + inputFormatClass);
    job.setInputFormatClass(getInputFormatClass());
}