Example usage for org.apache.hadoop.conf Configuration setStrings

List of usage examples for org.apache.hadoop.conf Configuration setStrings

Introduction

On this page you can find example usages of org.apache.hadoop.conf Configuration setStrings.

Prototype

public void setStrings(String name, String... values) 

Document

Set the array of string values for the name property as comma-delimited values.
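
A minimal, self-contained sketch of the call (the property name "demo.hosts" and its values are invented for illustration): the values are stored as one comma-delimited string and can be split back out with getStrings.

import org.apache.hadoop.conf.Configuration;

public class SetStringsDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // setStrings joins the values into a single comma-delimited
        // property value under the given key.
        conf.setStrings("demo.hosts", "host1", "host2", "host3");

        // The raw property holds the comma-delimited form.
        System.out.println(conf.get("demo.hosts")); // host1,host2,host3

        // getStrings splits the value back into an array.
        for (String host : conf.getStrings("demo.hosts")) {
            System.out.println(host);
        }
    }
}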

Usage

From source file:org.apache.cassandra.hadoop.AbstractBulkOutputFormat.java

License:Apache License

/**
 * Set the hosts to ignore. Data will not be bulk loaded onto the ignored nodes.
 * @param conf job configuration
 * @param ignoreNodes the nodes to ignore
 */
public static void setIgnoreHosts(Configuration conf, String... ignoreNodes) {
    conf.setStrings(AbstractBulkRecordWriter.IGNORE_HOSTS, ignoreNodes);
}

From source file:org.apache.eagle.storage.hbase.aggregate.coprocessor.TestGroupAggregateClient.java

License:Apache License

@BeforeClass
public static void setUpHBase() {
    Configuration conf = new Configuration();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AggregateProtocolEndPoint.class.getName());
    TestHBaseBase.setupHBaseWithConfig(conf);
}

From source file:org.apache.eagle.storage.hbase.spi.TestHBaseStorageAggregateWithCoprocessor.java

License:Apache License

@BeforeClass
public static void setUpHBase() {
    System.setProperty("config.resource", "/application-co.conf");
    Configuration conf = new Configuration();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AggregateProtocolEndPoint.class.getName());
    TestHBaseBase.setupHBaseWithConfig(conf);
}

From source file:org.apache.eagle.storage.hbase.TestWithHBaseCoprocessor.java

License:Apache License

@BeforeClass
public static void setUpHBase() throws IOException {
    System.setProperty("config.resource", "/application-co.conf");
    Configuration conf = HBaseConfiguration.create();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AggregateProtocolEndPoint.class.getName());
    conf.set("zookeeper.znode.parent", getZkZnodeParent());
    conf.setInt("hbase.master.info.port", -1);//avoid port clobbering
    conf.setInt("hbase.regionserver.info.port", -1);//avoid port clobbering

    int attempts = 0;
    hbase = new HBaseTestingUtility(conf);
    boolean successToStart = false;
    while (attempts < 3) {
        try {
            attempts++;
            hbase.startMiniCluster();
            successToStart = true;
            break; // cluster is up; stop retrying
        } catch (Exception e) {
            LOG.error("Failed to start mini cluster (attempt {} of 3): {}", attempts, e.getMessage(), e);
            try {
                hbase.shutdownMiniCluster();
            } catch (Exception e1) {
                LOG.warn(e1.getMessage(), e1);
            }
        }
    }

    Assert.assertTrue("Failed to start mini cluster in " + attempts + " attempts", successToStart);

    HTable table = hbase.createTable("unittest", "f");
    HTableDescriptor descriptor = new HTableDescriptor(table.getTableDescriptor());
    descriptor.addCoprocessor(AggregateProtocolEndPoint.class.getName());
    hbase.getHBaseAdmin().modifyTable("unittest", descriptor);

    System.setProperty("storage.hbase.autoCreateTable", "false");
    System.setProperty("storage.hbase.coprocessorEnabled", String.valueOf(true));
    System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
    System.setProperty("storage.hbase.zookeeperPropertyClientPort",
            String.valueOf(hbase.getZkCluster().getClientPort()));
}

From source file:org.apache.giraph.conf.ClassConfOption.java

License:Apache License

/**
 * Set classes for this key
 *
 * @param conf Configuration
 * @param klasses Classes to set
 */
public void setMany(Configuration conf, Class<? extends C>... klasses) {
    String[] klassNames = new String[klasses.length];
    for (int i = 0; i < klasses.length; ++i) {
        Class<?> klass = klasses[i];
        if (!interfaceClass.isAssignableFrom(klass)) {
            throw new RuntimeException(klass + " does not implement " + interfaceClass.getName());
        }
        klassNames[i] = klasses[i].getName();
    }
    conf.setStrings(getKey(), klassNames);
}

From source file:org.apache.giraph.hive.common.HiveUtils.java

License:Apache License

/**
 * Add string to collection
 *
 * @param conf Configuration
 * @param key to add
 * @param values values for collection
 */
public static void addToStringCollection(Configuration conf, String key, Collection<String> values) {
    Collection<String> strings = conf.getStringCollection(key);
    strings.addAll(values);
    conf.setStrings(key, strings.toArray(new String[strings.size()]));
}
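
The read-modify-write here is deliberate: setStrings replaces the property outright, so appending requires fetching the existing values first. A standalone sketch of the same pattern, with the key "demo.values" invented for illustration:

import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;

public class AppendStringsDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setStrings("demo.values", "a", "b");

        // Fetch the current values, add the new ones, and write the
        // merged result back; calling setStrings alone would drop "a,b".
        Collection<String> strings = conf.getStringCollection("demo.values");
        strings.addAll(Arrays.asList("c", "d"));
        conf.setStrings("demo.values", strings.toArray(new String[0]));

        System.out.println(conf.get("demo.values")); // a,b,c,d
    }
}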

From source file:org.apache.gora.goraci.Verify.java

License:Apache License

private void readFlushed(Configuration conf) throws Exception {
    DataStore<Utf8, Flushed> flushedTable = DataStoreFactory.getDataStore(Utf8.class, Flushed.class, conf);

    Query<Utf8, Flushed> query = flushedTable.newQuery();
    Result<Utf8, Flushed> result = flushedTable.execute(query);

    ArrayList<String> flushedEntries = new ArrayList<String>();
    while (result.next()) {
        flushedEntries.add(result.getKey() + ":" + result.get().getCount());
    }

    conf.setStrings("org.apache.gora.goraci.verify.flushed", flushedEntries.toArray(new String[] {}));

    flushedTable.close();
}

From source file:org.apache.gora.mapreduce.GoraMapReduceUtils.java

License:Apache License

/**
 * Add our own serializer (obtained via the {@link PersistentSerialization} 
 * wrapper) to any other <code>io.serializations</code> which may be specified 
 * within existing Hadoop configuration.
 *
 * @param conf the Hadoop configuration object
 * @param reuseObjects boolean parameter to reuse objects
 */
public static void setIOSerializations(Configuration conf, boolean reuseObjects) {
    String serializationClass = PersistentSerialization.class.getCanonicalName();
    String[] serializations = StringUtils.joinStringArrays(conf.getStrings("io.serializations"),
            "org.apache.hadoop.io.serializer.WritableSerialization",
            StringSerialization.class.getCanonicalName(), serializationClass);
    conf.setStrings("io.serializations", serializations);
}
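
The merge is the point of this snippet: overwriting io.serializations without carrying the existing entries forward would disable the standard Writable serializer. A minimal sketch of the same merge using only stock Hadoop classes (JavaSerialization stands in for Gora's serializers here):

import org.apache.hadoop.conf.Configuration;

public class MergeSerializationsDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Read whatever serializers are already configured
        // (core-default.xml normally supplies WritableSerialization).
        String[] existing = conf.getStrings("io.serializations", new String[0]);

        // Append one more serializer class name, keeping the existing ones.
        String[] merged = new String[existing.length + 1];
        System.arraycopy(existing, 0, merged, 0, existing.length);
        merged[existing.length] = "org.apache.hadoop.io.serializer.JavaSerialization";

        conf.setStrings("io.serializations", merged);
        System.out.println(conf.get("io.serializations"));
    }
}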

From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemLoggerStateSelfTest.java

License:Apache License

/**
 * Instantiate new file system.
 *
 * @return New file system.
 * @throws Exception If failed.
 */
private IgfsHadoopFileSystem fileSystem() throws Exception {
    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    if (logging)
        fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);

    fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());

    return (IgfsHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}

From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemLoggerStateSelfTest.java

License:Apache License

/**
 * Instantiate new file system.
 *
 * @return New file system.
 * @throws Exception If failed.
 */
private IgniteHadoopFileSystem fileSystem() throws Exception {
    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    if (logging)
        fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);

    fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());

    return (IgniteHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}