Example usage for org.apache.hadoop.conf Configuration setStrings

List of usage examples for org.apache.hadoop.conf Configuration setStrings

Introduction

On this page you can find example usage of org.apache.hadoop.conf.Configuration.setStrings.

Prototype

public void setStrings(String name, String... values) 

Document

Set the array of string values for the name property as comma delimited values.
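
Before the harvested examples below, here is a minimal, self-contained sketch of the setStrings/getStrings round trip. The property name "example.local.dirs" and the directory values are invented purely for illustration.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;

public class SetStringsExample {
    public static void main(String[] args) {
        // new Configuration(false) skips loading the default resource files.
        Configuration conf = new Configuration(false);

        // Store several values under one (made-up) property name; internally
        // they are kept as a single comma-delimited string.
        conf.setStrings("example.local.dirs", "/tmp/dir1", "/tmp/dir2", "/tmp/dir3");

        // The raw property value is the comma-delimited form.
        System.out.println(conf.get("example.local.dirs"));   // /tmp/dir1,/tmp/dir2,/tmp/dir3

        // getStrings() splits the stored value back into a String array.
        String[] dirs = conf.getStrings("example.local.dirs");
        System.out.println(Arrays.toString(dirs));             // [/tmp/dir1, /tmp/dir2, /tmp/dir3]
    }
}

Because the values are stored comma-delimited, a value that itself contains a comma would be split apart again by getStrings(), so comma-free values are the straightforward case for this round trip.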

Usage

From source file:org.apache.tez.runtime.library.common.shuffle.impl.TestSimpleFetchedInputAllocator.java

License:Apache License

@Test(timeout = 5000)
public void testInMemAllocation() throws IOException {
    String localDirs = "/tmp/" + this.getClass().getName();
    Configuration conf = new Configuration();

    long jvmMax = Runtime.getRuntime().maxMemory();
    LOG.info("jvmMax: " + jvmMax);

    float bufferPercent = 0.1f;
    conf.setFloat(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_BUFFER_PERCENT, bufferPercent);
    conf.setFloat(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MEMORY_LIMIT_PERCENT, 1.0f);
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs);

    long inMemThreshold = (long) (bufferPercent * jvmMax);
    LOG.info("InMemThreshold: " + inMemThreshold);

    SimpleFetchedInputAllocator inputManager = new SimpleFetchedInputAllocator(UUID.randomUUID().toString(),
            conf, Runtime.getRuntime().maxMemory(), inMemThreshold);

    long requestSize = (long) (0.4f * inMemThreshold);
    long compressedSize = 1l;
    LOG.info("RequestSize: " + requestSize);

    FetchedInput fi1 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(1, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi1.getType());

    FetchedInput fi2 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(2, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi2.getType());

    // Over limit by this point. Next reserve should give back a DISK allocation
    FetchedInput fi3 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(3, 1));
    assertEquals(FetchedInput.Type.DISK, fi3.getType());

    // Freed one memory allocation. Next should be mem again.
    fi1.abort();
    fi1.free();
    FetchedInput fi4 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(4, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi4.getType());

    // Freed one disk allocation. Next should be disk again (no mem freed)
    fi3.abort();
    fi3.free();
    FetchedInput fi5 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(4, 1));
    assertEquals(FetchedInput.Type.DISK, fi5.getType());
}

From source file:org.apache.tez.runtime.library.common.shuffle.orderedgrouped.TestMergeManager.java

License:Apache License

@Test(timeout = 10000)
public void testLocalDiskMergeMultipleTasks() throws IOException {

    Configuration conf = new TezConfiguration(defaultConf);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS, false);
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, IntWritable.class.getName());
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, IntWritable.class.getName());

    Path localDir = new Path(workDir, "local");
    Path srcDir = new Path(workDir, "srcData");
    localFs.mkdirs(localDir);
    localFs.mkdirs(srcDir);

    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDir.toString());

    FileSystem localFs = FileSystem.getLocal(conf);
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(TezRuntimeFrameworkConfigs.LOCAL_DIRS);
    InputContext t0inputContext = createMockInputContext(UUID.randomUUID().toString());
    InputContext t1inputContext = createMockInputContext(UUID.randomUUID().toString());

    ExceptionReporter t0exceptionReporter = mock(ExceptionReporter.class);
    ExceptionReporter t1exceptionReporter = mock(ExceptionReporter.class);

    MergeManager t0mergeManagerReal = new MergeManager(conf, localFs, localDirAllocator, t0inputContext, null,
            null, null, null, t0exceptionReporter, 2000000, null, false, -1);
    MergeManager t0mergeManager = spy(t0mergeManagerReal);

    MergeManager t1mergeManagerReal = new MergeManager(conf, localFs, localDirAllocator, t1inputContext, null,
            null, null, null, t1exceptionReporter, 2000000, null, false, -1);
    MergeManager t1mergeManager = spy(t1mergeManagerReal);

    // Partition 0 Keys 0-2, Partition 1 Keys 3-5
    SrcFileInfo src1Info = createFile(conf, localFs,
            new Path(srcDir, InputAttemptIdentifier.PATH_PREFIX + "src1.out"), 2, 3, 0);
    // Partition 0 Keys 6-8, Partition 1 Keys 9-11
    SrcFileInfo src2Info = createFile(conf, localFs,
            new Path(srcDir, InputAttemptIdentifier.PATH_PREFIX + "src2.out"), 2, 3, 6);

    // Simulating Task 0 fetches partition 0. (targetIndex = 0,1)

    // Simulating Task 1 fetches partition 1. (targetIndex = 0,1)

    InputAttemptIdentifier t0Identifier0 = new InputAttemptIdentifier(0, 0, src1Info.path.getName());
    InputAttemptIdentifier t0Identifier1 = new InputAttemptIdentifier(1, 0, src2Info.path.getName());

    InputAttemptIdentifier t1Identifier0 = new InputAttemptIdentifier(0, 0, src1Info.path.getName());
    InputAttemptIdentifier t1Identifier1 = new InputAttemptIdentifier(1, 0, src2Info.path.getName());

    MapOutput t0MapOutput0 = getMapOutputForDirectDiskFetch(t0Identifier0, src1Info.path,
            src1Info.indexedRecords[0], t0mergeManager);
    MapOutput t0MapOutput1 = getMapOutputForDirectDiskFetch(t0Identifier1, src2Info.path,
            src2Info.indexedRecords[0], t0mergeManager);

    MapOutput t1MapOutput0 = getMapOutputForDirectDiskFetch(t1Identifier0, src1Info.path,
            src1Info.indexedRecords[1], t1mergeManager);
    MapOutput t1MapOutput1 = getMapOutputForDirectDiskFetch(t1Identifier1, src2Info.path,
            src2Info.indexedRecords[1], t1mergeManager);

    t0MapOutput0.commit();
    t0MapOutput1.commit();
    verify(t0mergeManager).closeOnDiskFile(t0MapOutput0.getOutputPath());
    verify(t0mergeManager).closeOnDiskFile(t0MapOutput1.getOutputPath());
    // Run the OnDiskMerge via MergeManager
    // Simulate the thread invocation - remove files, and invoke merge
    List<FileChunk> t0MergeFiles = new LinkedList<FileChunk>();
    t0MergeFiles.addAll(t0mergeManager.onDiskMapOutputs);
    t0mergeManager.onDiskMapOutputs.clear();
    t0mergeManager.onDiskMerger.merge(t0MergeFiles);
    Assert.assertEquals(1, t0mergeManager.onDiskMapOutputs.size());

    t1MapOutput0.commit();
    t1MapOutput1.commit();
    verify(t1mergeManager).closeOnDiskFile(t1MapOutput0.getOutputPath());
    verify(t1mergeManager).closeOnDiskFile(t1MapOutput1.getOutputPath());
    // Run the OnDiskMerge via MergeManager
    // Simulate the thread invocation - remove files, and invoke merge
    List<FileChunk> t1MergeFiles = new LinkedList<FileChunk>();
    t1MergeFiles.addAll(t1mergeManager.onDiskMapOutputs);
    t1mergeManager.onDiskMapOutputs.clear();
    t1mergeManager.onDiskMerger.merge(t1MergeFiles);
    Assert.assertEquals(1, t1mergeManager.onDiskMapOutputs.size());

    Assert.assertNotEquals(t0mergeManager.onDiskMapOutputs.iterator().next().getPath(),
            t1mergeManager.onDiskMapOutputs.iterator().next().getPath());

    Assert.assertTrue(t0mergeManager.onDiskMapOutputs.iterator().next().getPath().toString()
            .contains(t0inputContext.getUniqueIdentifier()));
    Assert.assertTrue(t1mergeManager.onDiskMapOutputs.iterator().next().getPath().toString()
            .contains(t1inputContext.getUniqueIdentifier()));

}

From source file:org.apache.tez.runtime.library.common.writers.TestUnorderedPartitionedKVWriter.java

License:Apache License

private Configuration createConfiguration(OutputContext outputContext, Class<? extends Writable> keyClass,
        Class<? extends Writable> valClass, boolean shouldCompress, int maxSingleBufferSizeBytes,
        Class<? extends Partitioner> partitionerClass) {
    Configuration conf = new Configuration(false);
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, outputContext.getWorkDirs());
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, keyClass.getName());
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, valClass.getName());
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, partitionerClass.getName());
    if (maxSingleBufferSizeBytes >= 0) {
        conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_MAX_PER_BUFFER_SIZE_BYTES,
                maxSingleBufferSizeBytes);
    }
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS, shouldCompress);
    if (shouldCompress) {
        conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_COMPRESS_CODEC, DefaultCodec.class.getName());
    }
    return conf;
}

From source file:org.apache.tez.runtime.library.shuffle.common.impl.TestSimpleFetchedInputAllocator.java

License:Apache License

@Test
public void testInMemAllocation() throws IOException {
    String localDirs = "/tmp/" + this.getClass().getName();
    Configuration conf = new Configuration();

    long jvmMax = Runtime.getRuntime().maxMemory();
    LOG.info("jvmMax: " + jvmMax);

    float bufferPercent = 0.1f;
    conf.setFloat(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_BUFFER_PERCENT, bufferPercent);
    conf.setFloat(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MEMORY_LIMIT_PERCENT, 1.0f);
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs);

    long inMemThreshold = (long) (bufferPercent * jvmMax);
    LOG.info("InMemThreshold: " + inMemThreshold);

    SimpleFetchedInputAllocator inputManager = new SimpleFetchedInputAllocator(UUID.randomUUID().toString(),
            conf, Runtime.getRuntime().maxMemory(), inMemThreshold);

    long requestSize = (long) (0.4f * inMemThreshold);
    long compressedSize = 1l;
    LOG.info("RequestSize: " + requestSize);

    FetchedInput fi1 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(1, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi1.getType());

    FetchedInput fi2 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(2, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi2.getType());

    // Over limit by this point. Next reserve should give back a DISK allocation
    FetchedInput fi3 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(3, 1));
    assertEquals(FetchedInput.Type.DISK, fi3.getType());

    // Freed one memory allocation. Next should be mem again.
    fi1.abort();
    fi1.free();
    FetchedInput fi4 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(4, 1));
    assertEquals(FetchedInput.Type.MEMORY, fi4.getType());

    // Freed one disk allocation. Next should be disk again (no mem freed)
    fi3.abort();
    fi3.free();
    FetchedInput fi5 = inputManager.allocate(requestSize, compressedSize, new InputAttemptIdentifier(4, 1));
    assertEquals(FetchedInput.Type.DISK, fi5.getType());
}

From source file:org.apache.tez.test.MiniTezCluster.java

License:Apache License

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
    // Use libs from cluster since no build is available
    conf.setBoolean(TezConfiguration.TEZ_USE_CLUSTER_HADOOP_LIBS, true);
    // blacklisting disabled to prevent scheduling issues
    conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
    }

    if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
        // nothing defined. set quick delete value
        conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0l);
    }

    File appJarLocalFile = new File(MiniTezCluster.APPJAR);

    if (!appJarLocalFile.exists()) {
        String message = "TezAppJar " + MiniTezCluster.APPJAR + " not found. Exiting.";
        LOG.info(message);
        throw new TezUncheckedException(message);
    } else {
        LOG.info("Using Tez AppJar: " + appJarLocalFile.getAbsolutePath());
    }

    FileSystem fs = FileSystem.get(conf);
    Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
    Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
    // Copy AppJar and make it public.
    Path appMasterJar = new Path(MiniTezCluster.APPJAR);
    fs.copyFromLocalFile(appMasterJar, appRemoteJar);
    fs.setPermission(appRemoteJar, new FsPermission("777"));

    conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
    LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));

    // Both physical (PMEM) and virtual (VMEM) memory checks disabled on the NM.
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use an absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using an absolute path avoids this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        fc.mkdir(stagingPath, null, true);

        //mkdir done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new TezUncheckedException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test");

    //configure the shuffle service in NM
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
            ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    super.serviceInit(conf);
}

From source file:org.apache.tez.tests.MiniTezClusterWithTimeline.java

License:Apache License

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_TEZ_FRAMEWORK_NAME);
    // Use libs from cluster since no build is available
    conf.setBoolean(TezConfiguration.TEZ_USE_CLUSTER_HADOOP_LIBS, true);
    // blacklisting disabled to prevent scheduling issues
    conf.setBoolean(TezConfiguration.TEZ_AM_NODE_BLACKLISTING_ENABLED, false);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                new File(getTestWorkDir(), "apps_staging_dir" + Path.SEPARATOR).getAbsolutePath());
    }

    if (conf.get(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC) == null) {
        // nothing defined. set quick delete value
        conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0l);
    }

    File appJarLocalFile = new File(MiniTezClusterWithTimeline.APPJAR);

    if (!appJarLocalFile.exists()) {
        String message = "TezAppJar " + MiniTezClusterWithTimeline.APPJAR + " not found. Exiting.";
        LOG.info(message);
        throw new TezUncheckedException(message);
    } else {
        LOG.info("Using Tez AppJar: " + appJarLocalFile.getAbsolutePath());
    }

    FileSystem fs = FileSystem.get(conf);
    Path testRootDir = fs.makeQualified(new Path("target", getName() + "-tmpDir"));
    Path appRemoteJar = new Path(testRootDir, "TezAppJar.jar");
    // Copy AppJar and make it public.
    Path appMasterJar = new Path(MiniTezClusterWithTimeline.APPJAR);
    fs.copyFromLocalFile(appMasterJar, appRemoteJar);
    fs.setPermission(appRemoteJar, new FsPermission("777"));

    conf.set(TezConfiguration.TEZ_LIB_URIS, appRemoteJar.toUri().toString());
    LOG.info("Set TEZ-LIB-URI to: " + conf.get(TezConfiguration.TEZ_LIB_URIS));

    // Both physical (PMEM) and virtual (VMEM) memory checks disabled on the NM.
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);

    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");

    try {
        Path stagingPath = FileContext.getFileContext(conf)
                .makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
         * Re-configure the staging path on Windows if the file system is localFs.
         * We need to use a absolute path that contains the drive letter. The unit
         * test could run on a different drive than the AM. We can run into the
         * issue that job files are localized to the drive where the test runs on,
         * while the AM starts on a different drive and fails to find the job
         * metafiles. Using absolute path can avoid this ambiguity.
         */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR,
                        new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        fc.mkdir(stagingPath, null, true);

        //mkdir done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new TezUncheckedException("Could not create staging directory. ", e);
    }
    conf.set(MRConfig.MASTER_ADDRESS, "test");

    //configure the shuffle service in NM
    conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
            new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    conf.setClass(
            String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID),
            ShuffleHandler.class, Service.class);

    // Non-standard shuffle port
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);

    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class,
            ContainerExecutor.class);

    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    super.serviceInit(conf);
}

From source file:org.archive.io.hbase.HBaseWriter.java

License:LGPL

/**
 * Instantiates a new HBaseWriter.
 * 
 * @param serialNo
 *            the serial no
 * @param settings
 *            the settings
 * @param parameters
 *            the parameters
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 */
public HBaseWriter(AtomicInteger serialNo, final WriterPoolSettings settings, HBaseParameters parameters)
        throws IOException {
    // Instantiates a new HBaseWriter for the WriterPool to use in heritrix.
    super(serialNo, settings, null);

    Preconditions.checkArgument(parameters != null);
    this.hbaseParameters = parameters;
    // create an instance of hbase configuration object
    Configuration hbaseConfiguration = HBaseConfiguration.create();
    // create a connection to hbase
    Connection connection = ConnectionFactory.createConnection(hbaseConfiguration);
    this.hTable = initializeCrawlTable(connection, TableName.valueOf(hbaseParameters.getHbaseTableName()));

    // set the zk quorum list
    log.info("setting zookeeper quorum to : " + hbaseParameters.getZkQuorum());
    hbaseConfiguration.setStrings(HConstants.ZOOKEEPER_QUORUM, hbaseParameters.getZkQuorum().split(","));

    // set the client port
    log.info("setting zookeeper client Port to : " + hbaseParameters.getZkPort());
    hbaseConfiguration.setInt(getHbaseParameters().getZookeeperClientPortKey(), hbaseParameters.getZkPort());
}

From source file:org.bgi.flexlab.gaea.tools.mapreduce.annotator.AnnotatorOptions.java

License:Open Source License

@Override
public void setHadoopConf(String[] args, Configuration conf) {
    conf.setStrings("args", args);
    conf.set("inputFilePath", getInputFilePath());
    conf.set("reference", getReferenceSequencePath());
    conf.set("configFile", getConfigFile());
    conf.set("outputType", getOutputType());
    conf.setBoolean("cacheref", isCachedRef());
    conf.setBoolean("verbose", isVerbose());
    conf.setBoolean("debug", isDebug());
}

From source file:org.bgi.flexlab.gaea.tools.mapreduce.bamqualitycontrol.BamQualityControlOptions.java

License:Open Source License

@Override
public void setHadoopConf(String[] args, Configuration conf) {
    // TODO Auto-generated method stub
    String[] otherArgs;
    try {
        otherArgs = new GenericOptionsParser(args).getRemainingArgs();
        conf.setStrings("args", otherArgs);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}

From source file:org.bgi.flexlab.gaea.tools.mapreduce.fastqqualitycontrol.FastqQualityControlOptions.java

License:Open Source License

@Override
public void setHadoopConf(String[] args, Configuration conf) {
    conf.setStrings("args", args);
}