Example usage for org.apache.hadoop.conf Configuration setClass

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setClass.

Prototype

public void setClass(String name, Class<?> theClass, Class<?> xface) 

Document

Set the value of the name property to the name of theClass, a class implementing the given interface xface.
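
For a quick feel for the API, here is a minimal, self-contained round-trip sketch. The property key example.codec.class is hypothetical; the codec types are standard Hadoop classes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;

public class SetClassExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store GzipCodec under a hypothetical property key. setClass verifies
        // that the class implements the given interface and throws a
        // RuntimeException if it does not.
        conf.setClass("example.codec.class", GzipCodec.class, CompressionCodec.class);
        // Read the class back with the matching getClass overload, supplying
        // a default for when the property is unset.
        Class<? extends CompressionCodec> codecClass =
                conf.getClass("example.codec.class", GzipCodec.class, CompressionCodec.class);
        System.out.println(codecClass.getName()); // org.apache.hadoop.io.compress.GzipCodec
    }
}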

Usage

From source file:org.apache.oozie.sla.TestSLAJobEventListener.java

License:Apache License

@Override
@Before
protected void setUp() throws Exception {
    super.setUp();
    services = new Services();
    Configuration conf = services.getConf();
    conf.set(Services.CONF_SERVICE_EXT_CLASSES,
            "org.apache.oozie.service.EventHandlerService," + "org.apache.oozie.sla.service.SLAService");
    conf.setClass(EventHandlerService.CONF_LISTENERS, SLAJobEventListener.class, JobEventListener.class);
    services.init();
}

From source file:org.apache.oozie.sla.TestSLAService.java

License:Apache License

@Override
@Before
protected void setUp() throws Exception {
    super.setUp();
    Services services = new Services();
    Configuration conf = services.getConf();
    conf.set(Services.CONF_SERVICE_EXT_CLASSES,
            "org.apache.oozie.service.EventHandlerService," + "org.apache.oozie.sla.service.SLAService");
    conf.setClass(EventHandlerService.CONF_LISTENERS, DummySLAEventListener.class, SLAEventListener.class);
    conf.setLong(SLAService.CONF_JOB_EVENT_LATENCY, 0);
    conf.setInt(EventHandlerService.CONF_WORKER_THREADS, 0);
    services.init();
    output.setLength(0);
}

From source file:org.apache.parquet.proto.ProtoWriteSupport.java

License:Apache License

public static void setSchema(Configuration configuration, Class<? extends Message> protoClass) {
    configuration.setClass(PB_CLASS_WRITE, protoClass, Message.class);
}
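
Callers would typically invoke this while configuring a Parquet write job, for example ProtoWriteSupport.setSchema(job.getConfiguration(), MyMessage.class), where MyMessage is an illustrative name for a generated protobuf Message subclass.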

From source file:org.apache.phoenix.hbase.index.balancer.IndexLoadBalancerIT.java

License:Apache License

@BeforeClass
public static void setupCluster() throws Exception {
    final int NUM_RS = 4;
    Configuration conf = UTIL.getConfiguration();
    conf.setBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, true);
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, IndexMasterObserver.class.getName());
    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, IndexLoadBalancer.class, LoadBalancer.class);
    IndexTestingUtils.setupConfig(conf);
    // disable version checking, so we can test against whatever version of HBase happens to be
    // installed (right now, it's generally going to be SNAPSHOT versions).
    conf.setBoolean(Indexer.CHECK_VERSION_CONF_KEY, false);
    // set replication required parameter
    ConfigUtil.setReplicationConfigIfAbsent(conf);
    UTIL.startMiniCluster(NUM_RS);
    admin = UTIL.getHBaseAdmin();
}

From source file:org.apache.phoenix.hbase.index.Indexer.java

License:Apache License

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    try {
        final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        String serverName = env.getRegionServerServices().getServerName().getServerName();
        if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
            // make sure the right version <-> combinations are allowed.
            String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration());
            if (errormsg != null) {
                IOException ioe = new IOException(errormsg);
                env.getRegionServerServices().abort(errormsg, ioe);
                throw ioe;
            }
        }

        this.builder = new IndexBuildManager(env);
        // Clone the config since it is shared
        Configuration clonedConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
        /*
         * Set the rpc controller factory so that the HTables used by IndexWriter would
         * set the correct priorities on the remote RPC calls.
         */
        clonedConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class);
        // lower the number of rpc retries.  We inherit config from HConnectionManager#setServerSideHConnectionRetries,
        // which by default uses a multiplier of 10.  That is too many retries for our synchronous index writes
        clonedConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, env.getConfiguration()
                .getInt(INDEX_WRITER_RPC_RETRIES_NUMBER, DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER));
        clonedConfig.setInt(HConstants.HBASE_CLIENT_PAUSE,
                env.getConfiguration().getInt(INDEX_WRITER_RPC_PAUSE, DEFAULT_INDEX_WRITER_RPC_PAUSE));
        DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(
                clonedConfig, env);
        // setup the actual index writer
        this.writer = new IndexWriter(indexWriterEnv, serverName + "-index-writer");

        this.rowLockWaitDuration = clonedConfig.getInt("hbase.rowlock.wait.duration",
                DEFAULT_ROWLOCK_WAIT_DURATION);
        this.lockManager = new LockManager();

        // Metrics impl for the Indexer -- avoiding unnecessary indirection for hadoop-1/2 compat
        this.metricSource = MetricsIndexerSourceFactory.getInstance().create();
        setSlowThresholds(e.getConfiguration());

        compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
        // lower the number of rpc retries, so we don't hang the compaction
        compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
                e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER,
                        QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER));
        compactionConfig.setInt(HConstants.HBASE_CLIENT_PAUSE,
                e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRY_PAUSE,
                        QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRY_PAUSE));

        try {
            // get the specified failure policy. We only ever override it in tests, but we need to do it
            // here
            Class<? extends IndexFailurePolicy> policyClass = env.getConfiguration().getClass(
                    INDEX_RECOVERY_FAILURE_POLICY_KEY, StoreFailuresInCachePolicy.class,
                    IndexFailurePolicy.class);
            IndexFailurePolicy policy = policyClass.getConstructor(PerRegionIndexWriteCache.class)
                    .newInstance(failedIndexEdits);
            LOG.debug("Setting up recovery writter with failure policy: " + policy.getClass());
            recoveryWriter = new RecoveryIndexWriter(policy, indexWriterEnv, serverName + "-recovery-writer");
        } catch (Exception ex) {
            throw new IOException("Could not instantiate recovery failure policy!", ex);
        }
    } catch (NoSuchMethodError ex) {
        disabled = true;
        super.start(e);
        LOG.error("Must be too early a version of HBase. Disabled coprocessor ", ex);
    }
}

From source file:org.apache.phoenix.index.PhoenixTransactionalIndexer.java

License:Apache License

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    String serverName = env.getRegionServerServices().getServerName().getServerName();
    codec = new PhoenixIndexCodec();
    codec.initialize(env);
    // Clone the config since it is shared
    Configuration clonedConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
    /*
     * Set the rpc controller factory so that the HTables used by IndexWriter would
     * set the correct priorities on the remote RPC calls.
     */
    clonedConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
            InterRegionServerIndexRpcControllerFactory.class, RpcControllerFactory.class);
    // lower the number of rpc retries.  We inherit config from HConnectionManager#setServerSideHConnectionRetries,
    // which by default uses a multiplier of 10.  That is too many retries for our synchronous index writes
    clonedConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, env.getConfiguration()
            .getInt(INDEX_WRITER_RPC_RETRIES_NUMBER, DEFAULT_INDEX_WRITER_RPC_RETRIES_NUMBER));
    clonedConfig.setInt(HConstants.HBASE_CLIENT_PAUSE,
            env.getConfiguration().getInt(INDEX_WRITER_RPC_PAUSE, DEFAULT_INDEX_WRITER_RPC_PAUSE));
    DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(clonedConfig,
            env);
    // setup the actual index writer
    // For transactional tables, we keep the index active upon a write failure
    // since we have the all versus none behavior for transactions. Also, we
    // fail on any write exception since this will end up failing the transaction.
    this.writer = new IndexWriter(IndexWriter.getCommitter(indexWriterEnv, ParallelWriterIndexCommitter.class),
            new LeaveIndexActiveFailurePolicy(), indexWriterEnv, serverName + "-tx-index-writer");
}

From source file:org.apache.phoenix.mapreduce.CsvBulkImportUtil.java

License:Apache License

/**
 * Configure an {@link ImportPreUpsertKeyValueProcessor} for a CSV bulk import job.
 *
 * @param conf job configuration
 * @param processorClass class to be used for performing pre-upsert processing
 */
public static void configurePreUpsertProcessor(Configuration conf,
        Class<? extends ImportPreUpsertKeyValueProcessor> processorClass) {
    conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, processorClass,
            ImportPreUpsertKeyValueProcessor.class);
}

From source file:org.apache.phoenix.mapreduce.CsvToKeyValueMapperTest.java

License:Apache License

@Test
public void testLoadPreUpdateProcessor() {
    Configuration conf = new Configuration();
    conf.setClass(CsvToKeyValueMapper.UPSERT_HOOK_CLASS_CONFKEY, MockUpsertProcessor.class,
            ImportPreUpsertKeyValueProcessor.class);

    ImportPreUpsertKeyValueProcessor processor = CsvToKeyValueMapper.loadPreUpsertProcessor(conf);
    assertEquals(MockUpsertProcessor.class, processor.getClass());
}

From source file:org.apache.phoenix.mapreduce.FormatToBytesWritableMapperTest.java

License:Apache License

@Test
public void testLoadPreUpdateProcessor() {
    Configuration conf = new Configuration();
    conf.setClass(PhoenixConfigurationUtil.UPSERT_HOOK_CLASS_CONFKEY, MockUpsertProcessor.class,
            ImportPreUpsertKeyValueProcessor.class);

    ImportPreUpsertKeyValueProcessor processor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf);
    assertEquals(MockUpsertProcessor.class, processor.getClass());
}
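
The two tests above exercise the read side of the same round trip. Phoenix's loader internals are not reproduced here, but the usual Hadoop retrieval pattern pairs Configuration.getClass with ReflectionUtils.newInstance. The sketch below uses illustrative names (Processor, DefaultProcessor, example.upsert.hook.class), not the actual Phoenix API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class LoadProcessorSketch {
    // Stand-in interface and default implementation for the sketch.
    public interface Processor {
    }

    public static class DefaultProcessor implements Processor {
    }

    // Hypothetical property key; Phoenix's real constant differs.
    private static final String HOOK_KEY = "example.upsert.hook.class";

    public static Processor loadProcessor(Configuration conf) {
        // Resolve the configured class, falling back to the default.
        Class<? extends Processor> clazz = conf.getClass(HOOK_KEY, DefaultProcessor.class, Processor.class);
        // Instantiate it reflectively; ReflectionUtils also injects the
        // Configuration when the class implements Configurable.
        return ReflectionUtils.newInstance(clazz, conf);
    }
}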

From source file:org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.java

License:Apache License

public static void setInputClass(final Configuration configuration, Class<? extends DBWritable> inputClass) {
    Preconditions.checkNotNull(configuration);
    configuration.setClass(INPUT_CLASS, inputClass, DBWritable.class);
}