Example usage for org.apache.hadoop.conf Configuration getClass


Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getClass(String name, Class<?> defaultValue) from open-source projects.

Prototype

public Class<?> getClass(String name, Class<?> defaultValue) 


Document

Get the value of the name property as a Class. If no such property is specified, the defaultValue is returned.
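
Before the project examples below, here is a minimal, self-contained sketch of the method's behavior (the property name "example.impl" is hypothetical): when the property is unset, the supplied default is returned; when it is set to a fully qualified class name, that class is loaded and returned.

import org.apache.hadoop.conf.Configuration;

public class GetClassDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Property unset: the supplied default is returned.
        Class<?> impl = conf.getClass("example.impl", java.util.ArrayList.class);
        System.out.println(impl); // class java.util.ArrayList

        // Property set to a fully qualified class name: that class is loaded.
        conf.set("example.impl", "java.util.LinkedList");
        impl = conf.getClass("example.impl", java.util.ArrayList.class);
        System.out.println(impl); // class java.util.LinkedList
    }
}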

Usage

From source file:com.netflix.bdp.s3mper.listing.ConsistentListingAspect.java

License:Apache License

/**
 * Creates the metastore on initialization.
 *
 * #TODO The metastore isn't created instantly by DynamoDB.  This should wait until
 * the initialization is complete.  If the store doesn't exist, calls will fail until
 * it is created.
 * 
 * @param jp
 * @throws Exception  
 */
@Before("init()")
public synchronized void initialize(JoinPoint jp) throws Exception {

    URI uri = (URI) jp.getArgs()[0];
    Configuration conf = (Configuration) jp.getArgs()[1];

    updateConfig(conf);

    //Check again after updating configs
    if (disabled) {
        return;
    }

    if (metastore == null) {
        log.debug("Initializing S3mper Metastore");

        //FIXME: This is defaulted to the dynamodb metastore impl, but shouldn't 
        //       reference it directly like this.
        Class<?> metaImpl = conf.getClass("s3mper.metastore.impl",
                com.netflix.bdp.s3mper.metastore.impl.DynamoDBMetastore.class);

        try {
            metastore = Metastore.getFilesystemMetastore(conf);
            metastore.initalize(uri, conf);
        } catch (Exception e) {
            disable();

            if (failOnError) {
                throw e;
            }
        }
    } else {
        log.debug("S3mper Metastore already initialized.");
    }

    if (alertDispatcher == null) {
        log.debug("Initializing Alert Dispatcher");

        try {
            Class<?> dispatcherImpl = conf.getClass("s3mper.dispatcher.impl",
                    com.netflix.bdp.s3mper.alert.impl.CloudWatchAlertDispatcher.class);

            alertDispatcher = (AlertDispatcher) ReflectionUtils.newInstance(dispatcherImpl, conf);
            alertDispatcher.init(uri, conf);
        } catch (Exception e) {
            log.error("Error initializing s3mper alert dispatcher", e);

            disable();

            if (failOnError) {
                throw e;
            }
        }
    } else {
        alertDispatcher.setConfig(conf);
    }
}
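
Note that metaImpl is resolved from "s3mper.metastore.impl" above but never used: the metastore instance is actually obtained through Metastore.getFilesystemMetastore(conf) (shown next), which performs its own getClass lookup.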

From source file:com.netflix.bdp.s3mper.metastore.Metastore.java

License:Apache License

public static FileSystemMetastore getFilesystemMetastore(Configuration conf) throws Exception {
    if (metastore == null) {
        synchronized (Metastore.class) {
            if (metastore == null) {
                Class<?> metaImpl = conf.getClass("s3mper.metastore.impl",
                        com.netflix.bdp.s3mper.metastore.impl.BigTableMetastore.class);
                //                            com.netflix.bdp.s3mper.metastore.impl.DynamoDBMetastore.class);

                try {
                    metastore = (FileSystemMetastore) ReflectionUtils.newInstance(metaImpl, conf);
                    if (log.isDebugEnabled()) {
                        metastore = new LoggingMetastore(metastore);
                    }
                } catch (Exception e) {
                    log.error("Error initializing s3mper metastore", e);
                    throw e;
                }

            }
        }
    }
    return metastore;
}
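
This accessor lazily creates the singleton with double-checked locking; for that pattern to be safe under the Java memory model, the metastore field must be declared volatile (the declaration is not shown in this excerpt).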

From source file:com.scaleoutsoftware.soss.hserver.GridOutputFormat.java

License:Apache License

@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {

    Configuration configuration = taskAttemptContext.getConfiguration();

    if (configuration.getBoolean(outputIsNamedMapProperty, false)) { //This is a NamedMap
        String mapName = configuration.get(outputNamedMapProperty);
        Class<CustomSerializer<K>> keySerializerClass = (Class<CustomSerializer<K>>) configuration
                .getClass(outputNamedMapKeySerializerProperty, null);
        Class<CustomSerializer<V>> valueSerializerClass = (Class<CustomSerializer<V>>) configuration
                .getClass(outputNamedMapValueSerializerProperty, null);
        int smOrdinal = configuration.getInt(SERIALIZATION_MODE, SerializationMode.DEFAULT.ordinal());
        int amOrdinal = configuration.getInt(AVAILABILITY_MODE, AvailabilityMode.USE_REPLICAS.ordinal());
        SerializationMode serializationMode = SerializationMode.values()[smOrdinal];
        AvailabilityMode availabilityMode = AvailabilityMode.values()[amOrdinal];

        if (mapName == null || mapName.length() == 0 || keySerializerClass == null
                || valueSerializerClass == null) {
            throw new IOException("Input format is not configured with a valid NamedMap.");
        }

        CustomSerializer<K> keySerializer = ReflectionUtils.newInstance(keySerializerClass, configuration);
        keySerializer.setObjectClass((Class<K>) configuration.getClass(outputNamedMapKeyProperty, null));
        CustomSerializer<V> valueSerializer = ReflectionUtils.newInstance(valueSerializerClass, configuration);
        valueSerializer.setObjectClass((Class<V>) configuration.getClass(outputNamedMapValueProperty, null));
        NamedMap<K, V> namedMap = NamedMapFactory.getMap(mapName, keySerializer, valueSerializer);
        namedMap.setSerializationMode(serializationMode);
        namedMap.setAvailabilityMode(availabilityMode);
        return new NamedMapRecordWriter<K, V>(namedMap);
    } else { //This is a NamedCache
        String cacheName = configuration.get(outputNamedCacheProperty);
        if (cacheName == null || cacheName.length() == 0)
            throw new IOException("Output NamedCache not specified.");

        NamedCache cache;

        try {
            cache = CacheFactory.getCache(cacheName);
        } catch (NamedCacheException e) {
            throw new IOException("Cannot initialize NamedCache.", e);
        }

        Class valueClass = taskAttemptContext.getOutputValueClass();
        if (Writable.class.isAssignableFrom(valueClass)) {
            cache.setCustomSerialization(new WritableSerializer(valueClass));
        }

        return new NamedCacheRecordWriter<K, V>(cache);
    }
}
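
Because getClass(name, null) returns a plain Class<?>, the casts to Class<CustomSerializer<K>> and Class<CustomSerializer<V>> above are unchecked. Passing null as the default also lets the writer detect a missing serializer configuration and fail with an IOException instead of silently falling back.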

From source file:com.scaleoutsoftware.soss.hserver.NamedMapInputFormat.java

License:Apache License

@Override
@SuppressWarnings("unchecked")
public RecordReader<K, V> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    Configuration configuration = taskAttemptContext.getConfiguration();
    int mapId = configuration.getInt(inputAppIdProperty, 0);
    Class<CustomSerializer<K>> keySerializerClass = (Class<CustomSerializer<K>>) configuration
            .getClass(inputNamedMapKeySerializerProperty, null);
    Class<CustomSerializer<V>> valueSerializerClass = (Class<CustomSerializer<V>>) configuration
            .getClass(inputNamedMapValueSerializerProperty, null);

    if (mapId == 0 || keySerializerClass == null || valueSerializerClass == null) {
        throw new IOException("Input format is not configured with a valid NamedMap.");
    }

    CustomSerializer<K> keySerializer = ReflectionUtils.newInstance(keySerializerClass, configuration);
    keySerializer.setObjectClass((Class<K>) configuration.getClass(inputNamedMapKeyProperty, null));
    CustomSerializer<V> valueSerializer = ReflectionUtils.newInstance(valueSerializerClass, configuration);
    valueSerializer.setObjectClass((Class<V>) configuration.getClass(inputNamedMapValueProperty, null));
    int smOrdinal = configuration.getInt(SERIALIZATION_MODE, SerializationMode.DEFAULT.ordinal());
    SerializationMode serializationMode = SerializationMode.values()[smOrdinal];
    return new NamedMapReader<K, V>(configuration, mapId, keySerializer, valueSerializer, serializationMode);
}

From source file:com.taobao.adfs.distributed.editlogger.DistributedEditLogger.java

License:Apache License

public static DistributedEditLogger getDistributedEditLogger(Configuration conf, Object data)
        throws IOException {
    try {
        conf = (conf == null) ? new Configuration(false) : conf;
        Class<?> editLoggerClass = conf.getClass("distributed.edit.log.class.name",
                DistributedEditLoggerInMemory.class);
        Constructor<?> constructor = editLoggerClass.getConstructor(Configuration.class, Object.class);
        return (DistributedEditLogger) constructor.newInstance(conf, data);
    } catch (Throwable t) {
        throw new IOException(t);
    }
}

From source file:com.taobao.datax.plugins.common.DFSUtils.java

License:Open Source License

/**
 * Initializes a handle to the {@link FileSystem} for the given URI.
 *
 * @param uri
 *            URI
 *
 * @param conf
 *            {@link Configuration}
 *
 * @return a FileSystem instance
 */

public static FileSystem createFileSystem(URI uri, Configuration conf) throws IOException {
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz == null) {
        throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
    fs.initialize(uri, conf);
    return fs;
}
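
Passing null as the default value, as createFileSystem() does, turns getClass into an existence check: it returns null when the property is unset instead of falling back to an implementation. A minimal, self-contained sketch of the pattern (the "mock" scheme is hypothetical):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

public class SchemeCheck {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(false);
        // No "fs.mock.impl" property is set, so getClass returns the null default.
        Class<?> clazz = conf.getClass("fs.mock.impl", null);
        if (clazz == null) {
            // Fail fast with a descriptive error, as createFileSystem() does above.
            throw new IOException("No FileSystem for scheme: mock");
        }
    }
}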

From source file:com.thinkbiganalytics.kylo.catalog.aws.S3FileSystemProvider.java

License:Apache License

/**
 * Gets a custom credentials provider from the specified Hadoop configuration.
 */
@Nonnull
@VisibleForTesting
Optional<AWSCredentialsProvider> getCustomCredentialsProvider(@Nonnull final URI uri,
        @Nonnull final Configuration conf) {
    return Optional.ofNullable(conf.getClass("fs.s3.customAWSCredentialsProvider", null)).map(providerClass -> {
        try {
            return S3AUtils.createAWSCredentialProvider(conf, providerClass, uri);
        } catch (final IOException e) {
            throw new IllegalArgumentException("Unable to create S3 client: " + e, e);
        }
    });
}

From source file:com.thinkbiganalytics.kylo.util.HadoopClassLoaderTest.java

License:Apache License

/**
 * Test Hadoop class loader.
 */
@Test
@SuppressWarnings({ "squid:S2095", "unchecked" })
public void test() {
    final Configuration conf = new Configuration(false);
    final HadoopClassLoader classLoader = new HadoopClassLoader(conf);

    // Test null paths
    Assert.assertFalse("Expected null jar to be ignored", classLoader.addJar(null));
    Assert.assertFalse("Expected null jars to be ignored", classLoader.addJars(null));
    Assert.assertArrayEquals(new URL[0], classLoader.getURLs());
    Assert.assertEquals(0, conf.size());

    // Test invalid path
    Assert.assertFalse("Expected invalid jar to be ignored",
            classLoader.addJar("file:/tmp/" + UUID.randomUUID()));
    Assert.assertArrayEquals(new URL[0], classLoader.getURLs());
    Assert.assertEquals(0, conf.size());

    // Test Hadoop path
    Assert.assertTrue("Expected Hadoop jar to be added", classLoader.addJar("mock:/tmp/file.ext"));
    Matcher matcher1 = withToString(CoreMatchers.equalTo("hadoop:mock:/tmp/file.ext"));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher1));

    // Test path without FileSystem services
    final String classUrl = getClass().getResource("./").toString();
    Assert.assertTrue("Expected class directory to be added", classLoader.addJar(classUrl));
    Matcher matcher2 = withToString(CoreMatchers.equalTo(classUrl));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher2));
    Assert.assertEquals(0, conf.size());

    // Test path with FileSystem services
    final String resourceUrl = getClass().getResource("/").toString();
    Assert.assertTrue("Expected resource directory to be added", classLoader.addJar(resourceUrl));
    Matcher matcher3 = withToString(CoreMatchers.equalTo(resourceUrl));
    Assert.assertThat(Arrays.asList(classLoader.getURLs()), CoreMatchers.hasItems(matcher3));
    Assert.assertEquals(MockFileSystem.class, conf.getClass("fs.mock.impl", null));

    // Test existing jar
    final int existingSize = classLoader.getURLs().length;
    Assert.assertFalse("Expected existing jar to be ignored", classLoader.addJar(resourceUrl));
    Assert.assertEquals(existingSize, classLoader.getURLs().length);
}

From source file:com.xiaoxiaomo.mr.utils.kafka.HadoopJobMapper.java

License:Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    try {
        //            Class<?> serdeClass = conf.getClass(CONFIG_SERDE_CLASS, null);
        //            if (serdeClass != null) {
        //                serde = serdeClass.asSubclass(Serde.class).newInstance();
        //                log.info("Using Serde " + extractor);
        //            }
        Class<?> extractorClass = conf.getClass(CONFIG_TIMESTAMP_EXTRACTOR_CLASS, null);
        if (extractorClass != null) {
            extractor = extractorClass.asSubclass(TimestampExtractor.class).newInstance();
            log.info("Using timestamp extractor " + extractor);
        }

    } catch (Exception e) {
        throw new IOException(e);
    }
    super.setup(context);
}
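
Unlike the earlier examples, which instantiate the configured class through ReflectionUtils.newInstance, this mapper narrows the result with Class.asSubclass and calls newInstance() directly. ReflectionUtils.newInstance has the added benefit of injecting the Configuration into classes that implement Configurable, which plain newInstance() does not do.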

From source file:edu.indiana.d2i.htrc.io.mem.MemCachedRecordWriter.java

License:Apache License

public MemCachedRecordWriter(Configuration conf) {
    // read configuration
    MAX_EXPIRE = conf.getInt(HTRCConstants.MEMCACHED_MAX_EXPIRE, -1);
    int numClients = conf.getInt(HTRCConstants.MEMCACHED_CLIENT_NUM, -1);
    String[] hostArray = conf.getStrings(HTRCConstants.MEMCACHED_HOSTS);
    List<String> hosts = Arrays.asList(hostArray);
    Class<?> writableClass = conf.getClass("mapred.output.value.class", Writable.class);

    String namespace = conf.get(MemKMeansConfig.KEY_NS);
    if (namespace != null)
        NameSpace = namespace;

    client = ThreadedMemcachedClient.getThreadedMemcachedClient(numClients, hosts);
    transcoder = new HadoopWritableTranscoder<V>(conf, writableClass);
}