Usage examples for org.apache.hadoop.conf.Configuration.getClass

public <U> Class<? extends U> getClass(String name, Class<? extends U> defaultValue, Class<U> xface)

Gets the value of the name property as a Class implementing the interface specified by xface. If no such property is specified, defaultValue is returned; an exception is thrown if the returned class does not implement the named interface.
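Before the examples, here is a minimal, self-contained sketch of the pattern they all share: resolve the configured implementation class with getClass, then instantiate it with ReflectionUtils. The Codec interface, IdentityCodec class, and example.codec.class key are hypothetical, invented only for this illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class GetClassExample {

    /** Hypothetical plugin interface, invented for this illustration. */
    public interface Codec {
        byte[] encode(byte[] input);
    }

    /** Hypothetical default implementation. */
    public static class IdentityCodec implements Codec {
        @Override
        public byte[] encode(byte[] input) {
            return input;
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Key unset: getClass falls back to the supplied default class.
        Class<? extends Codec> codecClass = conf.getClass("example.codec.class",
                IdentityCodec.class, Codec.class);

        // ReflectionUtils instantiates the class and, if it implements
        // Configurable, injects the Configuration as well.
        Codec codec = ReflectionUtils.newInstance(codecClass, conf);
        System.out.println(codec.getClass().getName()); // ...IdentityCodec

        // setClass records the class name and checks that it implements Codec,
        // so a later getClass resolves to the configured implementation.
        conf.setClass("example.codec.class", IdentityCodec.class, Codec.class);
    }
}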
From source file: org.apache.parquet.avro.AvroReadSupport.java
License: Apache License

private GenericData getDataModel(Configuration conf) {
    if (model != null) {
        return model;
    }
    Class<? extends AvroDataSupplier> suppClass = conf.getClass(AVRO_DATA_SUPPLIER,
            SpecificDataSupplier.class, AvroDataSupplier.class);
    return ReflectionUtils.newInstance(suppClass, conf).get();
}
From source file: org.apache.parquet.avro.AvroWriteSupport.java
License: Apache License

private static GenericData getDataModel(Configuration conf) {
    Class<? extends AvroDataSupplier> suppClass = conf.getClass(AVRO_DATA_SUPPLIER,
            SpecificDataSupplier.class, AvroDataSupplier.class);
    return ReflectionUtils.newInstance(suppClass, conf).get();
}
From source file: org.apache.parquet.avro.DruidParquetAvroReadSupport.java
License: Apache License

@Override
public RecordMaterializer<GenericRecord> prepareForRead(Configuration configuration,
        Map<String, String> keyValueMetaData, MessageType fileSchema, ReadContext readContext) {
    // coercing this value to false here for friendlier default behavior
    // see https://github.com/apache/incubator-druid/issues/5433#issuecomment-388539306
    String jobProp = "parquet.avro.add-list-element-records";
    Boolean explicitlySet = configuration.getBoolean(jobProp, false);
    if (!explicitlySet) {
        configuration.setBoolean(jobProp, false);
    }
    MessageType parquetSchema = readContext.getRequestedSchema();
    Schema avroSchema = new AvroSchemaConverter(configuration).convert(parquetSchema);
    Class<? extends AvroDataSupplier> suppClass = configuration.getClass(AVRO_DATA_SUPPLIER,
            SpecificDataSupplier.class, AvroDataSupplier.class);
    AvroDataSupplier supplier = ReflectionUtils.newInstance(suppClass, configuration);
    return new AvroRecordMaterializer<>(parquetSchema, avroSchema, supplier.get());
}
From source file: org.apache.parquet.avro.DruidParquetReadSupport.java
License: Apache License

@Override
public RecordMaterializer<GenericRecord> prepareForRead(Configuration configuration,
        Map<String, String> keyValueMetaData, MessageType fileSchema, ReadContext readContext) {
    MessageType parquetSchema = readContext.getRequestedSchema();
    Schema avroSchema = new AvroSchemaConverter(configuration).convert(parquetSchema);
    Class<? extends AvroDataSupplier> suppClass = configuration.getClass(AVRO_DATA_SUPPLIER,
            SpecificDataSupplier.class, AvroDataSupplier.class);
    AvroDataSupplier supplier = ReflectionUtils.newInstance(suppClass, configuration);
    return new AvroRecordMaterializer<GenericRecord>(parquetSchema, avroSchema, supplier.get());
}
From source file: org.apache.parquet.proto.ProtoWriteSupport.java
License: Apache License

@Override
public WriteContext init(Configuration configuration) {
    // if no protobuf descriptor was given in the constructor, load the class from the configuration (set with setProtobufClass)
    if (protoMessage == null) {
        Class<? extends Message> pbClass = configuration.getClass(PB_CLASS_WRITE, null, Message.class);
        if (pbClass != null) {
            protoMessage = pbClass;
        } else {
            String msg = "Protocol buffer class not specified.";
            String hint = " Please use method ProtoParquetOutputFormat.setProtobufClass(...) or other similar method.";
            throw new BadConfigurationException(msg + hint);
        }
    }
    MessageType rootSchema = new ProtoSchemaConverter().convert(protoMessage);
    Descriptors.Descriptor messageDescriptor = Protobufs.getMessageDescriptor(protoMessage);
    validatedMapping(messageDescriptor, rootSchema);
    this.messageWriter = new MessageWriter(messageDescriptor, rootSchema);
    Map<String, String> extraMetaData = new HashMap<String, String>();
    extraMetaData.put(ProtoReadSupport.PB_CLASS, protoMessage.getName());
    extraMetaData.put(ProtoReadSupport.PB_DESCRIPTOR, serializeDescriptor(protoMessage));
    return new WriteContext(rootSchema, extraMetaData);
}
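Note the null default in this example: when the property is unset and defaultValue is null, Configuration.getClass returns null rather than throwing, so the caller is responsible for the null check and for failing fast, as ProtoWriteSupport does here with BadConfigurationException.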
From source file: org.apache.phoenix.hbase.index.builder.BaseIndexBuilder.java
License: Apache License

@Override
public void setup(RegionCoprocessorEnvironment env) throws IOException {
    this.env = env;
    // set up the phoenix codec. Generally, this will just be the standard one, but abstracting here
    // so we can use it later when generalizing covered indexes
    Configuration conf = env.getConfiguration();
    Class<? extends IndexCodec> codecClass = conf.getClass(CODEC_CLASS_NAME_KEY, null, IndexCodec.class);
    try {
        Constructor<? extends IndexCodec> meth = codecClass.getDeclaredConstructor(new Class[0]);
        meth.setAccessible(true);
        this.codec = meth.newInstance();
        this.codec.initialize(env);
    } catch (IOException e) {
        throw e;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
From source file: org.apache.phoenix.hbase.index.builder.IndexBuildManager.java
License: Apache License

private static IndexBuilder getIndexBuilder(RegionCoprocessorEnvironment e) throws IOException {
    Configuration conf = e.getConfiguration();
    Class<? extends IndexBuilder> builderClass = conf.getClass(Indexer.INDEX_BUILDER_CONF_KEY, null,
            IndexBuilder.class);
    try {
        IndexBuilder builder = builderClass.newInstance();
        builder.setup(e);
        return builder;
    } catch (InstantiationException e1) {
        throw new IOException("Couldn't instantiate index builder:" + builderClass
                + ", disabling indexing on table " + e.getRegion().getTableDesc().getNameAsString());
    } catch (IllegalAccessException e1) {
        throw new IOException("Couldn't instantiate index builder:" + builderClass
                + ", disabling indexing on table " + e.getRegion().getTableDesc().getNameAsString());
    }
}
From source file: org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.java
License: Apache License

@Override
public void setup(RegionCoprocessorEnvironment env) throws IOException {
    this.env = env;
    // set up the phoenix codec. Generally, this will just be the standard one, but abstracting here
    // so we can use it later when generalizing covered indexes
    Configuration conf = env.getConfiguration();
    Class<? extends IndexCodec> codecClass = conf.getClass(CODEC_CLASS_NAME_KEY, null, IndexCodec.class);
    try {
        Constructor<? extends IndexCodec> meth = codecClass.getDeclaredConstructor(new Class[0]);
        meth.setAccessible(true);
        this.codec = meth.newInstance();
        this.codec.initialize(env);
    } catch (IOException e) {
        throw e;
    } catch (Exception e) {
        throw new IOException(e);
    }
    this.localTable = new LocalTable(env);
}
From source file: org.apache.phoenix.hbase.index.write.IndexWriter.java
License: Apache License

public static IndexCommitter getCommitter(RegionCoprocessorEnvironment env,
        Class<? extends IndexCommitter> defaultClass) throws IOException {
    Configuration conf = env.getConfiguration();
    try {
        IndexCommitter committer = conf.getClass(INDEX_COMMITTER_CONF_KEY, defaultClass, IndexCommitter.class)
                .newInstance();
        return committer;
    } catch (InstantiationException e) {
        throw new IOException(e);
    } catch (IllegalAccessException e) {
        throw new IOException(e);
    }
}
From source file: org.apache.phoenix.hbase.index.write.IndexWriter.java
License: Apache License

public static IndexFailurePolicy getFailurePolicy(RegionCoprocessorEnvironment env) throws IOException {
    Configuration conf = env.getConfiguration();
    try {
        IndexFailurePolicy committer = conf.getClass(INDEX_FAILURE_POLICY_CONF_KEY,
                KillServerOnFailurePolicy.class, IndexFailurePolicy.class).newInstance();
        return committer;
    } catch (InstantiationException e) {
        throw new IOException(e);
    } catch (IllegalAccessException e) {
        throw new IOException(e);
    }
}
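A closing note on the three instantiation styles seen above: ReflectionUtils.newInstance additionally injects the Configuration into classes implementing Configurable; Class.newInstance requires an accessible no-argument constructor (and is deprecated since Java 9); and the Phoenix codec builders reach non-public constructors explicitly via getDeclaredConstructor plus setAccessible(true).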