Example usage for org.apache.hadoop.conf Configuration get

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.get.

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
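
Since get(name) returns null for an unset property, callers either null-check the result (as the examples below do) or use the two-argument overload get(name, defaultValue). A minimal, self-contained sketch; the property names here are illustrative only:

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.endpoint", "oss-cn-hangzhou.aliyuncs.com");

        // Returns the value set above.
        String endpoint = conf.get("example.endpoint");

        // Returns null: the property was never set.
        String missing = conf.get("no.such.property");

        // The two-argument overload returns a fallback instead of null.
        String withDefault = conf.get("no.such.property", "fallback");

        System.out.println(endpoint + " / " + missing + " / " + withDefault);
    }
}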

Usage

From source file: com.aliyun.fs.oss.utils.ResourceLoader.java

License: Apache License

private static List<URL> getClassLoaderURLs(Configuration conf) throws Exception {
    // Comma-separated list of paths to the OSS SDK dependency jars.
    String dependPath = conf.get("fs.oss.core.dependency.path");
    String[] sdkDeps = null;
    boolean runLocal = conf.getBoolean("mapreduce.job.run-local", false);
    if ((dependPath == null || dependPath.isEmpty()) && !runLocal) {
        throw new RuntimeException(
                "Job does not run locally; please set \"fs.oss.core.dependency.path\" first.");
    } else if (dependPath == null || dependPath.isEmpty()) {
        LOG.info("\"mapreduce.job.run-local\" is set to true.");
    } else {
        sdkDeps = dependPath.split(",");
    }

    ArrayList<URL> urls = new ArrayList<URL>();
    if (sdkDeps != null) {
        for (String dep : sdkDeps) {
            urls.add(new URL("file://" + dep));
        }
    }
    if (conf.getBoolean("mapreduce.job.run-local", false)) {
        // Classpath entries are separated by ';' on Windows and ':' elsewhere;
        // File.pathSeparator resolves to the right one for the current OS.
        String[] cp = System.getProperty("java.class.path").split(File.pathSeparator);
        for (String entry : cp) {
            urls.add(new URL("file:" + entry));
        }
    }

    return urls;
}
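
The URL list built above is presumably handed to a URLClassLoader so that the SDK jars become resolvable at runtime. A minimal sketch of that follow-up step (hypothetical wiring, not taken from the source file):

    List<URL> urls = getClassLoaderURLs(conf);
    URLClassLoader loader = new URLClassLoader(urls.toArray(new URL[0]),
            Thread.currentThread().getContextClassLoader());
    // Classes packed in the dependency jars can now be loaded through 'loader'.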

From source file: com.aliyun.fs.utils.OssInputUtils.java

License: Apache License

public RecordReader<LongWritable, Text> getOssRecordReader(FileSplit fileSplit, Configuration conf)
        throws IOException {
    String delimiter = conf.get("textinputformat.record.delimiter");
    byte[] recordDelimiterBytes = null;
    if (null != delimiter) {
        recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
    }

    if (fs == null) {
        this.fs = FileSystem.get(fileSplit.getPath().toUri(), conf);
        fs.initialize(fileSplit.getPath().toUri(), conf);
    }

    return new OssRecordReader(conf, fileSplit, fs, recordDelimiterBytes);
}
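
The "textinputformat.record.delimiter" key read above is the standard Hadoop property for overriding the default newline record delimiter. A job that needs custom record boundaries would set it before submission; a minimal sketch:

    Configuration conf = new Configuration();
    // Treat blank-line-separated paragraphs as records instead of single lines.
    conf.set("textinputformat.record.delimiter", "\n\n");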

From source file: com.aliyun.odps.fs.VolumeFileSystem.java

License: Apache License

private String getHomeVolume(Configuration conf) {
    String defaultVolume = conf.get(VolumeFileSystemConfigKeys.ODPS_HOME_VOLMUE);
    if (defaultVolume != null) {
        return defaultVolume;
    } else {
        return VolumeFSConstants.DEFAULT_HOME_VOLUME;
    }
}
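
Configuration also offers the two-argument overload get(String name, String defaultValue), so the same fallback can be expressed in a single call; an equivalent sketch of the method body:

    return conf.get(VolumeFileSystemConfigKeys.ODPS_HOME_VOLMUE, VolumeFSConstants.DEFAULT_HOME_VOLUME);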

From source file: com.aliyun.odps.fs.VolumeFileSystem.java

License: Apache License

private VolumeFSClient createVolumeClient(Configuration conf) throws IOException {
    String accessId = conf.get(VolumeFileSystemConfigKeys.ODPS_ACCESS_ID);
    String accessKey = conf.get(VolumeFileSystemConfigKeys.ODPS_ACCESS_KEY);
    if (accessId == null || accessKey == null) {
        throw new IOException("Incomplete config, no accessId or accessKey");
    }
    String serviceEndpoint = conf.get(VolumeFileSystemConfigKeys.ODPS_SERVICE_ENDPOINT);
    String tunnelEndpoint = conf.get(VolumeFileSystemConfigKeys.ODPS_TUNNEL_ENDPOINT);
    if (serviceEndpoint == null) {
        throw new IOException("Incomplete config, no " + VolumeFileSystemConfigKeys.ODPS_SERVICE_ENDPOINT);
    }
    Account account = new AliyunAccount(accessId, accessKey);
    Odps odps = new Odps(account);
    odps.setEndpoint(serviceEndpoint);
    return new VolumeFSClient(odps, project, serviceEndpoint, tunnelEndpoint, conf);
}

From source file: com.aliyun.openservices.tablestore.hadoop.TableStore.java

License: Apache License

/**
 * for internal use only
 */
public static SyncClientInterface newOtsClient(Configuration conf) {
    Credential cred = Credential.deserialize(conf.get(TableStore.CREDENTIAL));
    Endpoint ep = Endpoint.deserialize(conf.get(TableStore.ENDPOINT));
    ClientConfiguration clientCfg = new ClientConfiguration();
    clientCfg.setRetryStrategy(new DefaultRetryStrategy(10, TimeUnit.SECONDS));
    if (cred.securityToken == null) {
        return new SyncClient(ep.endpoint, cred.accessKeyId, cred.accessKeySecret, ep.instance, clientCfg);
    } else {
        return new SyncClient(ep.endpoint, cred.accessKeyId, cred.accessKeySecret, ep.instance, clientCfg,
                cred.securityToken);
    }
}

From source file: com.aliyun.openservices.tablestore.hadoop.TableStoreInputFormat.java

License: Apache License

/**
 * Add a RangeRowQueryCriteria object as data source.
 */
public static void addCriteria(Configuration conf, RangeRowQueryCriteria criteria) {
    Preconditions.checkNotNull(criteria, "criteria must be nonnull");
    Preconditions.checkArgument(criteria.getDirection() == Direction.FORWARD, "criteria must be forward");
    String cur = conf.get(CRITERIA);
    MultiCriteria cri = null;
    if (cur == null) {
        cri = new MultiCriteria();
    } else {
        cri = MultiCriteria.deserialize(cur);
    }
    cri.addCriteria(criteria);
    conf.set(CRITERIA, cri.serialize());
}
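
For context, a caller typically builds a forward range criteria and registers it before submitting the job. A hedged sketch, assuming the TableStore SDK's RangeRowQueryCriteria API; the table name and keys are placeholders:

    RangeRowQueryCriteria criteria = new RangeRowQueryCriteria("my_table");
    criteria.setDirection(Direction.FORWARD);
    // ... set the inclusive start and exclusive end primary keys here ...
    TableStoreInputFormat.addCriteria(conf, criteria);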

From source file: com.aliyun.openservices.tablestore.hadoop.TableStoreInputFormat.java

License: Apache License

private static List<RangeRowQueryCriteria> getScans(Configuration conf) {
    String cur = conf.get(CRITERIA);
    MultiCriteria cri = null;
    if (cur == null) {
        cri = new MultiCriteria();
    } else {
        cri = MultiCriteria.deserialize(cur);
    }
    List<RangeRowQueryCriteria> scans = cri.getCriteria();

    return scans;
}

From source file: com.aliyun.openservices.tablestore.hadoop.TableStoreOutputFormat.java

License: Apache License

public static void checkTable(Configuration conf) throws IOException {
    String outputTable = conf.get(OUTPUT_TABLE);
    Preconditions.checkNotNull(outputTable, "Output table must be nonnull.");
    SyncClientInterface ots = TableStore.newOtsClient(conf);
    try {
        // test existence of the output table
        ots.describeTable(new DescribeTableRequest(outputTable));
        logger.info("{} exists", outputTable);
    } catch (TableStoreException ex) {
        logger.error("{} does not exist, or it is unaccessible.", outputTable);
        logger.error("{}", ex);
        throw new IOException("output table is unaccessible.");
    } finally {
        ots.shutdown();
    }
}

From source file: com.aliyun.openservices.tablestore.hadoop.TableStoreOutputFormat.java

License: Apache License

@Override
public RecordWriter<Writable, BatchWriteWritable> getRecordWriter(TaskAttemptContext context)
        throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    String outputTable = conf.get(OUTPUT_TABLE);
    Preconditions.checkNotNull(outputTable, "Output table must be set.");
    SyncClientInterface ots = TableStore.newOtsClient(conf);
    int maxBatchSize = conf.getInt(MAX_UPDATE_BATCH_SIZE, 0);
    if (maxBatchSize == 0) {
        return new TableStoreRecordWriter(ots, outputTable);
    } else {
        return new TableStoreRecordWriter(ots, outputTable, maxBatchSize);
    }
}

From source file: com.aliyun.openservices.tablestore.hadoop.TableStoreRecordReader.java

License: Apache License

/**
 * for internal usage only
 */
public void initialize(InputSplit split, Configuration cfg) {
    close();

    Credential cred;
    {
        String in = cfg.get(TableStore.CREDENTIAL);
        Preconditions.checkNotNull(in, "Must set \"TABLESTORE_CREDENTIAL\"");
        cred = Credential.deserialize(in);
    }
    Endpoint ep;
    {
        String in = cfg.get(TableStore.ENDPOINT);
        Preconditions.checkNotNull(in, "Must set \"TABLESTORE_ENDPOINT\"");
        ep = Endpoint.deserialize(in);
    }
    if (cred.securityToken == null) {
        ots = new SyncClient(ep.endpoint, cred.accessKeyId, cred.accessKeySecret, ep.instance);
    } else {
        ots = new SyncClient(ep.endpoint, cred.accessKeyId, cred.accessKeySecret, ep.instance,
                cred.securityToken);
    }

    TableStoreInputSplit tsSplit = (TableStoreInputSplit) split;
    scan = tsSplit.getRangeRowQueryCriteria();
    logger.info("table: {} columns-to-get: {} start: {} end: {}", scan.getTableName(), scan.getColumnsToGet(),
            scan.getInclusiveStartPrimaryKey().toString(), scan.getExclusiveEndPrimaryKey().toString());
    results = ots.createRangeIterator(new RangeIteratorParameter(scan));
}