Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

In this page you can find the example usage for org.apache.hadoop.conf Configuration getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Source Link

Document

Get the value of the named property as a boolean, falling back to the supplied default value when the property is not set.

Usage

From source file:mvm.rya.indexing.accumulo.ConfigUtils.java

License:Apache License

/** Returns whether the temporal index is enabled in {@code conf}; disabled by default. */
public static boolean getUseTemporal(Configuration conf) {
    final boolean temporalEnabled = conf.getBoolean(USE_TEMPORAL, false);
    return temporalEnabled;
}

From source file:mvm.rya.indexing.accumulo.ConfigUtils.java

License:Apache License

/** Returns whether the entity index is enabled in {@code conf}; disabled by default. */
public static boolean getUseEntity(Configuration conf) {
    final boolean entityEnabled = conf.getBoolean(USE_ENTITY, false);
    return entityEnabled;
}

From source file:mvm.rya.indexing.accumulo.ConfigUtils.java

License:Apache License

/** Returns whether PCJ (precomputed join) indexing is enabled in {@code conf}; disabled by default. */
public static boolean getUsePCJ(Configuration conf) {
    final boolean pcjEnabled = conf.getBoolean(USE_PCJ, false);
    return pcjEnabled;
}

From source file:mvm.rya.indexing.accumulo.ConfigUtils.java

License:Apache License

/** Returns whether optimal-PCJ query planning is enabled in {@code conf}; disabled by default. */
public static boolean getUseOptimalPCJ(Configuration conf) {
    final boolean optimalPcjEnabled = conf.getBoolean(USE_OPTIMAL_PCJ, false);
    return optimalPcjEnabled;
}

From source file:mvm.rya.indexing.accumulo.ConfigUtils.java

License:Apache License

/** Returns whether the Mongo backend is enabled in {@code conf}; disabled by default. */
public static boolean getUseMongo(Configuration conf) {
    final boolean mongoEnabled = conf.getBoolean(USE_MONGO, false);
    return mongoEnabled;
}

From source file:net.darkseraphim.webanalytics.hadoop.csv.CSVLineRecordReader.java

License:Apache License

/**
 * Reads configuration set in the runner, setting the delimiter and separator
 * used to process the CSV file. If the zip-file flag is set, wraps the input
 * stream in a {@link ZipInputStream} positioned at its first entry.
 *
 * @param is
 *            the raw input stream
 * @param conf
 *            hadoop configuration supplying delimiter/separator/zip settings
 * @throws IOException
 *             if the stream is flagged as a zip file but contains no entries
 */
public void init(InputStream is, Configuration conf) throws IOException {
    this.delimiter = conf.get(FORMAT_DELIMITER, DEFAULT_DELIMITER);
    this.separator = conf.get(FORMAT_SEPARATOR, DEFAULT_SEPARATOR);
    this.isZipFile = conf.getBoolean(IS_ZIPFILE, DEFAULT_ZIP);
    if (isZipFile) {
        // NOTE(review): the ZipInputStream is deliberately not closed here —
        // it is presumably closed later through this.is by the owning reader;
        // confirm against the reader's close() method.
        @SuppressWarnings("resource")
        ZipInputStream zis = new ZipInputStream(new BufferedInputStream(is));
        if (zis.getNextEntry() == null) {
            throw new IOException("No entries");
        }
        is = zis;
    }
    this.is = is;
    // FIX: specify the charset explicitly. The original used the platform
    // default charset, making CSV parsing results vary across JVMs/hosts.
    this.in = new BufferedReader(
            new InputStreamReader(is, java.nio.charset.StandardCharsets.UTF_8));
}

From source file:net.java.jatextmining.lib.CoOccurrenceMapper.java

License:Apache License

@Override
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    String normalized = net.java.jatextmining.util.Util.normalize(value.toString());
    // Select which parts of speech to extract, as configured on the job.
    EnumSet<ExtractType> posFilter = EnumSet.noneOf(ExtractType.class);
    if (conf.getBoolean("noun", false)) {
        posFilter.add(Tokenizer.ExtractType.Noun);
    }
    if (conf.getBoolean("adj", false)) {
        posFilter.add(Tokenizer.ExtractType.Adj);
    }
    if (conf.getBoolean("compNoun", false)) {
        posFilter.add(Tokenizer.ExtractType.CompNoun);
    }
    String[] tokens = tokenizer.getToken(normalized, posFilter);
    // Emit every token once, plus every ordered pair of distinct tokens
    // joined by a tab. Tokens shorter than two characters are ignored.
    HashSet<String> emitted = new HashSet<String>();
    for (String first : tokens) {
        if (first.length() < 2) {
            continue;
        }
        emitted.add(first);
        for (String second : tokens) {
            if (second.length() < 2 || first.equals(second)) {
                continue;
            }
            emitted.add(first + "\t" + second);
        }
    }
    for (String token : emitted) {
        rKey.set(token);
        context.write(rKey, RVAL);
    }
}

From source file:net.java.jatextmining.lib.CountMapper.java

License:Apache License

@Override
public void map(Object key, Text value, Context context) {
    Configuration conf = context.getConfiguration();
    String normalized = net.java.jatextmining.util.Util.normalize(value.toString());
    tokenizer.extractToken(normalized);
    // Which token categories to emit, per job configuration.
    boolean emitNouns = conf.getBoolean("noun", false);
    boolean emitCompNouns = conf.getBoolean("compNoun", false);
    boolean emitAdjs = conf.getBoolean("adj", false);
    if (conf.getBoolean("df", false)) {
        // Document-frequency mode: each distinct token is written once per
        // document, in sorted order.
        TreeSet<String> uniqWords = new TreeSet<String>();
        if (emitNouns) {
            for (String word : tokenizer.getNoun()) {
                uniqWords.add(word);
            }
        }
        if (emitCompNouns) {
            for (String word : tokenizer.getCompoundNoun()) {
                uniqWords.add(word);
            }
        }
        if (emitAdjs) {
            for (String word : tokenizer.getAdj()) {
                uniqWords.add(word);
            }
        }
        for (String word : uniqWords) {
            writeContext(context, word);
        }
    } else {
        // Raw-count mode: every occurrence is written, category by category.
        if (emitNouns) {
            for (String word : tokenizer.getNoun()) {
                writeContext(context, word);
            }
        }
        if (emitCompNouns) {
            for (String word : tokenizer.getCompoundNoun()) {
                writeContext(context, word);
            }
        }
        if (emitAdjs) {
            for (String word : tokenizer.getAdj()) {
                writeContext(context, word);
            }
        }
    }
}

From source file:net.java.jatextmining.lib.CountReducer.java

License:Apache License

/**
 * Initializes the reducer: in weighted mode, loads extra configuration and the
 * document-frequency table shipped via the distributed cache; always reads the
 * minimum-count cutoff used by {@code reduce}.
 */
@Override
public final void setup(Context context) {
    Configuration conf = context.getConfiguration();
    if (conf.getBoolean("weighting", false)) {
        String confPath = conf.get(CONF_PATH);
        // Pull in the extra resource before reading sizes from it.
        conf.addResource(confPath);
        // NOTE(review): "jatextmining.dfHashSize" must be defined in the added
        // resource; a missing key makes parseInt throw NumberFormatException.
        dfMap = new LinkedHashMap<String, Double>(
                Integer.parseInt(conf.get("jatextmining.dfHashSize")));
        try {
            Path[] cacheFiles = DistributedCache.getLocalCacheFiles(conf);
            if (cacheFiles != null) {
                for (Path cachePath : cacheFiles) {
                    loadCacheFile(cachePath, context);
                }
            }
        } catch (IOException e) {
            // Best-effort load: a failed cache read leaves dfMap (partially)
            // empty. TODO(review): consider failing the task or using a logger
            // instead of printStackTrace.
            e.printStackTrace();
        }
    }
    // Idiom fix: parseInt avoids the needless boxing of Integer.valueOf.
    countMinNum = Integer.parseInt(conf.get("jatextmining.counterMinimumLimitNum"));
}

From source file:net.java.jatextmining.lib.CountReducer.java

License:Apache License

/**
 * Sums the partial counts for a token and writes the total if it meets the
 * configured minimum. In weighted mode each partial count is divided by the
 * token's document frequency; tokens absent from the DF table contribute zero.
 */
@Override
public final void reduce(Text key, Iterable<DoubleWritable> values, Context context)
        throws IOException, InterruptedException {

    Configuration conf = context.getConfiguration();
    double total = 0.0;
    if (conf.getBoolean("weighting", false)) {
        String word = key.toString();
        if (dfMap.containsKey(word)) {
            final double df = dfMap.get(word);
            for (DoubleWritable partial : values) {
                total += partial.get() / df;
            }
        }
    } else {
        for (DoubleWritable partial : values) {
            total += partial.get();
        }
    }
    // Suppress tokens whose aggregate falls below the configured cutoff.
    if (total >= countMinNum) {
        value.set(total);
        context.write(key, value);
    }
}