Example usage for org.apache.hadoop.conf Configuration getFloat

Introduction

On this page you can find example usages of org.apache.hadoop.conf Configuration getFloat.

Prototype

public float getFloat(String name, float defaultValue) 

Document

Get the value of the name property as a float. If no such property exists, the supplied defaultValue is returned; if the property is set but is not a valid float, an error is thrown.
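
For orientation, here is a minimal, self-contained sketch of a getFloat call before the real-world examples below. The property name "my.sample.ratio" and the default 0.25f are illustrative placeholders, not values taken from those examples.

import org.apache.hadoop.conf.Configuration;

public class GetFloatExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // If "my.sample.ratio" is unset, getFloat falls back to the
        // supplied default (0.25f); if it is set to a non-numeric
        // string, parsing fails with a NumberFormatException.
        float ratio = conf.getFloat("my.sample.ratio", 0.25f);
        System.out.println("ratio = " + ratio);
    }
}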

Usage

From source file: com.ssamples.hbase.stochasticbalancer.BaseLoadBalancer.java

License: Apache License

protected void setSlop(Configuration conf) {
    this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2);
    this.overallSlop = conf.getFloat("hbase.regions.overallSlop", slop);
}

From source file: com.ssamples.hbase.stochasticbalancer.StochasticLoadBalancer.java

License: Apache License

@Override
public synchronized void setConf(Configuration conf) {
    super.setConf(conf);
    maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps);
    stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
    maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
    runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);

    numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember);
    isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable);
    minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance);
    if (localityCandidateGenerator == null) {
        localityCandidateGenerator = new LocalityBasedCandidateGenerator(services);
    }
    localityCost = new ServerLocalityCostFunction(conf, services);
    rackLocalityCost = new RackLocalityCostFunction(conf, services);

    if (this.candidateGenerators == null) {
        candidateGenerators = Lists.newArrayList();
        candidateGenerators.add(new RandomCandidateGenerator());
        candidateGenerators.add(new LoadCandidateGenerator());
        candidateGenerators.add(localityCandidateGenerator);
        candidateGenerators.add(new RegionReplicaRackCandidateGenerator());
    }
    regionLoadFunctions = new CostFromRegionLoadFunction[] { new ReadRequestCostFunction(conf),
            new CPRequestCostFunction(conf), new WriteRequestCostFunction(conf),
            new MemStoreSizeCostFunction(conf), new StoreFileCostFunction(conf) };
    regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
    regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf);
    costFunctions = new CostFunction[] { new RegionCountSkewCostFunction(conf),
            new PrimaryRegionCountSkewCostFunction(conf), new MoveCostFunction(conf), localityCost,
            rackLocalityCost, new TableSkewCostFunction(conf), regionReplicaHostCostFunction,
            regionReplicaRackCostFunction, regionLoadFunctions[0], regionLoadFunctions[1],
            regionLoadFunctions[2], regionLoadFunctions[3], regionLoadFunctions[4] };
    curFunctionCosts = new Double[costFunctions.length];
    tempFunctionCosts = new Double[costFunctions.length];
    LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion + ", maxRunningTime="
            + maxRunningTime + ", isByTable=" + isByTable + ", etc.");
}

From source file: com.ssamples.hbase.stochasticbalancer.StochasticLoadBalancer.java

License: Apache License

@Override
protected void setSlop(Configuration conf) {
    this.slop = conf.getFloat("hbase.regions.slop", 0.001F);
}

From source file: com.ssamples.hbase.stochasticbalancer.StochasticLoadBalancerNew.java

License: Apache License

@Override
public synchronized void setConf(Configuration conf) {
    super.setConf(conf);
    maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps);
    stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
    maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
    runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);

    numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember);
    isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable);
    minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance);
    if (localityCandidateGenerator == null) {
        localityCandidateGenerator = new LocalityBasedCandidateGenerator(services);
    }
    localityCost = new ServerLocalityCostFunction(conf, services);
    rackLocalityCost = new RackLocalityCostFunction(conf, services);

    if (this.candidateGenerators == null) {
        candidateGenerators = Lists.newArrayList();
        candidateGenerators.add(new RandomCandidateGenerator());
        candidateGenerators.add(new LoadCandidateGenerator());
        candidateGenerators.add(localityCandidateGenerator);
        candidateGenerators.add(new RegionReplicaRackCandidateGenerator());
    }
    regionLoadFunctions = new CostFromRegionLoadFunction[] { new ReadRequestCostFunction(conf),
            new CPRequestCostFunction(conf), new WriteRequestCostFunction(conf),
            new MemStoreSizeCostFunction(conf), new StoreFileCostFunction(conf) };
    regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf);
    regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf);
    costFunctions = new CostFunction[] {
            //new RegionCountSkewCostFunction(conf),
            new PrimaryRegionCountSkewCostFunction(conf), new MoveCostFunction(conf), localityCost,
            rackLocalityCost,
            //new TableSkewCostFunction(conf),
            new TableSkewCostFunctionNew(conf),
            //new TableRegionSkewCostFunction(conf),
            new ServerResourceCostFunction(conf), regionReplicaHostCostFunction, regionReplicaRackCostFunction,
            regionLoadFunctions[0], regionLoadFunctions[1], regionLoadFunctions[2], regionLoadFunctions[3],
            regionLoadFunctions[4] };
    curFunctionCosts = new Double[costFunctions.length];
    tempFunctionCosts = new Double[costFunctions.length];
    LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion + ", maxRunningTime="
            + maxRunningTime + ", isByTable=" + isByTable + ", etc.");
}

From source file: de.tudarmstadt.ukp.dkpro.bigdata.collocations.AssocReducer.java

License: Apache License

@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration conf = context.getConfiguration();
    this.ngramTotal = conf.getLong(NGRAM_TOTAL, -1);
    this.minValue = conf.getFloat(MIN_VALUE, DEFAULT_MIN_VALUE);
    String assocType = conf.get(ASSOC_METRIC, DEFAULT_ASSOC);
    if (assocType.equalsIgnoreCase("llr"))
        assocCalculator = new ConcreteLLCallback();
    else if (assocType.equalsIgnoreCase("dice"))
        assocCalculator = new DiceCallback();
    else if (assocType.equalsIgnoreCase("pmi"))
        assocCalculator = new PMICallback();
    else if (assocType.equalsIgnoreCase("chi"))
        assocCalculator = new ChiSquareCallback();

    this.emitUnigrams = conf.getBoolean(CollocDriver.EMIT_UNIGRAMS, CollocDriver.DEFAULT_EMIT_UNIGRAMS);
    log.info("NGram Total: {}, Min DICE value: {}, Emit Unigrams: {}",
            new Object[] { ngramTotal, minValue, emitUnigrams });

    if (ngramTotal == -1) {
        throw new IllegalStateException("No NGRAM_TOTAL available in job config");
    }
    mos = new MultipleOutputs<Text, DoubleWritable>(context);
}

From source file: edu.indiana.d2i.htrc.skmeans.StreamingKMeansAdapter.java

License: Apache License

public StreamingKMeansAdapter(Configuration conf) {
    float cutoff = conf.getFloat(StreamingKMeansConfigKeys.CUTOFF, 0);
    int maxClusters = conf.getInt(StreamingKMeansConfigKeys.MAXCLUSTER, 0);
    final int dim = conf.getInt(StreamingKMeansConfigKeys.VECTOR_DIMENSION, 0);
    final DistanceMeasure measure = ClassUtils
            .instantiateAs(conf.get(StreamingKMeansConfigKeys.DIST_MEASUREMENT), DistanceMeasure.class);

    if (cutoff == 0 || maxClusters == 0 || dim == 0)
        throw new RuntimeException("Illegal parameters for streaming kmeans, cutoff: " + cutoff
                + ", maxClusters: " + maxClusters + ", dimension: " + dim);

    this.maxClusters = maxClusters;
    this.distanceCutoff = cutoff;
    this.centroidFactory = new StreamingKmeans.CentroidFactory() {
        @Override
        public UpdatableSearcher create() {
            // (dimension, distance obj, 0 < #projections < 100, searchSize)
            //            return new ProjectionSearch(dim, measure, 8, 20);
            return new ProjectionSearch(dim, measure, 1, 2);
            //            return new Brute(measure);
        }
    };
    this.centroids = centroidFactory.create();
}

From source file: edu.uci.ics.pregelix.example.GraphSampleUndirectedVertex.java

License: Apache License

@Override
public void configure(Configuration conf) {
    try {
        globalRate = conf.getFloat(GLOBAL_RATE, 0);
        seedInterval = (int) (1.0 / (globalRate / 100));
        if (getSuperstep() > 1) {
            LongWritable totalSelectedVertex = (LongWritable) IterationUtils.readGlobalAggregateValue(conf,
                    BspUtils.getJobId(conf), GlobalSamplingAggregator.class.getName());
            LongWritable totalVertex = (LongWritable) IterationUtils.readGlobalAggregateValue(conf,
                    BspUtils.getJobId(conf), GlobalVertexCountAggregator.class.getName());
            fillingRate = (float) totalSelectedVertex.get() / (float) totalVertex.get();
        }
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}

From source file: edu.umn.cs.spatialHadoop.core.RTreeGridRecordWriter.java

License: Open Source License

/**
 * Initializes a new RTreeGridRecordWriter.
 * @param outDir Output path for the job
 * @param job The corresponding job
 * @param prefix A prefix to use for output files for uniqueness
 * @param cells The cells used to partition the written shapes
 * @throws IOException
 */
public RTreeGridRecordWriter(Path outDir, JobConf job, String prefix, CellInfo[] cells) throws IOException {
    super(outDir, job, prefix, cells);
    LOG.info("Writing to RTrees");

    // Determine the size of each RTree to decide when to flush a cell
    Configuration conf = fileSystem.getConf();
    this.fastRTree = conf.get(SpatialSite.RTREE_BUILD_MODE, "fast").equals("fast");
    this.maximumStorageOverhead = (int) (conf.getFloat(SpatialSite.INDEXING_OVERHEAD, 0.1f) * blockSize);
}

From source file: edu.umn.cs.spatialHadoop.indexing.Indexer.java

License: Open Source License

/**
 * Create a partitioner for a particular job
 * @param ins
 * @param out
 * @param job
 * @param partitionerName
 * @return
 * @throws IOException
 */
public static Partitioner createPartitioner(Path[] ins, Path out, Configuration job, String partitionerName)
        throws IOException {
    try {
        Partitioner partitioner;
        Class<? extends Partitioner> partitionerClass = PartitionerClasses.get(partitionerName.toLowerCase());
        if (partitionerClass == null) {
            // Try to parse the name as a class name
            try {
                partitionerClass = Class.forName(partitionerName).asSubclass(Partitioner.class);
            } catch (ClassNotFoundException e) {
                throw new RuntimeException("Unknown index type '" + partitionerName + "'");
            }
        }

        if (PartitionerReplicate.containsKey(partitionerName.toLowerCase())) {
            boolean replicate = PartitionerReplicate.get(partitionerName.toLowerCase());
            job.setBoolean("replicate", replicate);
        }
        partitioner = partitionerClass.newInstance();

        long t1 = System.currentTimeMillis();
        final Rectangle inMBR = (Rectangle) OperationsParams.getShape(job, "mbr");
        // Determine number of partitions
        long inSize = 0;
        for (Path in : ins) {
            inSize += FileUtil.getPathSize(in.getFileSystem(job), in);
        }
        long estimatedOutSize = (long) (inSize * (1.0 + job.getFloat(SpatialSite.INDEXING_OVERHEAD, 0.1f)));
        FileSystem outFS = out.getFileSystem(job);
        long outBlockSize = outFS.getDefaultBlockSize(out);

        final List<Point> sample = new ArrayList<Point>();
        float sample_ratio = job.getFloat(SpatialSite.SAMPLE_RATIO, 0.01f);
        long sample_size = job.getLong(SpatialSite.SAMPLE_SIZE, 100 * 1024 * 1024);

        LOG.info("Reading a sample of " + (int) Math.round(sample_ratio * 100) + "%");
        ResultCollector<Point> resultCollector = new ResultCollector<Point>() {
            @Override
            public void collect(Point p) {
                sample.add(p.clone());
            }
        };

        OperationsParams params2 = new OperationsParams(job);
        params2.setFloat("ratio", sample_ratio);
        params2.setLong("size", sample_size);
        if (job.get("shape") != null)
            params2.set("shape", job.get("shape"));
        if (job.get("local") != null)
            params2.set("local", job.get("local"));
        params2.setClass("outshape", Point.class, Shape.class);
        Sampler.sample(ins, resultCollector, params2);
        long t2 = System.currentTimeMillis();
        System.out.println("Total time for sampling in millis: " + (t2 - t1));
        LOG.info("Finished reading a sample of " + sample.size() + " records");

        int partitionCapacity = (int) Math.max(1,
                Math.floor((double) sample.size() * outBlockSize / estimatedOutSize));
        int numPartitions = Math.max(1, (int) Math.ceil((float) estimatedOutSize / outBlockSize));
        LOG.info("Partitioning the space into " + numPartitions + " partitions with capacity of "
                + partitionCapacity);

        partitioner.createFromPoints(inMBR, sample.toArray(new Point[sample.size()]), partitionCapacity);

        return partitioner;
    } catch (InstantiationException e) {
        e.printStackTrace();
        return null;
    } catch (IllegalAccessException e) {
        e.printStackTrace();
        return null;
    }
}

From source file: edu.umn.cs.spatialHadoop.mapred.RandomShapeGenerator.java

License: Open Source License

/**
 * Initialize from a FileSplit
 * @param job
 * @param split
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public RandomShapeGenerator(Configuration job, RandomInputFormat.GeneratedSplit split) throws IOException {
    this(split.length, OperationsParams.getShape(job, "mbr").getMBR(),
            SpatialSite.getDistributionType(job, "type", DistributionType.UNIFORM), job.getInt("rectsize", 100),
            split.index + job.getLong("seed", System.currentTimeMillis()), job.getFloat("thickness", 1));
    setShape((S) SpatialSite.createStockShape(job));
}