Example usage for org.apache.hadoop.conf Configuration setDouble

List of usage examples for org.apache.hadoop.conf Configuration setDouble

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration setDouble.

Prototype

public void setDouble(String name, double value) 

Document

Set the value of the name property to a double.
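Before the full examples below, a minimal sketch of the round trip. The property name my.app.threshold and the class name are invented for illustration; getDouble's second argument is the default returned when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class SetDoubleSketch {
    public static void main(String[] args) {
        // false skips loading the default resource files
        Configuration conf = new Configuration(false);
        // store a double under a made-up property name
        conf.setDouble("my.app.threshold", 0.75);
        // read it back; 0.0 is the default if the property is unset
        double threshold = conf.getDouble("my.app.threshold", 0.0);
        System.out.println(threshold); // prints 0.75
    }
}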

Usage

From source file:edu.iu.daal_optimization_solvers.SGDMomentOptDenseBatch.SGDMOMOptDaalLauncher.java

License:Apache License

/**
 * Launches all the tasks in order.
 */
@Override
public int run(String[] args) throws Exception {

    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    conf.setInt(HarpDAALConstants.FILE_DIM, Integer.parseInt(args[init.getSysArgNum()]));
    conf.setInt(HarpDAALConstants.FEATURE_DIM, Integer.parseInt(args[init.getSysArgNum() + 1]));
    conf.setInt(HarpDAALConstants.BATCH_SIZE, Integer.parseInt(args[init.getSysArgNum() + 2]));
    conf.setDouble(HarpDAALConstants.ACC_THRESHOLD, Double.parseDouble(args[init.getSysArgNum() + 3]));
    conf.setDouble(HarpDAALConstants.LEARNING_RATE, Double.parseDouble(args[init.getSysArgNum() + 4]));

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job sgdmomOptJob = init.createJob("sgdmomOptJob", SGDMOMOptDaalLauncher.class,
            SGDMOMOptDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = sgdmomOptJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        sgdmomOptJob.killJob();
        System.out.println("sgdmomOptJob failed");
        return 1;
    }

    return 0;
}

From source file:edu.iu.daal_sgd.SGDDaalLauncher.java

License:Apache License

/**
 * Launches SGD workers.
 */
@Override
public int run(String[] args) throws Exception {

    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();
    init.loadDistributedLibsExp();
    // load args
    init.loadSysArgs();

    //load app args
    conf.setInt(HarpDAALConstants.FILE_DIM, Integer.parseInt(args[init.getSysArgNum()]));
    conf.setInt(HarpDAALConstants.FEATURE_DIM, Integer.parseInt(args[init.getSysArgNum() + 1]));
    conf.setDouble(Constants.LAMBDA, Double.parseDouble(args[init.getSysArgNum() + 2]));
    conf.setDouble(Constants.EPSILON, Double.parseDouble(args[init.getSysArgNum() + 3]));
    conf.setBoolean(Constants.ENABLE_TUNING, Boolean.parseBoolean(args[init.getSysArgNum() + 4]));
    conf.set(HarpDAALConstants.TEST_FILE_PATH, args[init.getSysArgNum() + 5]);

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job mfsgdJob = init.createJob("mfsgdJob", SGDDaalLauncher.class, SGDDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = mfsgdJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        mfsgdJob.killJob();
        System.out.println("mfsgdJob failed");
        return 1;
    }

    return 0;
}

From source file:edu.iu.lda.LDALauncher.java

License:Apache License

private Job configureLDAJob(Path docDir, int numTopics, double alpha, double beta, int numIterations,
        int minBound, int maxBound, int numMapTasks, int numThreadsPerWorker, double scheduleRatio, int mem,
        boolean printModel, Path modelDir, Path outputDir, Configuration configuration, int jobID)
        throws IOException, URISyntaxException {
    configuration.setInt(Constants.NUM_TOPICS, numTopics);
    configuration.setDouble(Constants.ALPHA, alpha);
    configuration.setDouble(Constants.BETA, beta);
    configuration.setInt(Constants.NUM_ITERATIONS, numIterations);
    configuration.setInt(Constants.MIN_BOUND, minBound);
    configuration.setInt(Constants.MAX_BOUND, maxBound);
    configuration.setInt(Constants.NUM_THREADS, numThreadsPerWorker);
    configuration.setDouble(Constants.SCHEDULE_RATIO, scheduleRatio);
    System.out.println("Model Dir Path: " + modelDir.toString());
    configuration.set(Constants.MODEL_DIR, modelDir.toString());
    configuration.setBoolean(Constants.PRINT_MODEL, printModel);
    Job job = Job.getInstance(configuration, "lda_job_" + jobID);
    JobConf jobConf = (JobConf) job.getConfiguration();

    jobConf.set("mapreduce.framework.name", "map-collective");
    // mapreduce.map.collective.memory.mb
    // 125000
    jobConf.setInt("mapreduce.map.collective.memory.mb", mem);
    // mapreduce.map.collective.java.opts
    // -Xmx120000m -Xms120000m
    int xmx = (mem - 5000) > (mem * 0.9) ? (mem - 5000) : (int) Math.ceil(mem * 0.9);
    int xmn = (int) Math.ceil(0.25 * xmx);
    jobConf.set("mapreduce.map.collective.java.opts",
            "-Xmx" + xmx + "m -Xms" + xmx + "m" + " -Xmn" + xmn + "m");
    jobConf.setNumMapTasks(numMapTasks);
    jobConf.setInt("mapreduce.job.max.split.locations", 10000);
    FileInputFormat.setInputPaths(job, docDir);
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setInputFormatClass(MultiFileInputFormat.class);
    job.setJarByClass(LDALauncher.class);
    job.setMapperClass(LDAMPCollectiveMapper.class);
    job.setNumReduceTasks(0);
    return job;
}
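To make the heap sizing concrete: with mem = 125000 (the value in the comment above), mem - 5000 = 120000 exceeds 0.9 * mem = 112500, so the job is launched with -Xmx120000m -Xms120000m -Xmn30000m.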

From source file:edu.iu.sgd.SGDLauncher.java

License:Apache License

private Job configureSGDJob(Path inputDir, int r, double lambda, double epsilon, int numIterations,
        int trainRatio, int numMapTasks, int numThreadsPerWorker, double scheduleRatio, int mem, Path modelDir,
        Path outputDir, String testFilePath, Configuration configuration, int jobID)
        throws IOException, URISyntaxException {
    configuration.setInt(Constants.R, r);
    configuration.setDouble(Constants.LAMBDA, lambda);
    configuration.setDouble(Constants.EPSILON, epsilon);
    configuration.setInt(Constants.NUM_ITERATIONS, numIterations);
    configuration.setInt(Constants.TRAIN_RATIO, trainRatio);
    configuration.setInt(Constants.NUM_THREADS, numThreadsPerWorker);
    configuration.setDouble(Constants.SCHEDULE_RATIO, scheduleRatio);
    configuration.set(Constants.MODEL_DIR, modelDir.toString());
    configuration.set(Constants.TEST_FILE_PATH, testFilePath);
    Job job = Job.getInstance(configuration, "sgd_job_" + jobID);
    JobConf jobConf = (JobConf) job.getConfiguration();
    jobConf.set("mapreduce.framework.name", "map-collective");
    // mapreduce.map.collective.memory.mb
    // 125000
    jobConf.setInt("mapreduce.map.collective.memory.mb", mem);
    // mapreduce.map.collective.java.opts
    // -Xmx120000m -Xms120000m
    int xmx = (mem - 5000) > (mem * 0.9) ? (mem - 5000) : (int) Math.ceil(mem * 0.9);
    int xmn = (int) Math.ceil(0.25 * xmx);
    jobConf.set("mapreduce.map.collective.java.opts",
            "-Xmx" + xmx + "m -Xms" + xmx + "m" + " -Xmn" + xmn + "m");
    jobConf.setNumMapTasks(numMapTasks);
    jobConf.setInt("mapreduce.job.max.split.locations", 10000);
    FileInputFormat.setInputPaths(job, inputDir);
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setInputFormatClass(MultiFileInputFormat.class);
    job.setJarByClass(SGDLauncher.class);
    job.setMapperClass(SGDCollectiveMapper.class);
    job.setNumReduceTasks(0);
    return job;
}

From source file:edu.iu.wdamds.MDSLauncher.java

License:Apache License

private Job prepareWDAMDSJob(int numMapTasks, Path dataDirPath, Path xFilePath, Path xOutFilePath,
        Path outDirPath, String idsFile, String labelsFile, double threshold, int d, double alpha, int n,
        int cgIter, int numThreads)
        throws IOException, URISyntaxException, InterruptedException, ClassNotFoundException {
    Job job = Job.getInstance(getConf(), "map-collective-wdamds");
    Configuration jobConfig = job.getConfiguration();
    FileInputFormat.setInputPaths(job, dataDirPath);
    FileOutputFormat.setOutputPath(job, outDirPath);
    jobConfig.setInt(MDSConstants.NUM_MAPS, numMapTasks);
    // Load from HDFS (currently the x file
    // is not actually read from HDFS)
    jobConfig.set(MDSConstants.X_FILE_PATH, xFilePath.toString());
    // Output to HDFS
    jobConfig.set(MDSConstants.X_OUT_FILE_PATH, xOutFilePath.toString());
    // Load from shared file system
    jobConfig.set(MDSConstants.IDS_FILE, idsFile);
    // Load from shared file system
    jobConfig.set(MDSConstants.LABELS_FILE, labelsFile);
    jobConfig.setDouble(MDSConstants.THRESHOLD, threshold);
    jobConfig.setInt(MDSConstants.D, d);
    jobConfig.setDouble(MDSConstants.ALPHA, alpha);
    jobConfig.setInt(MDSConstants.N, n);
    jobConfig.setInt(MDSConstants.CG_ITER, cgIter);
    jobConfig.setInt(MDSConstants.NUM_THREADS, numThreads);
    // input class to file-based class
    job.setInputFormatClass(SingleFileInputFormat.class);
    job.setJarByClass(MDSLauncher.class);
    job.setMapperClass(WDAMDSMapper.class);
    // When use MultiFileInputFormat, remember to
    // set the number of map tasks
    org.apache.hadoop.mapred.JobConf jobConf = (JobConf) job.getConfiguration();
    jobConf.set("mapreduce.framework.name", "map-collective");
    jobConf.setNumMapTasks(numMapTasks);
    job.setNumReduceTasks(0);
    return job;
}

From source file:org.apache.apex.engine.YarnAppLauncherImpl.java

License:Apache License

private void setConfiguration(Configuration conf, String property, Object value) {
    if (value instanceof Integer) {
        conf.setInt(property, (Integer) value);
    } else if (value instanceof Boolean) {
        conf.setBoolean(property, (Boolean) value);
    } else if (value instanceof Long) {
        conf.setLong(property, (Long) value);
    } else if (value instanceof Float) {
        conf.setFloat(property, (Float) value);
    } else if (value instanceof Double) {
        conf.setDouble(property, (Double) value);
    } else {
        conf.set(property, value.toString());
    }
}
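This helper dispatches on the runtime type of value, so a boxed Double routes to setDouble and anything unrecognized falls back to the string form. A hypothetical invocation sketch, with property names invented for illustration:

    Configuration conf = new Configuration();
    setConfiguration(conf, "example.max.retries", 5);     // Integer -> setInt
    setConfiguration(conf, "example.sample.rate", 0.01);  // autoboxed Double -> setDouble
    setConfiguration(conf, "example.app.name", "demo");   // fallback -> set(property, value.toString())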

From source file:org.apache.lens.server.api.driver.TestWeightedCostSelector.java

License:Apache License

private TestDriverAllocation testDriverSelector(double r1, double r2, double r3, double r4)
        throws LensException {

    List<LensDriver> drivers = new ArrayList<LensDriver>();
    Map<LensDriver, String> driverQueries = new HashMap<LensDriver, String>();

    Configuration conf1 = new Configuration();
    Configuration conf2 = new Configuration();
    Configuration conf3 = new Configuration();
    Configuration conf4 = new Configuration();

    //Creating drivers and setting driver ratios
    MockDriver d1 = new MockDriver();
    d1.configure(conf1, null, null);
    if (r1 > 0) {
        conf1.setDouble(DRIVER_WEIGHT, r1);
    }

    MockDriver d2 = new MockDriver();
    d2.configure(conf2, null, null);
    if (r2 > 0) {
        conf2.setDouble(DRIVER_WEIGHT, r2);
    }

    MockDriver d3 = new MockDriver();
    d3.configure(conf3, null, null);
    if (r3 > 0) {
        conf3.setDouble(DRIVER_WEIGHT, r3);
    }

    MockDriver d4 = new MockDriver();
    d4.configure(conf4, null, null);
    if (r4 > 0) {
        conf4.setDouble(DRIVER_WEIGHT, r4);
    }

    drivers.add(d1);
    drivers.add(d2);
    drivers.add(d3);
    drivers.add(d4);

    String query = "test query";
    driverQueries.put(d1, query);
    driverQueries.put(d2, query);
    driverQueries.put(d3, query);
    driverQueries.put(d4, query);

    MockQueryContext ctx = createMockContext(query, conf, qconf, driverQueries);
    ctx.setDriverCost(d4, queryCost); //Increasing driver 4's query cost

    return driverLoop(ctx, d1, d2, d3, d4);
}

From source file:org.apache.lens.server.api.driver.TestWeightedCostSelector.java

License:Apache License

@Test
public void testDifferentWeights() throws LensException {

    int r1 = 10;
    int r2 = 90;

    List<LensDriver> drivers = new ArrayList<LensDriver>();
    Map<LensDriver, String> driverQueries = new HashMap<LensDriver, String>();

    Configuration conf1 = new Configuration();
    Configuration conf2 = new Configuration();

    //Creating drivers and setting driver ratios
    MockDriver d1 = new MockDriver();
    conf1.setDouble(DRIVER_WEIGHT, r1);
    d1.configure(conf1, null, null);

    MockDriver d2 = new MockDriver();
    conf2.setDouble(DRIVER_WEIGHT, r2);
    d2.configure(conf2, null, null);

    drivers.add(d1);
    drivers.add(d2);
    String query = "test query";
    driverQueries.put(d1, query);
    driverQueries.put(d2, query);

    MockQueryContext ctx = createMockContext(query, conf, qconf, driverQueries);

    ctx.setDriverCost(d2, queryCost);

    LensDriver selected = selector.select(ctx, conf);

    Assert.assertEquals(selected, d1,
            "TEST Different Weights: Improper driver allocation. Check " + "WeightedQueryCostDriverSelector.");
}

From source file:org.apache.tez.common.TestTezUtils.java

License:Apache License

private Configuration getConf() {
    Configuration conf = new Configuration(false);
    conf.set("test1", "value1");
    conf.setBoolean("test2", true);
    conf.setDouble("test3", 1.2345);
    conf.setInt("test4", 34567);
    conf.setLong("test5", 1234567890L);
    conf.setStrings("test6", "S1", "S2", "S3");
    return conf;
}
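For reference, each typed setter used above has a matching getter that takes a default value for missing keys. A short sketch of reading the entries back:

    Configuration conf = getConf();
    double d = conf.getDouble("test3", 0.0); // 1.2345
    long l = conf.getLong("test5", 0L);      // 1234567890L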

From source file:org.apache.tez.dag.history.ats.acls.TestATSHistoryV15.java

License:Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryV15.class.getName(), 1, 1, 1, true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            atsActivePath = new Path("/tmp/ats/active/" + random.nextInt(100000));
            Path atsDonePath = new Path("/tmp/ats/done/" + random.nextInt(100000));
            conf.setDouble(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5);

            remoteFs.mkdirs(atsActivePath);
            remoteFs.mkdirs(atsDonePath);

            conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
                    remoteFs.resolvePath(atsActivePath).toString());
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
                    remoteFs.resolvePath(atsDonePath).toString());

            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    user = UserGroupInformation.getCurrentUser().getShortUserName();
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}