Example usage for org.apache.hadoop.conf Configuration setInt

List of usage examples for org.apache.hadoop.conf Configuration setInt

Introduction

On this page you can find example usages of org.apache.hadoop.conf Configuration setInt.

Prototype

public void setInt(String name, int value) 

Document

Set the value of the name property to an int.
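As a minimal, self-contained sketch of the method together with its getInt counterpart (the property name and values here are illustrative, not taken from the examples below):

import org.apache.hadoop.conf.Configuration;

public class SetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setInt stores the int as a string-valued property.
        conf.setInt("example.retry.count", 3);
        // getInt parses it back, returning the default if the key is absent.
        int retries = conf.getInt("example.retry.count", 1);
        System.out.println("retries=" + retries);
    }
}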

Usage

From source file:com.alibaba.wasp.client.FConnectionManager.java

License:Apache License

/**
 * Set the number of retries to use server-side when trying to communicate with
 * another server over {@link com.alibaba.wasp.client.FConnection}. Used when
 * updating catalog tables, etc. Call this method before we create any
 * Connections.
 * 
 * @param c
 *          The Configuration instance to set the retries into.
 * @param log
 *          Used to log what we set in here.
 */
public static void setServerSideFConnectionRetries(final Configuration c, final Log log) {
    int fcRetries = c.getInt(FConstants.WASP_CLIENT_RETRIES_NUMBER,
            FConstants.DEFAULT_WASP_CLIENT_RETRIES_NUMBER);
    // Go big. Multiply by 10. If we can't get to meta after this many retries
    // then something is seriously wrong.
    int serversideMultiplier = c.getInt("wasp.client.serverside.retries.multiplier", 10);
    int retries = fcRetries * serversideMultiplier;
    c.setInt(FConstants.WASP_CLIENT_RETRIES_NUMBER, retries);
    log.debug("Set serverside FConnection retries=" + retries);
}
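A hedged sketch of how a caller might use this helper; the no-argument WaspConfiguration.create() (assumed to mirror HBaseConfiguration.create()) and the commons-logging Log are illustrative assumptions:

Configuration conf = WaspConfiguration.create(); // assumed no-arg factory
// Must run before the first FConnection is created, since connections
// pick up their configuration when they are constructed.
FConnectionManager.setServerSideFConnectionRetries(conf,
        org.apache.commons.logging.LogFactory.getLog(FConnectionManager.class));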

From source file:com.alibaba.wasp.client.WaspAdmin.java

License:Apache License

/**
 * Check to see if Wasp is running. Throw an exception if not. We consider
 * that Wasp is running if ZooKeeper and Master are running.
 *
 * @param conf
 *          system configuration
 * @throws com.alibaba.wasp.MasterNotRunningException
 *           if the master is not running
 * @throws com.alibaba.wasp.ZooKeeperConnectionException
 *           if unable to connect to zookeeper
 */
public static void checkWaspAvailable(Configuration conf)
        throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
    Configuration copyOfConf = WaspConfiguration.create(conf);

    // We set these so the check fails as soon as possible if Wasp is not available
    copyOfConf.setInt("wasp.client.retries.number", 1);
    copyOfConf.setInt("zookeeper.recovery.retry", 0);

    FConnectionManager.FConnectionImplementation connection = (FConnectionManager.FConnectionImplementation) FConnectionManager
            .getConnection(copyOfConf);

    try {
        // Check ZK first.
        // If the connection exists, we may have a connection to ZK that does
        // not work anymore
        ZooKeeperWatcher zkw = null;
        try {
            zkw = connection.getZooKeeperWatcher();
            zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);

        } catch (IOException e) {
            throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
        } catch (KeeperException e) {
            throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
        } finally {
            if (zkw != null) {
                zkw.close();
            }
        }

        // Check Master
        connection.isMasterRunning();

    } finally {
        connection.close();
    }
}
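The clone-and-clamp pattern at the top of this method is reusable on its own: copy the caller's Configuration so its retry settings survive, then drop the retry counts to their minimum so the probe fails fast. A minimal sketch of just that step, using the plain Hadoop copy constructor instead of WaspConfiguration.create(conf):

Configuration probeConf = new Configuration(conf); // copy: callers keep their own retry settings
probeConf.setInt("wasp.client.retries.number", 1); // give up after a single attempt
probeConf.setInt("zookeeper.recovery.retry", 0);   // no ZooKeeper recovery retries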

From source file:com.alibaba.wasp.master.FMasterCommandLine.java

License:Apache License

private int stopMaster() {
    WaspAdmin adm = null;
    try {
        Configuration conf = getConf();
        // Don't try more than once
        conf.setInt("wasp.client.retries.number", 1);
        adm = new WaspAdmin(conf);
    } catch (MasterNotRunningException e) {
        LOG.error("Master not running");
        return -1;
    } catch (ZooKeeperConnectionException e) {
        LOG.error("ZooKeeper not available");
        return -1;
    }
    try {
        adm.shutdown();
    } catch (Throwable t) {
        LOG.error("Failed to stop master", t);
        return -1;
    } finally {
        if (adm != null) {
            try {
                adm.close();
            } catch (IOException e) {
                LOG.error("Failed to close admin.");
                return -1;
            }
        }
    }
    return 0;
}

From source file:com.alibaba.wasp.ZNodeClearer.java

License:Apache License

/**
 * Delete the master znode if its content (ServerName string) is the same as
 * the one in the znode file. (env: HBASE_ZNODE_FILE).
 *
 * @return true on successful deletion, false otherwise.
 */
public static boolean clear(Configuration conf) {
    Configuration tempConf = new Configuration(conf);
    tempConf.setInt("zookeeper.recovery.retry", 0);

    ZooKeeperWatcher zkw;
    try {
        zkw = new ZooKeeperWatcher(tempConf, "clean znode for master", new Abortable() {
            @Override
            public void abort(String why, Throwable e) {
            }

            @Override
            public boolean isAborted() {
                return false;
            }
        });
    } catch (IOException e) {
        LOG.warn("Can't connect to zookeeper to read the master znode", e);
        return false;
    }

    String znodeFileContent;
    try {
        znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk();
    } catch (IOException e) {
        LOG.warn("Can't read the content of the znode file", e);
        return false;
    }

    return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent);
}
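A hedged sketch of invoking the clearer, for instance from a failover script; the plain Configuration and the LOG handle are illustrative assumptions:

Configuration conf = new Configuration();
if (ZNodeClearer.clear(conf)) {
    LOG.info("Deleted stale master znode left by this host");
} else {
    LOG.warn("Master znode not deleted; it may belong to a live master");
}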

From source file:com.aliyun.openservices.tablestore.hadoop.TableStoreOutputFormat.java

License:Apache License

/**
 * Set max batch size for BatchWriteRow requests to TableStore.
 * This is optional.
 */
public static void setMaxBatchSize(Configuration conf, int maxBatchSize) {
    Preconditions.checkArgument(maxBatchSize > 0, "maxBatchSize must be greater than 0.");
    conf.setInt(MAX_UPDATE_BATCH_SIZE, maxBatchSize);
}
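A hedged usage sketch for a job driver; the value 100 is arbitrary:

Configuration conf = new Configuration();
// Cap each BatchWriteRow request to TableStore at 100 rows.
// A non-positive value would fail the precondition above.
TableStoreOutputFormat.setMaxBatchSize(conf, 100);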

From source file:com.avira.couchdoop.demo.BenchmarkUpdater.java

License:Apache License

public Job configureJob(Configuration conf, String input) throws IOException {
    conf.setInt("mapreduce.map.failures.maxpercent", 5);
    conf.setInt("mapred.max.map.failures.percent", 5);
    conf.setInt("mapred.max.tracker.failures", 20);

    Job job = Job.getInstance(conf);
    job.setJarByClass(BenchmarkUpdater.class);

    // User classpath takes precedence over the Hadoop classpath.
    // This is because the Couchbase client requires a newer version of
    // org.apache.httpcomponents:httpcore.
    job.setUserClassesTakesPrecedence(true);

    // Input
    FileInputFormat.setInputPaths(job, input);

    // Mapper
    job.setMapperClass(BenchmarkUpdateMapper.class);
    job.setMapOutputKeyClass(String.class);
    job.setMapOutputValueClass(CouchbaseAction.class);

    // Reducer
    job.setNumReduceTasks(0);

    // Output
    job.setOutputFormatClass(CouchbaseOutputFormat.class);
    job.setOutputKeyClass(String.class);
    job.setOutputValueClass(CouchbaseAction.class);

    return job;
}
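The three setInt calls at the top recur in the couchdoop drivers below; the first two set the same 5% map-failure tolerance under its new (mapreduce.*) and old (mapred.*) property names, so the setting holds across Hadoop versions. A hedged sketch of driving this job, assuming BenchmarkUpdater has a no-argument constructor and an illustrative input path:

Configuration conf = new Configuration();
Job job = new BenchmarkUpdater().configureJob(conf, "/input/benchmark");
System.exit(job.waitForCompletion(true) ? 0 : 1);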

From source file:com.avira.couchdoop.jobs.CouchbaseExporter.java

License:Apache License

public Job configureJob(Configuration conf, String input) throws IOException {
    conf.setInt("mapreduce.map.failures.maxpercent", 5);
    conf.setInt("mapred.max.map.failures.percent", 5);
    conf.setInt("mapred.max.tracker.failures", 20);

    Job job = Job.getInstance(conf);
    job.setJarByClass(CouchbaseExporter.class);

    // Input
    FileInputFormat.setInputPaths(job, input);

    // Mapper
    job.setMapperClass(CsvToCouchbaseMapper.class);
    job.setMapOutputKeyClass(String.class);
    job.setMapOutputValueClass(CouchbaseAction.class);

    // Reducer
    job.setNumReduceTasks(0);

    // Output
    job.setOutputFormatClass(CouchbaseOutputFormat.class);
    job.setOutputKeyClass(String.class);
    job.setOutputValueClass(CouchbaseAction.class);

    return job;
}

From source file:com.avira.couchdoop.jobs.CouchbaseViewImporter.java

License:Apache License

public Job configureJob(Configuration conf, String output) throws IOException {
    conf.setInt("mapreduce.map.failures.maxpercent", 5);
    conf.setInt("mapred.max.map.failures.percent", 5);
    conf.setInt("mapred.max.tracker.failures", 20);

    Job job = Job.getInstance(conf);
    job.setJarByClass(CouchbaseViewImporter.class);

    // Input
    job.setInputFormatClass(CouchbaseViewInputFormat.class);

    // Mapper
    job.setMapperClass(CouchbaseViewToFileMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    // Reducer
    job.setNumReduceTasks(0);

    // Output
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileOutputFormat.setOutputPath(job, new Path(output));

    return job;
}

From source file:com.avira.couchdoop.jobs.CouchbaseViewToHBaseImporter.java

License:Apache License

public Job configureJob(Configuration conf, String outputTable) throws IOException {
    conf.setInt("mapreduce.map.failures.maxpercent", 5);
    conf.setInt("mapred.max.map.failures.percent", 5);
    conf.setInt("mapred.max.tracker.failures", 20);

    Job job = Job.getInstance(conf);
    job.setJarByClass(CouchbaseViewToHBaseImporter.class);

    // Input
    job.setInputFormatClass(CouchbaseViewInputFormat.class);

    // Mapper
    job.setMapperClass(CouchbaseViewToHBaseMapper.class);

    // Reducer
    job.setNumReduceTasks(0);

    // Output
    TableMapReduceUtil.initTableReducerJob(outputTable, IdentityTableReducer.class, job);

    return job;
}

From source file:com.bark.hadoop.lab3.PageRank.java

@Override
public int run(String args[]) {
    String tmp = "/tmp/" + new Date().getTime();
    try {
        /**
         * Job 1: Parse XML input and read title,links
         */
        Configuration conf = new Configuration();
        conf.set("xmlinput.start", "<page>");
        conf.set("xmlinput.end", "</page>");

        Job job = Job.getInstance(conf);
        job.setJarByClass(PageRank.class);

        // specify a mapper
        job.setMapperClass(RedLinkMapper.class);

        // specify a reducer
        job.setReducerClass(RedLinkReducer.class);

        // specify output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // specify input and output DIRECTORIES
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setInputFormatClass(XmlInputFormat.class);

        FileOutputFormat.setOutputPath(job, new Path((args[1] + tmp + "/job1")));
        job.setOutputFormatClass(TextOutputFormat.class);

        job.waitForCompletion(true);
    } catch (InterruptedException | ClassNotFoundException | IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error during mapreduce job1.");
        return 2;
    }
    /**
     * Job 2: Adjacency outGraph
     */
    try {
        Configuration conf2 = new Configuration();

        Job job2 = Job.getInstance(conf2);
        job2.setJarByClass(PageRank.class);

        // specify a mapper
        job2.setMapperClass(AdjMapper.class);

        // specify a reducer
        job2.setReducerClass(AdjReducer.class);

        // specify output types
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);

        // specify input and output DIRECTORIES
        FileInputFormat.addInputPath(job2, new Path((args[1] + tmp + "/job1")));
        job2.setInputFormatClass(TextInputFormat.class);

        FileOutputFormat.setOutputPath(job2, new Path((args[1] + tmp + "/job2")));
        job2.setOutputFormatClass(TextOutputFormat.class);

        job2.waitForCompletion(true);
    } catch (InterruptedException | ClassNotFoundException | IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error during mapreduce job2.");
        return 2;
    }
    /**
     * Job 3: PageCount
     */
    try {
        Configuration conf3 = new Configuration();
        /**
         * Change output separator to "=" instead of the default \t for this job
         */
        conf3.set("mapreduce.output.textoutputformat.separator", "=");

        Job job3 = Job.getInstance(conf3);
        job3.setJarByClass(PageRank.class);

        // specify a mapper
        job3.setMapperClass(PageCountMapper.class);

        // specify a reducer
        job3.setReducerClass(PageCountReducer.class);

        // specify output types
        job3.setOutputKeyClass(Text.class);
        job3.setOutputValueClass(IntWritable.class);

        // specify input and output DIRECTORIES
        FileInputFormat.addInputPath(job3, new Path((args[1] + tmp + "/job2")));
        job3.setInputFormatClass(TextInputFormat.class);

        FileOutputFormat.setOutputPath(job3, new Path((args[1] + tmp + "/job3")));
        job3.setOutputFormatClass(TextOutputFormat.class);

        job3.waitForCompletion(true);
    } catch (InterruptedException | ClassNotFoundException | IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error during mapreduce job3.");
        return 2;
    }
    /**
     * Job 4: PageRank
     */
    for (int i = 1; i < 9; i++) {
        try {
            Configuration conf4 = new Configuration();
            /**
             * Read number of nodes from the output of job 3: pageCount
             */
            Path path = new Path((args[1] + tmp + "/job3"));
            FileSystem fs = path.getFileSystem(conf4);
            RemoteIterator<LocatedFileStatus> ri = fs.listFiles(path, true);

            int n = 0;
            Pattern pt = Pattern.compile("(\\d+)");
            while (ri.hasNext()) {
                LocatedFileStatus lfs = ri.next();
                if (lfs.isFile() && n == 0) {
                    // Read the first integer in the pageCount output; close the stream when done.
                    try (BufferedReader br = new BufferedReader(
                            new InputStreamReader(fs.open(lfs.getPath())))) {
                        String s;
                        while ((s = br.readLine()) != null) {
                            Matcher mt = pt.matcher(s);
                            if (mt.find()) {
                                n = Integer.parseInt(mt.group(1));
                                break;
                            }
                        }
                    }
                }
            }
            /**
             * Done reading the number of nodes; make it available to the
             * MapReduce job under the key "N"
             */
            conf4.setInt("N", n);

            Job job4 = Job.getInstance(conf4);
            job4.setJarByClass(PageRank.class);

            // specify a mapper
            job4.setMapperClass(PageRankMapper.class);

            // specify a reducer
            job4.setReducerClass(PageRankReducer.class);

            // specify output types
            job4.setOutputKeyClass(Text.class);
            job4.setOutputValueClass(Text.class);

            // specify input and output DIRECTORIES
            if (i == 1) {
                FileInputFormat.addInputPath(job4, new Path((args[1] + tmp + "/job2")));
            } else {
                FileInputFormat.addInputPath(job4, new Path((args[1] + tmp + "/job4/" + (i - 1))));
            }
            job4.setInputFormatClass(TextInputFormat.class);

            FileOutputFormat.setOutputPath(job4, new Path((args[1] + tmp + "/job4/" + i)));
            job4.setOutputFormatClass(TextOutputFormat.class);
            job4.waitForCompletion(true);
        } catch (InterruptedException | ClassNotFoundException | IOException ex) {
            Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
            System.err.println("Error during mapreduce job4.");
            return 2;
        }
    }
    /**
     * Job 5: Sort iteration 1 and iteration 8
     */
    int returnCode = 0;
    for (int i = 0; i < 2; i++) {
        try {
            Configuration conf5 = new Configuration();

            /**
             * Read number of nodes from the output of job 3: pageCount
             */
            Path path = new Path((args[1] + tmp + "/job3"));
            FileSystem fs = path.getFileSystem(conf5);
            RemoteIterator<LocatedFileStatus> ri = fs.listFiles(path, true);

            int n = 0;
            Pattern pt = Pattern.compile("(\\d+)");
            while (ri.hasNext()) {
                LocatedFileStatus lfs = ri.next();
                if (lfs.isFile() && n == 0) {
                    // Read the first integer in the pageCount output; close the stream when done.
                    try (BufferedReader br = new BufferedReader(
                            new InputStreamReader(fs.open(lfs.getPath())))) {
                        String s;
                        while ((s = br.readLine()) != null) {
                            Matcher mt = pt.matcher(s);
                            if (mt.find()) {
                                n = Integer.parseInt(mt.group(1));
                                break;
                            }
                        }
                    }
                }
            }
            /**
             * Done reading the number of nodes; make it available to the
             * MapReduce job under the key "N"
             */
            conf5.setInt("N", n);

            Job job5 = Job.getInstance(conf5);
            /**
             * one reducer only
             */
            job5.setNumReduceTasks(1);
            job5.setSortComparatorClass(MyWritableComparator.class);
            job5.setJarByClass(PageRank.class);

            // specify a mapper
            job5.setMapperClass(SortMapper.class);
            job5.setMapOutputKeyClass(DoubleWritable.class);
            job5.setMapOutputValueClass(Text.class);

            // specify a reducer
            job5.setReducerClass(SortReducer.class);

            // specify output types
            job5.setOutputKeyClass(Text.class);
            job5.setOutputValueClass(DoubleWritable.class);

            // specify input and output DIRECTORIES
            int y = 7 * i + 1;
            FileInputFormat.addInputPath(job5, new Path((args[1] + tmp + "/job4/" + y)));
            job5.setInputFormatClass(TextInputFormat.class);

            FileOutputFormat.setOutputPath(job5, new Path((args[1] + tmp + "/job5/" + y)));
            job5.setOutputFormatClass(TextOutputFormat.class);

            returnCode = job5.waitForCompletion(true) ? 0 : 1;
        } catch (InterruptedException | ClassNotFoundException | IOException ex) {
            Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
            System.err.println("Error during mapreduce job5.");
            return 2;
        }
    }
    /**
     * Copy necessary output files to args[1]
     */

    /**
     * Rename and copy OutLinkGraph
     */
    try {
        Configuration conf = new Configuration();

        Path outLinkGraph = new Path((args[1] + tmp + "/job2/part-r-00000"));
        FileSystem outLinkGraphFS = outLinkGraph.getFileSystem(conf);

        Path output = new Path(args[1] + "/results/PageRank.outlink.out");
        FileSystem outputFS = output.getFileSystem(conf);
        org.apache.hadoop.fs.FileUtil.copy(outLinkGraphFS, outLinkGraph, outputFS, output, false, true, conf);
    } catch (IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error while copying results.");
        return 2;
    }

    /**
     * Rename and copy total number of pages
     */
    try {
        Configuration conf = new Configuration();

        Path src = new Path((args[1] + tmp + "/job3/part-r-00000"));
        FileSystem srcFS = src.getFileSystem(conf);

        Path output = new Path(args[1] + "/results/PageRank.n.out");
        FileSystem outputFS = output.getFileSystem(conf);
        org.apache.hadoop.fs.FileUtil.copy(srcFS, src, outputFS, output, false, true, conf);
    } catch (IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error while copying results.");
        return 2;
    }

    /**
     * Rename and copy iteration 1
     */
    try {
        Configuration conf = new Configuration();

        Path src = new Path((args[1] + tmp + "/job5/1/part-r-00000"));
        FileSystem srcFS = src.getFileSystem(conf);

        Path output = new Path(args[1] + "/results/PageRank.iter1.out");
        FileSystem outputFS = output.getFileSystem(conf);
        org.apache.hadoop.fs.FileUtil.copy(srcFS, src, outputFS, output, false, true, conf);
    } catch (IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error while copying results.");
        return 2;
    }

    /**
     * Rename and copy iteration 8
     */
    try {
        Configuration conf = new Configuration();

        Path src = new Path((args[1] + tmp + "/job5/8/part-r-00000"));
        FileSystem srcFS = src.getFileSystem(conf);

        Path output = new Path(args[1] + "/results/PageRank.iter8.out");
        FileSystem outputFS = output.getFileSystem(conf);
        org.apache.hadoop.fs.FileUtil.copy(srcFS, src, outputFS, output, false, true, conf);
    } catch (IOException ex) {
        Logger.getLogger(PageRank.class.getName()).log(Level.SEVERE, ex.toString(), ex);
        System.err.println("Error while copying results.");
        return 2;
    }
    return returnCode;
}
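Jobs 4 and 5 show the idiomatic use of setInt to pass a small scalar to every task: the driver stores the node count under the key "N" before Job.getInstance, and each task reads it back from its own Configuration. The mappers themselves are not shown in this listing; a hedged sketch of the reading side (class name and body are illustrative):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class PageRankMapper extends Mapper<LongWritable, Text, Text, Text> {
    private int n;

    @Override
    protected void setup(Context context) {
        // Recover the node count the driver stored with conf.setInt("N", n);
        // -1 makes a missing key visible instead of silently defaulting.
        n = context.getConfiguration().getInt("N", -1);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // ... use n in the rank computation; body elided ...
    }
}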