Example usage for org.apache.hadoop.conf Configuration setInt

List of usage examples for org.apache.hadoop.conf Configuration setInt

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.setInt.

Prototype

public void setInt(String name, int value) 

Document

Set the value of the name property to an int.
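
A minimal sketch of setting and reading back an int property (the key name "example.retry.count" is illustrative, not a predefined Hadoop key):

import org.apache.hadoop.conf.Configuration;

public class SetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // store an int under the given property name
        conf.setInt("example.retry.count", 3);
        // read it back; the second argument is the default returned when the key is absent
        int retries = conf.getInt("example.retry.count", 1);
        System.out.println("retries = " + retries); // prints "retries = 3"
    }
}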

Usage

From source file: com.taobao.adfs.distributed.DistributedLockerTest.java

License: Apache License

@Test
public void unlockALockWhichUnlockedByOtherThread() throws Exception {
    Configuration conf = new Configuration(false);
    conf.setInt("distributed.locker.remove.expired.lock.interval", 1);
    ReentrantReadWriteLockExtension getDataLocker = new ReentrantReadWriteLockExtension();

    DistributedLocker locker = new DistributedLocker(conf, new DistributedDataVersion(), getDataLocker);
    assertThat(locker.lock("A", Long.MAX_VALUE, Long.MAX_VALUE, 1) != null, is(true));
    assertThat(locker.unlock("A", 1) != null, is(true));
    assertThat(locker.getLock(1) == null, is(true));
    assertThat(locker.lock("B", Long.MAX_VALUE, Long.MAX_VALUE, 1) != null, is(true));
    assertThat(locker.unlock("B", 1) != null, is(true));
    assertThat(locker.getLock(1) == null, is(true));

    // case: lock->unlock===>unlock-X->lock
    conf.setInt("distributed.locker.remove.expired.lock.interval", 1);
    assertThat(locker.unlockDirectly(
            new DistributedLock(new DeepArray(2), 0, "C", -locker.version.get() - 1, "C", false)) != null,
            is(true));
    assertThat(locker.getLock(2) != null, is(true));
    assertThat(locker.getLock(2).version == -locker.version.get(), is(true));
    conf.setInt("distributed.locker.remove.expired.lock.delay.time", 0);
    locker.lock("A", Long.MAX_VALUE, Long.MAX_VALUE, 1);
    locker.unlock("A", 1);
    assertThat(locker.getLock(2) == null, is(true));
    conf.setInt("distributed.locker.remove.expired.lock.delay.time", 600000);
}

From source file: com.taobao.adfs.distributed.DistributedServer.java

License: Apache License

public static void stopServer(Configuration conf) throws Throwable {
    conf = new Configuration(conf);
    String serverName = getServerName(conf);
    conf.set("distributed.server.name", serverName);
    if (conf.getBoolean("distributed.data.format", false))
        configLogger(conf);
    else {
        conf.set("distributed.logger.levels",
                "org.apache.zookeeper.ZooKeeper=warn,org.apache.zookeeper.ClientCnxn=warn");
        Utilities.setLoggerLevel(conf, null);
    }
    Utilities.logInfo(logger, serverName, " is stopping");

    // try to request distributed server to stop
    conf.setInt("ipc.client.connect.max.retries", 0);
    new DistributedMonitor(conf).stop();
    // kill process if distributed server is still running
    String[] includes = new String[] { serverName.replace("localhost", "127.0.0.1"), "java" };
    String[] fields = new String[] { "4", "7" };
    List<String> addressList = Utilities.getListenAddressList(includes, null, fields);
    for (String address : addressList) {
        if (address.split(",").length < 2)
            continue;
        String distributedServerPid = address.split(",")[1];
        if (distributedServerPid.split("/").length < 2)
            continue;
        distributedServerPid = distributedServerPid.split("/")[0];
        if (distributedServerPid.equals(Utilities.getPidString()))
            continue;
        Utilities.killProcess(distributedServerPid);
        Utilities.logInfo(logger, serverName, " with pid=", distributedServerPid, " is killed");
        break;
    }
    // kill all other processes which specified the data path in the command line
    String commandForKillRelativePids = "ps -ef| grep -v grep|grep " + getDataPath(conf) + "|awk '{print $2}'";
    String subProcessPids = Utilities.runCommand(commandForKillRelativePids, null, null, null).replaceAll("\n",
            ",");
    if (!subProcessPids.isEmpty()) {
        if (subProcessPids.charAt(subProcessPids.length() - 1) == ',')
            subProcessPids = subProcessPids.substring(0, subProcessPids.length() - 1);
        for (String pid : subProcessPids.split(",")) {
            if (pid.equals(Utilities.getPidString()))
                continue;
            Utilities.killProcess(pid);
            Utilities.logInfo(logger, serverName, "'s sub processes with pid=", subProcessPids, " is killed");
        }
    }
    Utilities.logInfo(logger, serverName, " has been stopped");
}

From source file: com.taobao.adfs.distributed.rpc.Client.java

License: Apache License

/**
 * set the ping interval value in configuration
 *
 * @param conf
 *          Configuration
 * @param pingInterval
 *          the ping interval
 */
final public static void setPingInterval(Configuration conf, int pingInterval) {
    conf.setInt(PING_INTERVAL_NAME, pingInterval);
}
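
A hypothetical call site, assuming a previously created Configuration named conf (the 60000 ms value is illustrative):

Configuration conf = new Configuration();
Client.setPingInterval(conf, 60000); // ping every 60 seconds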

From source file: com.tdunning.plume.local.lazy.MapRedExecutor.java

License: Apache License

/**
 * This method returns a Job instance out of a {@link MSCR} entity. It puts the Class of 
 * the {@link PlumeWorkflow} argument and the MSCR id in the hadoop configuration.
 *
 * @param mscr The MSCR to convert 
 * @param workflow The workflow whose class will be instantiated by hadoop mappers/reducers
 * @param outputPath The output path of the MapRed job
 * @return A hadoop-executable MapRed Job
 * 
 * @throws IOException
 */
static Job getMapRed(final MSCR mscr, PlumeWorkflow workFlow, String workFlowOutputPath, String outputPath)
        throws IOException {

    Configuration conf = new Configuration();
    conf.set(WORKFLOW_NAME, workFlow.getClass().getName());
    conf.setInt(MSCR_ID, mscr.getId());
    conf.set(TEMP_OUTPUT_PATH, workFlowOutputPath);

    Job job = new Job(conf, "MSCR"); // TODO deprecation

    job.setMapOutputKeyClass(PlumeObject.class);
    job.setMapOutputValueClass(PlumeObject.class);

    job.setJarByClass(MapRedExecutor.class);

    /**
     * Define multiple inputs
     */
    for (PCollection<?> input : mscr.getInputs()) {
        if (!(input instanceof LazyCollection)) {
            throw new IllegalArgumentException("Can't create MapRed from MSCR whose inputs are not LazyTable");
        }
        LazyCollection<Text> l = (LazyCollection<Text>) input;
        if (!(l.isMaterialized() && l.getFile() != null)) {
            // Collections have plume ID only if they are intermediate results - TODO better naming for this
            if (l.getPlumeId().length() < 1) {
                throw new IllegalArgumentException(
                        "Can't create MapRed from MSCR inputs that are not materialized to a file");
            }
        }
        PCollectionType<?> rType = l.getType();
        Class<? extends InputFormat> format = SequenceFileInputFormat.class;
        if (rType instanceof PTableType) {
            PTableType<?, ?> tType = (PTableType<?, ?>) rType;
            if (tType.valueType() instanceof StringType && tType.keyType() instanceof StringType) {
                format = KeyValueTextInputFormat.class;
            }
            MultipleInputs.addInputPath(job, new Path(l.getFile()), format, MSCRMapper.class);
        } else {
            if (rType.elementType() instanceof StringType) {
                format = TextInputFormat.class;
            }
            MultipleInputs.addInputPath(job, new Path(l.getFile()), format, MSCRMapper.class);
        }
    }
    /**
     * Define multiple outputs
     */
    FileOutputFormat.setOutputPath(job, new Path(outputPath));
    for (Map.Entry<PCollection<?>, Integer> entry : mscr.getNumberedChannels().entrySet()) {
        PCollectionType<?> rType = ((LazyCollection<?>) mscr.getOutputChannels().get(entry.getKey()).output)
                .getType();
        if (rType instanceof PTableType) {
            PTableType<?, ?> tType = (PTableType<?, ?>) rType;
            Class<? extends OutputFormat> outputFormat = SequenceFileOutputFormat.class;
            if (tType.keyType() instanceof StringType && tType.valueType() instanceof StringType) {
                outputFormat = TextOutputFormat.class;
            }
            MultipleOutputs.addNamedOutput(job, entry.getValue() + "", outputFormat,
                    getHadoopType(tType.keyType()), getHadoopType(tType.valueType()));
        } else {
            Class<? extends OutputFormat> outputFormat = SequenceFileOutputFormat.class;
            if (rType.elementType() instanceof StringType) {
                outputFormat = TextOutputFormat.class;
            }
            MultipleOutputs.addNamedOutput(job, entry.getValue() + "", outputFormat, NullWritable.class,
                    getHadoopType(rType.elementType()));
        }
    }
    /**
     * Define Reducer & Combiner
     */
    job.setCombinerClass(MSCRCombiner.class);
    job.setReducerClass(MSCRReducer.class);

    job.setNumReduceTasks(1);
    return job;
}

From source file: com.teradata.benchto.generator.HiveTypesGenerator.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(
            Option.builder("format").required().hasArg().desc("file format (orc, parquet or text)").build());
    options.addOption(Option.builder("type").required().hasArg().desc(
            "hive type to be generated (bigint, int, boolean, double, binary, date, timestamp, string, decimal or varchar)")
            .build());
    options.addOption(Option.builder("rows").required().hasArg().desc("total row count").build());
    options.addOption(Option.builder("mappers").required().hasArg().desc("total mappers count").build());
    options.addOption(Option.builder("path").hasArg()
            .desc("base path for generating files, default is: /benchmarks/benchto/types").build());
    options.addOption(Option.builder("regex").numberOfArgs(3)
            .desc("generate varchars from regex pattern, arguments are: pattern, min length, max length")
            .build());

    CommandLine line;
    String format;
    String hiveType;
    long numberOfRows;
    long numberOfFiles;
    String basePath;
    Optional<String> regexPattern = Optional.absent();
    Optional<Integer> regexMinLength = Optional.absent();
    Optional<Integer> regexMaxLength = Optional.absent();
    try {
        line = new DefaultParser().parse(options, args);
        format = line.getOptionValue("format");
        hiveType = line.getOptionValue("type");
        numberOfRows = parseLong(line.getOptionValue("rows"));
        numberOfFiles = parseLong(line.getOptionValue("mappers"));
        basePath = line.getOptionValue("path", "/benchmarks/benchto/types");
        if (line.hasOption("regex")) {
            String[] values = line.getOptionValues("regex");
            regexPattern = Optional.of(values[0]);
            regexMinLength = Optional.of(parseInt(values[1]));
            regexMaxLength = Optional.of(parseInt(values[2]));
        }
    } catch (Exception e) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("benchto-generator", options);
        throw e;
    }

    String jobName = format("GenerateData-%s-%s-%d", format, hiveType, numberOfRows);
    Path outputDir = new Path(format("%s/%s-%s/%d", basePath, format, hiveType, numberOfRows));
    Class<? extends OutputFormat> outputFormatClass = getOutputFormatClass(format);

    LOG.info("Generating " + numberOfRows + " " + hiveType + "s, directory: " + outputDir
            + ", number of files: " + numberOfFiles);

    Configuration configuration = new Configuration();
    configuration.set(FORMAT_PROPERTY_NAME, format);
    configuration.set(HIVE_TYPE_PROPERTY_NAME, hiveType);
    configuration.setLong(NUM_ROWS_PROPERTY_NAME, numberOfRows);
    configuration.setLong(NUM_MAPS, numberOfFiles);
    if (regexPattern.isPresent()) {
        configuration.set(REGEX_PATTERN, regexPattern.get());
        configuration.setInt(REGEX_MIN_LENGTH, regexMinLength.get());
        configuration.setInt(REGEX_MAX_LENGTH, regexMaxLength.get());
    }

    Job generatorJob = Job.getInstance(configuration, jobName);
    FileOutputFormat.setOutputPath(generatorJob, outputDir);
    ParquetOutputFormat.setWriteSupportClass(generatorJob, DataWritableWriteSupport.class);
    generatorJob.setJarByClass(HiveTypesGenerator.class);
    generatorJob.setMapperClass(HiveTypesMapper.class);
    generatorJob.setNumReduceTasks(0);
    generatorJob.setOutputKeyClass(NullWritable.class);
    generatorJob.setOutputValueClass(Writable.class);
    generatorJob.setInputFormatClass(CounterInputFormat.class);
    generatorJob.setOutputFormatClass(outputFormatClass);

    return generatorJob.waitForCompletion(true) ? 0 : 1;
}

From source file: com.toddbodnar.simpleHive.subQueries.colStats.java

@Override
public void writeConfig(Configuration conf) {
    conf.setInt("SIMPLE_HIVE.COLSTATS.COLUMN", statsCol);
    conf.setInt("SIMPLE_HIVE.COLSTATS.GROUPBY", groupByCol);
    conf.set("SIMPLE_HIVE.COLSTATS.INPUT_SEPERATOR",
            controlCharacterConverter.convertToReadable(getInput().getSeperator()));
}

From source file: com.toddbodnar.simpleHive.subQueries.join.java

@Override
public void writeConfig(Configuration conf) {
    conf.set("SIMPLE_HIVE.JOIN.INPUT_SEPERATOR.1",
            controlCharacterConverter.convertToReadable(getInput().getSeperator()));
    conf.setInt("SIMPLE_HIVE.JOIN.KEY.1", mainKey);

    conf.set("SIMPLE_HIVE.JOIN.INPUT_SEPERATOR.2",
            controlCharacterConverter.convertToReadable(getOtherInput().getSeperator()));
    conf.setInt("SIMPLE_HIVE.JOIN.KEY.2", otherKey);
}

From source file: com.tomslabs.grid.avro.AvroWordCount.java

License: Apache License

public static Job createSubmitableJob(final Configuration conf, final Path inputPath, final Path outputPath)
        throws IOException {

    conf.set(AvroFileOutputFormat.OUTPUT_SCHEMA, WordCountSchema.getSchema().toString());

    conf.setInt("mapred.max.split.size", 1024000);
    conf.setInt("mapred.reduce.tasks", 10);
    conf.setBoolean("mapred.reduce.tasks.speculative.execution", true);
    final Job job = new Job(conf, "Word Count");
    job.setJarByClass(AvroWordCount.class);

    job.setInputFormatClass(AvroFileInputFormat.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setMapperClass(WordCountMapper.class);

    job.setReducerClass(WordCountReducer.class);

    job.setOutputKeyClass(GenericRecord.class);
    job.setOutputValueClass(NullWritable.class);
    job.setOutputFormatClass(AvroFileOutputFormat.class);
    AvroFileOutputFormat.setDeflateLevel(job, 3);

    FileInputFormat.addInputPath(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);

    return job;
}

From source file: com.trendmicro.hdfs.webdav.Main.java

License: Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("com.trendmicro");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}

From source file: com.tuplejump.calliope.hadoop.ConfigHelper.java

License: Apache License

public static void setThriftFramedTransportSizeInMb(Configuration conf, int frameSizeInMB) {
    conf.setInt(THRIFT_FRAMED_TRANSPORT_SIZE_IN_MB, frameSizeInMB);
}