Example usage for org.apache.hadoop.conf Configuration setInt

List of usage examples for org.apache.hadoop.conf Configuration setInt

Introduction

On this page you can find usage examples for org.apache.hadoop.conf Configuration setInt.

Prototype

public void setInt(String name, int value) 

Document

Set the value of the name property to an int.
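
A minimal, self-contained sketch of the call documented above (the property name is made up for illustration):

Configuration conf = new Configuration();
// store an int under a property name of our choosing
conf.setInt("example.retries.max", 5);
// read it back, falling back to a default when the key is absent
int retries = conf.getInt("example.retries.max", 3);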

Usage

From source file:com.twitter.hraven.hadoopJobMonitor.AppStatusCheckerTest.java

License:Apache License

@Test
public void testLongJob() throws IOException, ConfigurationAccessException, YarnException {
    Configuration remoteAppConf = new Configuration();
    remoteAppConf.setInt(HadoopJobMonitorConfiguration.JOB_MAX_LEN_MIN, 10);
    remoteAppConf.setBoolean(
            HadoopJobMonitorConfiguration.enforced(HadoopJobMonitorConfiguration.JOB_MAX_LEN_MIN), true);
    when(appReport.getStartTime()).thenReturn(now - 15 * MIN);
    vConf.setBoolean(HadoopJobMonitorConfiguration.DRY_RUN, false);

    AppConfiguraiton appConf = new AppConfiguraiton(remoteAppConf, vConf);
    AppConfCache.getInstance().put(appId, appConf);
    appStatusChecker.init();

    boolean res = appStatusChecker.checkApp();
    Assert.assertFalse("does not fail job duration check even though enforce is set", res);
    verify(rm, times(1)).killApplication(appId);
}

From source file:com.twitter.hraven.hadoopJobMonitor.AppStatusCheckerTest.java

License:Apache License

@Test
public void testShortJob() throws IOException, ConfigurationAccessException {
    Configuration remoteAppConf = new Configuration();
    remoteAppConf.setInt(HadoopJobMonitorConfiguration.JOB_MAX_LEN_MIN, 10);
    remoteAppConf.setBoolean(
            HadoopJobMonitorConfiguration.enforced(HadoopJobMonitorConfiguration.JOB_MAX_LEN_MIN), true);
    when(appReport.getStartTime()).thenReturn(now - 5 * MIN);

    AppConfiguraiton appConf = new AppConfiguraiton(remoteAppConf, vConf);
    AppConfCache.getInstance().put(appId, appConf);
    appStatusChecker.init();

    boolean res = appStatusChecker.checkApp();
    Assert.assertTrue("fails job duration check even though the job is not too long", res);
}

From source file:com.uber.hoodie.common.table.log.HoodieLogFormatAppendFailureTest.java

License:Apache License

@BeforeClass
public static void setUpClass() throws IOException {
    // NOTE: MiniDFSCluster leaves behind the directory under which the cluster was created
    baseDir = new File("/tmp/" + UUID.randomUUID().toString());
    FileUtil.fullyDelete(baseDir);
    // Append is not supported in LocalFileSystem. HDFS needs to be set up.
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    // lower heartbeat interval for fast recognition of DN
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(4).build();
}
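
The quoted setup does not show the matching teardown. A minimal sketch of one, assuming the test only needs to stop the cluster and remove the temporary directory mentioned in the NOTE above:

@AfterClass
public static void tearDownClass() {
    if (cluster != null) {
        cluster.shutdown();
    }
    // remove the base directory that MiniDFSCluster leaves behind
    FileUtil.fullyDelete(baseDir);
}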

From source file:com.uber.hoodie.hive.util.HiveTestService.java

License:Apache License

private HiveConf configureHive(Configuration conf, String localHiveLocation) throws IOException {
    conf.set("hive.metastore.local", "false");
    conf.set(HiveConf.ConfVars.METASTOREURIS.varname, "thrift://" + bindIP + ":" + metastorePort);
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST.varname, bindIP);
    conf.setInt(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, serverPort);
    // The following line to turn off SASL has no effect since HiveAuthFactory calls
    // 'new HiveConf()'. This is fixed by https://issues.apache.org/jira/browse/HIVE-6657
    // in Hive 0.14.
    // As a workaround, the property is set in hive-site.xml in this module.
    //conf.set(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, "NOSASL");
    File localHiveDir = new File(localHiveLocation);
    localHiveDir.mkdirs();
    File metastoreDbDir = new File(localHiveDir, "metastore_db");
    conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
            "jdbc:derby:" + metastoreDbDir.getPath() + ";create=true");
    File derbyLogFile = new File(localHiveDir, "derby.log");
    derbyLogFile.createNewFile();
    setSystemProperty("derby.stream.error.file", derbyLogFile.getPath());
    conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, Files.createTempDir().getAbsolutePath());
    conf.set("datanucleus.schema.autoCreateTables", "true");
    conf.set("hive.metastore.schema.verification", "false");
    setSystemProperty("derby.stream.error.file", derbyLogFile.getPath());

    return new HiveConf(conf, this.getClass());
}

From source file:com.wandisco.s3hdfs.rewrite.filter.TestBase.java

License:Apache License

/**
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
    Configuration conf = new HdfsConfiguration(new S3HdfsConfiguration());
    conf.setInt(S3_PROXY_PORT_KEY, PROXY_PORT);
    conf.setBoolean(DFS_WEBHDFS_ENABLED_KEY, true);
    conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100);
    conf.setLong(DFS_BLOCK_SIZE_KEY, 1024);
    conf.setLong(DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 512);
    // ^ has to be a multiple of 512
    FsPermission.setUMask(conf, FsPermission.createImmutable((short) 0));
    // ^ eliminate the UMask in HDFS to remove perm denied exceptions in s3Dir
    hostName = conf.get(S3_SERVICE_HOSTNAME_KEY);
    System.out.println("S3HDFS ServiceHostName: " + hostName);

    s3Directory = conf.get(S3_DIRECTORY_KEY);
    cluster = new MiniDFSCluster.Builder(conf).nameNodeHttpPort(HTTP_PORT).numDataNodes(3).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();

    //initialize s3 directory
    Path s3Path = new Path(s3Directory);
    assertTrue(hdfs.mkdirs(s3Path));

    testUtil = new S3HdfsTestUtil(hdfs, s3Directory);
    s3Service = testUtil.configureS3Service(hostName, PROXY_PORT);
}

From source file:com.wibidata.maven.plugins.hbase.MiniHBaseCluster.java

License:Apache License

/**
 * Configures an HBase testing utility.
 *
 * @param testUtil The test utility to configure.
 * @return The configured utility.
 */
private static HBaseTestingUtility configure(HBaseTestingUtility testUtil) {
    // If HBase servers are running locally, the utility will use
    // the "normal" ports. We override *all* ports first, so that
    // we ensure that this can start without a problem.
    Configuration conf = testUtil.getConfiguration();

    int offset = new Random(System.currentTimeMillis()).nextInt(1500) + 500;

    // Move the master to a hopefully unused port.
    conf.setInt(HConstants.MASTER_PORT, findOpenPort(HConstants.DEFAULT_MASTER_PORT + offset));
    // Disable the master's web UI.
    conf.setInt("hbase.master.info.port", -1);

    // Move the regionserver to a hopefully unused port.
    conf.setInt(HConstants.REGIONSERVER_PORT, findOpenPort(HConstants.DEFAULT_REGIONSERVER_PORT + offset));
    // Disable the regionserver's web UI.
    conf.setInt("hbase.regionserver.info.port", -1);

    // Increase max zookeeper client connections.
    conf.setInt("hbase.zookeeper.property.maxClientCnxns", 80);

    // TODO(gwu): Increasing the port numbers by a constant is not sufficient for multiple
    // executions of this plugin on the same machine.  Allow this to be specified as a
    // maven plugin parameter.

    return testUtil;
}
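
The findOpenPort helper used above is not shown on this page. A minimal sketch of what such a helper might look like, probing upward from the suggested port with a plain ServerSocket (an illustrative assumption, not the plugin's actual implementation):

private static int findOpenPort(int startPort) {
    for (int port = startPort; port < 65536; port++) {
        // try to bind; if it succeeds, the port is free
        try (java.net.ServerSocket socket = new java.net.ServerSocket(port)) {
            socket.setReuseAddress(true);
            return port;
        } catch (java.io.IOException portInUse) {
            // port is taken, try the next one
        }
    }
    throw new IllegalStateException("No open port found above " + startPort);
}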

From source file:com.willetinc.hadoop.mapreduce.dynamodb.AbstractSplitterTest.java

License:Apache License

@Test
public void testSplitWithHashKeyAndInterpolation() throws IOException {

    final int NUM_MAP_TASKS = 2;
    final String VALUE = "007";
    final Types hashKeyType = Types.NUMBER;
    final AttributeValue hashKeyValue = new AttributeValue().withN(VALUE);
    final Types rangeKeyType = Types.NUMBER;
    final AttributeValue minRangeKeyValue = new AttributeValue().withN(Long.toString(Long.MIN_VALUE));
    final AttributeValue maxRangeKeyValue = new AttributeValue().withN(Long.toString(Long.MAX_VALUE));

    DynamoDBSplitter splitter = new AbstractSplitter() {

        @Override
        public void generateRangeKeySplits(Configuration conf, List<InputSplit> splits, Types inHashKeyType,
                AttributeValue inHashKeyValue, Types inRangeKeyType, AttributeValue inMinRangeKeyValue,
                AttributeValue inMaxRangeKeyValue, int numRangeSplits) {

            // check values
            assertEquals(hashKeyType, inHashKeyType);
            assertEquals(hashKeyValue, inHashKeyValue);
            assertEquals(rangeKeyType, inRangeKeyType);
            assertEquals(minRangeKeyValue, inMinRangeKeyValue);
            assertEquals(maxRangeKeyValue, inMaxRangeKeyValue);
            assertEquals(NUM_MAP_TASKS, numRangeSplits);

            List<AttributeValue> rangeKeyValues = new ArrayList<AttributeValue>();
            rangeKeyValues.add(inMinRangeKeyValue);
            rangeKeyValues.add(inMaxRangeKeyValue);

            DynamoDBQueryInputFormat.DynamoDBQueryInputSplit split = new DynamoDBQueryInputFormat.DynamoDBQueryInputSplit(
                    hashKeyType, hashKeyValue, rangeKeyType, rangeKeyValues, ComparisonOperator.BETWEEN);

            splits.add(split);
        }
    };

    // configure job
    Job job = new Job();
    Configuration conf = job.getConfiguration();
    conf.setInt("mapred.map.tasks", NUM_MAP_TASKS);
    DynamoDBQueryInputFormat.setHashKeyValue(conf, hashKeyType, hashKeyValue);
    DynamoDBQueryInputFormat.setRangeKeyType(conf, rangeKeyType);
    DynamoDBQueryInputFormat.setRangeKeyInterpolateRange(conf, rangeKeyType, minRangeKeyValue,
            maxRangeKeyValue);

    // generate input splits
    List<InputSplit> splits = splitter.split(conf);
    assertEquals(1, splits.size());

    DynamoDBQueryInputSplit split = (DynamoDBQueryInputSplit) splits.get(0);

    // check results
    assertTrue(split.hasHashKey());
    assertEquals(hashKeyType, split.getHashKeyType());
    assertEquals(hashKeyValue, split.getHashKeyValue());

    assertTrue(split.hasRangeKey());
    assertEquals(rangeKeyType, split.getRangeKeyType());
    assertEquals(ComparisonOperator.BETWEEN, split.getRangeKeyOperator());
    assertEquals(2, split.getRangeKeyValues().size());

    Iterator<AttributeValue> itr = split.getRangeKeyValues().iterator();
    assertEquals(minRangeKeyValue, itr.next());
    assertEquals(maxRangeKeyValue, itr.next());
}

From source file:com.willetinc.hadoop.mapreduce.dynamodb.DynamoDBQueryInputFormat.java

License:Apache License

public static void setHashKeyType(Configuration conf, Types type) {
    conf.setInt(DynamoDBConfiguration.HASH_KEY_TYPE_PROPERTY, type.ordinal());
}

From source file:com.willetinc.hadoop.mapreduce.dynamodb.DynamoDBQueryInputFormat.java

License:Apache License

public static void setRangeKeyType(Configuration conf, Types type) {
    conf.setInt(DynamoDBConfiguration.RANGE_KEY_TYPE_PROPERTY, type.ordinal());
}

From source file:com.willetinc.hadoop.mapreduce.dynamodb.DynamoDBQueryInputFormat.java

License:Apache License

public static void setRangeKeyComparisonOperator(Configuration conf, ComparisonOperator operator) {
    conf.setInt(DynamoDBConfiguration.RANGE_KEY_OPERATOR_PROPERTY, operator.ordinal());
}