Example usage for org.apache.hadoop.conf Configuration setInt

List of usage examples for org.apache.hadoop.conf Configuration setInt

Introduction

This page lists example usages of org.apache.hadoop.conf.Configuration.setInt, collected from open source projects.

Prototype

public void setInt(String name, int value) 

Document

Set the value of the name property to an int.
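
As a quick illustration before the project examples below, here is a minimal sketch (not taken from any of the source files; the property name "my.example.threads" is made up for demonstration). setInt stores the value under the given name, and getInt reads it back, falling back to the supplied default when the property is unset:

import org.apache.hadoop.conf.Configuration;

public class SetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property not set yet: getInt returns the default value (8).
        int before = conf.getInt("my.example.threads", 8);

        // setInt stores the int; getInt now returns it instead of the default.
        conf.setInt("my.example.threads", 16);
        int after = conf.getInt("my.example.threads", 8);

        System.out.println(before + " -> " + after); // 8 -> 16
    }
}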

Usage

From source file: io.covert.dns.collection.CollectionJob.java

License: Apache License

@Override
public int run(String[] args) throws Exception {

    if (args.length != 4) {
        usage("");
    }

    String dclass = args[0];
    String types = args[1];
    String inDir = args[2];
    String outDir = args[3];

    Configuration conf = getConf();

    if (conf.get("dns.collection.num.resolvers") == null)
        conf.setInt("dns.collection.num.resolvers", 50);
    if (conf.get("dns.collection.nameservers") == null)
        conf.set("dns.collection.nameservers", "127.0.0.1");

    Job job = new Job(conf);
    job.setJobName(CollectionJob.class.getSimpleName() + ": types=" + types + ", dclass=" + dclass + " inDir="
            + inDir + ", outDir=" + outDir + ", resolvers=" + conf.get("dns.collection.nameservers"));
    job.setJarByClass(getClass());

    job.setMapperClass(CollectionMapper.class);
    job.setNumReduceTasks(0);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(BytesWritable.class);

    job.setInputFormatClass(DnsRequestInputFormat.class);
    DnsRequestInputFormat.setInputPaths(job, new Path(inDir));
    DnsRequestInputFormat.configure(job, dclass.toUpperCase(), Arrays.asList(types.split(",")),
            Arrays.asList(""));

    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    SequenceFileOutputFormat.setOutputPath(job, new Path(outDir));
    SequenceFileOutputFormat.setCompressOutput(job, true);
    job.submit();

    int retVal = job.waitForCompletion(true) ? 0 : 1;

    CounterGroup counters = job.getCounters().getGroup(CollectionMapper.RESOLVER_GROUP);
    Counter constructMessageMS = counters.findCounter(CollectionMapper.CONSTRUCT_MESSAGE_MS);
    Counter parseResponseMS = counters.findCounter(CollectionMapper.PARSE_RESPONSE_MS);
    Counter performRequestMS = counters.findCounter(CollectionMapper.PERFORM_REQUEST_MS);
    Counter totalRequestHandlingMS = counters.findCounter(CollectionMapper.TOTAL_REQUEST_HANDLING_MS);

    Log.info("Total ConstructMessage percent: "
            + (double) (constructMessageMS.getValue() * 100L) / ((double) totalRequestHandlingMS.getValue()));
    Log.info("Total ParseResponse percent:    "
            + (double) (parseResponseMS.getValue() * 100L) / ((double) totalRequestHandlingMS.getValue()));
    Log.info("Total PerformRequest percent:   "
            + (double) (performRequestMS.getValue() * 100L) / ((double) totalRequestHandlingMS.getValue()));

    return retVal;
}
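
Note how this job calls setInt only when "dns.collection.num.resolvers" is absent, so a value supplied at launch time (for example with -D when the job is run through ToolRunner) is not overwritten. A minimal sketch of that "default only if unset" idiom, using a hypothetical helper name (Configuration provides setIfUnset(String, String) and setBooleanIfUnset, but no int variant, hence the explicit null check):

// Hypothetical helper illustrating the pattern used above.
static void setIntIfUnset(Configuration conf, String name, int defaultValue) {
    // Only install the default when nothing has set the property yet.
    if (conf.get(name) == null) {
        conf.setInt(name, defaultValue);
    }
}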

From source file: io.covert.dns.storage.accumulo.AccumuloStorageModuleFactory.java

License: Apache License

public static void configure(Job job, String inst, String zooKeepers, String user, String password,
        long maxMemory, long maxLatency, int maxWriteThreads,
        Collection<Class<? extends MutationGeneratorFactory>> generatorFactoryClasses) {
    Configuration conf = job.getConfiguration();
    StringBuilder factories = new StringBuilder();
    boolean first = true;

    for (Class<? extends MutationGeneratorFactory> clz : generatorFactoryClasses) {
        if (first) {
            first = false;
            factories.append(clz.getName());
        } else {
            factories.append(",").append(clz.getName());
        }
    }

    conf.set("storage.module.factory", AccumuloStorageModuleFactory.class.getName());
    conf.set("accumulo.storage.module.mutation.generator.factories", factories.toString());
    conf.set("accumulo.storage.module.instance.name", inst);
    conf.set("accumulo.storage.module.zookeepers", zooKeepers);
    conf.set("accumulo.storage.module.user", user);
    conf.set("accumulo.storage.module.password", password);
    conf.setLong("accumulo.storage.module.max.memory", maxMemory);
    conf.setLong("accumulo.storage.module.max.latency", maxLatency);
    conf.setInt("accumulo.storage.module.max.write.threads", maxWriteThreads);
}

From source file: io.dataapps.chlorine.hadoop.HDFSScanMR.java

License: Apache License

public static Job makeJob(Configuration conf, Path in, Path out, String matchPath, long scanSince,
        String chlorineConfigFilePath, String queue, String maskPath) throws IOException {
    conf.setBoolean("mapred.output.compress", false);
    conf.setLong("scanSince", scanSince);
    conf.set("matchPath", matchPath);
    conf.set("maskPath", maskPath);
    conf.set("inputPath", in.toString());
    if (queue != null) {
        conf.set("mapred.job.queue.name", queue);
    }
    conf.set("fs.permissions.umask-mode", "007");
    conf.setInt("input_path_depth", in.depth());
    Job job = Job.getInstance(conf, "Chlorine_HDFS_Scan");
    job.setJarByClass(HDFSScanMR.class);
    if (chlorineConfigFilePath != null) {
        try {
            job.addCacheFile(new URI(chlorineConfigFilePath));
            conf.set("finder_file", (new File(chlorineConfigFilePath)).getName());
        } catch (URISyntaxException e) {
            LOG.error(e);
        }
    }
    job.setMapperClass(DeepScanMapper.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.addInputPath(job, in);
    TextInputFormat.setInputDirRecursive(job, true);
    TextInputFormat.setInputPathFilter(job, NewFilesFilter.class);
    FileOutputFormat.setOutputPath(job, out);
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
    return job;
}

From source file: io.hops.security.TestUsersGroups.java

License: Apache License

@Test
public void testAddUsers() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.HOPS_GROUPS_UPDATER_ROUND, 10);
    HdfsStorageFactory.setConfiguration(conf);
    HdfsStorageFactory.formatStorage();

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    int userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);
    assertEquals(UsersGroups.getUser(userId), "user");

    int groupId = UsersGroups.getGroupID("group1");
    assertNotSame(0, groupId);
    assertEquals(UsersGroups.getGroup(groupId), "group1");

    assertEquals(UsersGroups.getGroups("user"), Arrays.asList("group1", "group2"));

    removeUser(userId);

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    // Wait for the group updater (configured above via setInt to a 10-second round) to kick in
    try {
        Thread.sleep(10500);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    userId = UsersGroups.getUserID("user");
    assertEquals(0, userId);
    assertNull(UsersGroups.getGroups("user"));

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    assertEquals(Arrays.asList("group1", "group2"), UsersGroups.getGroups("user"));

    removeUser(userId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group3" });

    int newUserId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);
    assertEquals(userId, newUserId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    assertEquals(Arrays.asList("group3", "group1", "group2"), UsersGroups.getGroups("user"));
}

From source file: io.hops.transaction.TestTransaction.java

License: Apache License

@BeforeClass
public static void setupCluster() throws Exception {
    Configuration conf = new HdfsConfiguration();

    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 10);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).storagesPerDatanode(1).build();

    cluster.waitActive();

}

From source file: io.prestosql.plugin.hive.HdfsConfigurationInitializer.java

License: Apache License

public void initializeConfiguration(Configuration config) {
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class,
            DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class,
                SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
    gcsConfigurationInitialize.updateConfiguration(config);
}
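
The timeout-related setInt calls above pass durations through toIntExact, which throws ArithmeticException instead of silently truncating if the millisecond count does not fit into an int. A minimal sketch of the same guard, written against java.time.Duration rather than the Duration type used in this class:

import java.time.Duration;
import org.apache.hadoop.conf.Configuration;

class TimeoutConfig {
    // Store a timeout in milliseconds, failing fast on int overflow.
    static void setTimeoutMillis(Configuration conf, String key, Duration timeout) {
        conf.setInt(key, Math.toIntExact(timeout.toMillis()));
    }
}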

From source file: io.prestosql.plugin.hive.s3.PrestoS3ConfigurationUpdater.java

License: Apache License

@Override
public void updateConfiguration(Configuration config) {
    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());

    if (awsAccessKey != null) {
        config.set(S3_ACCESS_KEY, awsAccessKey);
    }
    if (awsSecretKey != null) {
        config.set(S3_SECRET_KEY, awsSecretKey);
    }
    if (endpoint != null) {
        config.set(S3_ENDPOINT, endpoint);
    }
    if (signerType != null) {
        config.set(S3_SIGNER_TYPE, signerType.name());
    }
    config.setBoolean(S3_PATH_STYLE_ACCESS, pathStyleAccess);
    config.setBoolean(S3_USE_INSTANCE_CREDENTIALS, useInstanceCredentials);
    config.setBoolean(S3_SSL_ENABLED, sslEnabled);
    config.setBoolean(S3_SSE_ENABLED, sseEnabled);
    config.set(S3_SSE_TYPE, sseType.name());
    if (encryptionMaterialsProvider != null) {
        config.set(S3_ENCRYPTION_MATERIALS_PROVIDER, encryptionMaterialsProvider);
    }
    if (kmsKeyId != null) {
        config.set(S3_KMS_KEY_ID, kmsKeyId);
    }
    if (sseKmsKeyId != null) {
        config.set(S3_SSE_KMS_KEY_ID, sseKmsKeyId);
    }
    config.setInt(S3_MAX_CLIENT_RETRIES, maxClientRetries);
    config.setInt(S3_MAX_ERROR_RETRIES, maxErrorRetries);
    config.set(S3_MAX_BACKOFF_TIME, maxBackoffTime.toString());
    config.set(S3_MAX_RETRY_TIME, maxRetryTime.toString());
    config.set(S3_CONNECT_TIMEOUT, connectTimeout.toString());
    config.set(S3_SOCKET_TIMEOUT, socketTimeout.toString());
    config.set(S3_STAGING_DIRECTORY, stagingDirectory.toString());
    config.setInt(S3_MAX_CONNECTIONS, maxConnections);
    config.setLong(S3_MULTIPART_MIN_FILE_SIZE, multipartMinFileSize.toBytes());
    config.setLong(S3_MULTIPART_MIN_PART_SIZE, multipartMinPartSize.toBytes());
    config.setBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, pinClientToCurrentRegion);
    config.set(S3_USER_AGENT_PREFIX, userAgentPrefix);
    config.set(S3_ACL_TYPE, aclType.name());
}

From source file: io.prestosql.plugin.hive.s3.TestPrestoS3FileSystem.java

License: Apache License

@SuppressWarnings({ "ResultOfMethodCallIgnored", "OverlyStrongTypeCast", "ConstantConditions" })
@Test
public void testReadRetryCounters() throws Exception {
    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        int maxRetries = 2;
        MockAmazonS3 s3 = new MockAmazonS3();
        s3.setGetObjectHttpErrorCode(HTTP_INTERNAL_ERROR);
        Configuration configuration = new Configuration();
        configuration.set(S3_MAX_BACKOFF_TIME, "1ms");
        configuration.set(S3_MAX_RETRY_TIME, "5s");
        configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries);
        fs.initialize(new URI("s3n://test-bucket/"), configuration);
        fs.setS3Client(s3);
        try (FSDataInputStream inputStream = fs.open(new Path("s3n://test-bucket/test"))) {
            inputStream.read();
        } catch (Throwable expected) {
            assertInstanceOf(expected, AmazonS3Exception.class);
            assertEquals(((AmazonS3Exception) expected).getStatusCode(), HTTP_INTERNAL_ERROR);
            assertEquals(PrestoS3FileSystem.getFileSystemStats().getReadRetries().getTotalCount(), maxRetries);
            assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetObjectRetries().getTotalCount(),
                    (maxRetries + 1L) * maxRetries);
        }
    }
}

From source file: io.prestosql.plugin.hive.s3.TestPrestoS3FileSystem.java

License: Apache License

@SuppressWarnings({ "OverlyStrongTypeCast", "ConstantConditions" })
@Test
public void testGetMetadataRetryCounter() {
    int maxRetries = 2;
    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        MockAmazonS3 s3 = new MockAmazonS3();
        s3.setGetObjectMetadataHttpCode(HTTP_INTERNAL_ERROR);
        Configuration configuration = new Configuration();
        configuration.set(S3_MAX_BACKOFF_TIME, "1ms");
        configuration.set(S3_MAX_RETRY_TIME, "5s");
        configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries);
        fs.initialize(new URI("s3n://test-bucket/"), configuration);
        fs.setS3Client(s3);
        fs.getS3ObjectMetadata(new Path("s3n://test-bucket/test"));
    } catch (Throwable expected) {
        assertInstanceOf(expected, AmazonS3Exception.class);
        assertEquals(((AmazonS3Exception) expected).getStatusCode(), HTTP_INTERNAL_ERROR);
        assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetMetadataRetries().getTotalCount(),
                maxRetries);
    }
}

From source file: io.svectors.hbase.cdc.BaseTest.java

License: Apache License

@Before
public void setUp() throws Exception {
    final Configuration hbaseConf = HBaseConfiguration.create();
    hbaseConf.setInt("replication.stats.thread.period.seconds", 5);
    hbaseConf.setLong("replication.sleep.before.failover", 2000);
    hbaseConf.setInt("replication.source.maxretriesmultiplier", 10);
    hbaseConf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);

    // add kafka properties. we prefix each property with kafka
    addKafkaProperties(hbaseConf);

    utility = new HBaseTestingUtility(hbaseConf);
    utility.startMiniCluster();
    numRegionServers = utility.getHBaseCluster().getRegionServerThreads().size();

    // setup kafka
    kafkaServer = new KafkaServer(utility.getZkCluster().getClientPort(), 9092);

}