Example usage for org.apache.hadoop.conf Configuration setLong

List of usage examples for org.apache.hadoop.conf Configuration setLong

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration#setLong.

Prototype

public void setLong(String name, long value) 

Document

Set the value of the name property to a long.
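
A minimal, self-contained sketch of setting and reading back a long property (the property name "demo.sample.bytes" is made up for illustration; getLong's second argument is the default returned when the key is absent):

import org.apache.hadoop.conf.Configuration;

public class SetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a long value under an illustrative property name.
        conf.setLong("demo.sample.bytes", 128L * 1024 * 1024);
        // Read it back; 0L would be returned if the key were absent.
        long bytes = conf.getLong("demo.sample.bytes", 0L);
        System.out.println(bytes); // prints 134217728
    }
}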

Usage

From source file:com.inmobi.conduit.distcp.tools.CopyListing.java

License:Apache License
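
In buildListing below, setLong records the total bytes to be copied and the total number of paths in the job Configuration once the listing has been built.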

/**
 * Build listing function creates the input listing that distcp uses to
 * perform the copy.
 *
 * The build listing is a sequence file that has relative path of a file in the key
 * and the file status information of the source file in the value
 *
 * For instance if the source path is /tmp/data and the traversed path is
 * /tmp/data/dir1/dir2/file1, then the sequence file would contain
 *
 * key: /dir1/dir2/file1 and value: FileStatus(/tmp/data/dir1/dir2/file1)
 *
 * File would also contain directory entries. Meaning, if /tmp/data/dir1/dir2/file1
 * is the only file under /tmp/data, the resulting sequence file would contain the
 * following entries
 *
 * key: /dir1 and value: FileStatus(/tmp/data/dir1)
 * key: /dir1/dir2 and value: FileStatus(/tmp/data/dir1/dir2)
 * key: /dir1/dir2/file1 and value: FileStatus(/tmp/data/dir1/dir2/file1)
 *
 * Cases requiring special handling:
 * If source path is a file (/tmp/file1), contents of the file will be as follows
 *
 * TARGET DOES NOT EXIST: Key-"", Value-FileStatus(/tmp/file1)
 * TARGET IS FILE       : Key-"", Value-FileStatus(/tmp/file1)
 * TARGET IS DIR        : Key-"/file1", Value-FileStatus(/tmp/file1)
 *
 * @param pathToListFile - Output file where the listing would be stored
 * @param options - Input options to distcp
 * @throws IOException - Exception if any
 */
public final void buildListing(Path pathToListFile, DistCpOptions options) throws IOException {
    validatePaths(options);
    doBuildListing(pathToListFile, options);
    Configuration config = getConf();

    config.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, pathToListFile.toString());
    config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, getBytesToCopy());
    config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, getNumberOfPaths());

    checkForDuplicates(pathToListFile);
}

From source file:com.kenshoo.integrations.plugins.connectors.GCSFileProtocol.java

License:Apache License
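
Here setLong stores the OAuth access token's creation time and expiration interval; each call is guarded by a null check because the source values are boxed Longs.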

@Override
public void setupAuthentication(GenericConfiguration genericConfiguration, Configuration configuration,
        String arg1, String arg2) {
    configuration.setBoolean("fs.gcsfs.impl.disable.cache", true);
    configuration.setStrings("fs.gcsfs.impl", "com.kenshoo.integrations.plugins.connectors.GCSFileSystem");

    String accessToken = genericConfiguration.getStringProperty(PROPERTY_KEY_ACCESS_TOKEN, null);
    if (accessToken != null) {
        configuration.setStrings(PROPERTY_KEY_ACCESS_TOKEN, accessToken);
    }
    String refreshToken = genericConfiguration.getStringProperty(PROPERTY_KEY_REFRESH_TOKEN, null);
    if (refreshToken != null) {
        configuration.setStrings(PROPERTY_KEY_REFRESH_TOKEN, refreshToken);
    }
    Long accessTokenCreationTime = genericConfiguration.getLongProperty(PROPERTY_KEY_ACCESS_TOKEN_CREATION_TIME,
            null);
    if (accessTokenCreationTime != null) {
        configuration.setLong(PROPERTY_KEY_ACCESS_TOKEN_CREATION_TIME, accessTokenCreationTime);
    }
    Long accessTokenExpirationAfter = genericConfiguration
            .getLongProperty(PROPERTY_KEY_ACCESS_TOKEN_EXPIRES_AFTER, null);
    if (accessTokenExpirationAfter != null) {
        configuration.setLong(PROPERTY_KEY_ACCESS_TOKEN_EXPIRES_AFTER, accessTokenExpirationAfter);
    }
    String relayURL = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_RELAY_URL, null);
    if (relayURL != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_RELAY_URL, relayURL);
    }
    String clientId = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_CLIENT_ID, null);
    if (clientId != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_CLIENT_ID, clientId);
    }
    String secret = genericConfiguration.getStringProperty(PROPERTY_KEY_OAUTH_SECRET, null);
    if (secret != null) {
        configuration.setStrings(PROPERTY_KEY_OAUTH_SECRET, secret);
    }
}

From source file:com.koda.integ.hbase.test.OffHeapBlockCacheMultiPerfTest.java

License:Open Source License
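
This test setup uses setLong to size the external storage tier: the overflow storage memory size and the per-file size limit of the file-backed (L3) cache.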

/**
 * Sets up the test fixture.
 *
 * @throws Exception the exception
 */
protected static void setUp() throws Exception {

    Configuration config = new Configuration();

    // Set L2 config
    config.set(OffHeapBlockCache.BLOCK_CACHE_MEMORY_SIZE, Long.toString(sRAMCacheSize));

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, true);
    config.setLong(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE,
            (long) (sDiskMetaRatio * sRAMCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_COMPRESSION, "LZ4");

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_PERSISTENT, sIsPersistent);

    config.set(OffHeapBlockCache.BLOCK_CACHE_DATA_ROOTS, sSystemDataDir);

    // Set L3 config 
    config.set(FileExtStorage.FILE_STORAGE_BASE_DIR, baseDir);
    // 120G
    config.set(FileExtStorage.FILE_STORAGE_MAX_SIZE, Long.toString(sDiskCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_IMPL,
            "com.koda.integ.hbase.storage.FileExtMultiStorage");
    // 2G file size limit
    config.setLong(FileExtStorage.FILE_STORAGE_FILE_SIZE_LIMIT, sMaxFileSize);
    // 8MB buffer size 
    config.setInt(FileExtStorage.FILE_STORAGE_BUFFER_SIZE, 8 * 1024 * 1024);

    config.setInt(FileExtStorage.FILE_STORAGE_NUM_BUFFERS, 2);

    if (!sIsPersistent) {
        //checkDir();
        deleteData();
    } else {
        // Set deserializer
        CacheableSerializer.setSerializer(ByteArrayCacheable.deserializer);
    }

    // Create block cache      
    sCache = new OffHeapBlockCache(config);

    sStorage = (FileExtMultiStorage) sCache.getExternalStorage();

}

From source file:com.koda.integ.hbase.test.OffHeapBlockCachePerfTest.java

License:Open Source License
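
This setup mirrors the previous test but uses a single FileExtStorage rather than FileExtMultiStorage; the setLong calls are the same.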

/**
 * Sets up the test fixture.
 *
 * @throws Exception the exception
 */
protected static void setUp() throws Exception {

    Configuration config = new Configuration();

    // Set L2 config
    config.set(OffHeapBlockCache.BLOCK_CACHE_MEMORY_SIZE, Long.toString(sRAMCacheSize));

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, true);
    config.setLong(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE,
            (long) (sDiskMetaRatio * sRAMCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_COMPRESSION, "LZ4");

    config.setBoolean(OffHeapBlockCache.BLOCK_CACHE_PERSISTENT, sIsPersistent);

    config.set(OffHeapBlockCache.BLOCK_CACHE_DATA_ROOTS, sSystemDataDir);

    // Set L3 config 
    config.set(FileExtStorage.FILE_STORAGE_BASE_DIR, baseDir);
    // 120G
    config.set(FileExtStorage.FILE_STORAGE_MAX_SIZE, Long.toString(sDiskCacheSize));
    config.set(OffHeapBlockCache.BLOCK_CACHE_EXT_STORAGE_IMPL, "com.koda.integ.hbase.storage.FileExtStorage");
    // 2G file size limit
    config.setLong(FileExtStorage.FILE_STORAGE_FILE_SIZE_LIMIT, sMaxFileSize);
    // 8MB buffer size 
    config.setInt(FileExtStorage.FILE_STORAGE_BUFFER_SIZE, 8 * 1024 * 1024);

    config.setInt(FileExtStorage.FILE_STORAGE_NUM_BUFFERS, 2);

    if (!sIsPersistent) {
        checkDir();
    } else {
        // Set deserializer
        CacheableSerializer.setSerializer(ByteArrayCacheable.deserializer);
    }

    // Create block cache      
    sCache = new OffHeapBlockCache(config);

    sStorage = (FileExtStorage) sCache.getExternalStorage();

}

From source file:com.lightboxtechnologies.nsrl.HashLoader.java

License:Apache License
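
Here setLong stamps the Configuration with the current time under the "timestamp" key before the MapReduce job is created.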

public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();

    final String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

    if (otherArgs.length != 6) {
        System.err.println("Usage: HashLoader <mfgfile> <osfile> <prodfile> <hashfile> <outpath> <num_reducers>");
        System.exit(2);
    }

    final String mfg_filename = otherArgs[0];
    final String os_filename = otherArgs[1];
    final String prod_filename = otherArgs[2];
    final String hash_filename = otherArgs[3];
    final String output_filename = otherArgs[4];

    conf.set("mfg_filename", mfg_filename);
    conf.set("os_filename", os_filename);
    conf.set("prod_filename", prod_filename);

    conf.setLong("timestamp", System.currentTimeMillis());
    SKJobFactory.addDependencies(conf);

    final Job job = new Job(conf, "HashLoader");
    job.setJarByClass(HashLoader.class);
    job.setMapperClass(HashLoaderMapper.class);
    job.setReducerClass(KeyValueSortReducer.class);
    job.setNumReduceTasks(Integer.parseInt(otherArgs[5]));

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(HFileOutputFormat.class);

    TextInputFormat.addInputPath(job, new Path(hash_filename));
    HFileOutputFormat.setOutputPath(job, new Path(output_filename));

    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

From source file:com.lightboxtechnologies.spectrum.BlockHasher.java

License:Apache License
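
Here setLong writes a "timestamp" into the job's live Configuration, re-fetched from the Job because the Job constructor copies the Configuration passed to it.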

public int run(String[] args) throws Exception {
    if (args.length != 3) {
        System.err.println("Usage: BlockHasher <imageID> <image> <output>");
        return 2;
    }

    final String imageID = args[0];
    final String image = args[1];
    final String output = args[2];

    Configuration conf = getConf();

    final Job job = SKJobFactory.createJobFromConf(imageID, image, "BlockHasher", conf);
    job.setJarByClass(BlockHasher.class);
    job.setMapperClass(BlockHashMapper.class);
    // job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(0);

    // job ctor copies the Configuration we pass it, get the real one
    conf = job.getConfiguration();

    conf.setLong("timestamp", System.currentTimeMillis());

    job.setInputFormatClass(RawFileInputFormat.class);
    RawFileInputFormat.addInputPath(job, new Path(image));

    job.setOutputFormatClass(TextOutputFormat.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(MD5Hash.class);
    FileOutputFormat.setOutputPath(job, new Path(output));

    conf.setInt("mapred.job.reuse.jvm.num.tasks", -1);

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file:com.lightboxtechnologies.spectrum.ExtractData.java

License:Apache License
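
As in BlockHasher, setLong records a "timestamp" on the Configuration obtained back from the Job after construction.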

public int run(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("Usage: ExtractData <imageID> <friendly_name> <extents_file> <evidence file>");
        return 2;
    }

    final String imageID = args[0];
    final String friendlyName = args[1];
    final String extentsPath = args[2];
    final String image = args[3];

    Configuration conf = getConf();

    final Job job = SKJobFactory.createJobFromConf(imageID, friendlyName, "ExtractData", conf);
    job.setJarByClass(ExtractData.class);
    job.setMapperClass(ExtractDataMapper.class);
    job.setReducerClass(KeyValueSortReducer.class);
    job.setNumReduceTasks(1);

    // job ctor copies the Configuration we pass it, get the real one
    conf = job.getConfiguration();

    conf.setLong("timestamp", System.currentTimeMillis());

    job.setInputFormatClass(RawFileInputFormat.class);
    RawFileInputFormat.addInputPath(job, new Path(image));

    job.setOutputFormatClass(HFileOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);

    conf.setInt("mapreduce.job.jvm.numtasks", -1);

    final FileSystem fs = FileSystem.get(conf);
    Path hfileDir = new Path("/texaspete/ev/tmp", UUID.randomUUID().toString());
    hfileDir = hfileDir.makeQualified(fs);
    LOG.info("Hashes will be written temporarily to " + hfileDir);

    HFileOutputFormat.setOutputPath(job, hfileDir);

    final Path extp = new Path(extentsPath);
    final URI extents = extp.toUri();
    LOG.info("extents file is " + extents);

    DistributedCache.addCacheFile(extents, conf);
    conf.set("com.lbt.extentsname", extp.getName());
    // job.getConfiguration().setBoolean("mapred.task.profile", true);
    // job.getConfiguration().setBoolean("mapreduce.task.profile", true);

    HBaseTables.summon(conf, HBaseTables.HASH_TBL_B, HBaseTables.HASH_COLFAM_B);

    HBaseTables.summon(conf, HBaseTables.ENTRIES_TBL_B, HBaseTables.ENTRIES_COLFAM_B);

    final boolean result = job.waitForCompletion(true);
    if (result) {
        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
        HBaseConfiguration.addHbaseResources(conf);
        loader.setConf(conf);
        LOG.info("Loading hashes into hbase");
        chmodR(fs, hfileDir);
        loader.doBulkLoad(hfileDir, new HTable(conf, HBaseTables.HASH_TBL_B));
        //      result = fs.delete(hfileDir, true);
    }
    return result ? 0 : 1;
}

From source file:com.lightboxtechnologies.spectrum.MRCoffeeJob.java

License:Apache License
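
Here setLong adds a "timestamp" alongside the command strings before the Job is constructed.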

public static int run(String imageID, String outpath, String[] command, Configuration conf)
        throws ClassNotFoundException, DecoderException, IOException, InterruptedException {
    conf.setStrings("command", command);
    conf.setLong("timestamp", System.currentTimeMillis());

    final Job job = new Job(conf, "MRCoffeeJob");
    job.setJarByClass(MRCoffeeJob.class);

    job.setMapperClass(MRCoffeeMapper.class);

    //    job.setReducerClass(KeyValueSortReducer.class);
    //    job.setNumReduceTasks(1);
    job.setNumReduceTasks(0);

    FsEntryHBaseInputFormat.setupJob(job, imageID);
    job.setInputFormatClass(FsEntryHBaseInputFormat.class);

    job.setOutputKeyClass(ImmutableHexWritable.class);
    //    job.setOutputValueClass(KeyValue.class);
    job.setOutputValueClass(JsonWritable.class);
    //    job.setOutputFormatClass(HFileOutputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    //    HFileOutputFormat.setOutputPath(job, new Path(outpath));
    TextOutputFormat.setOutputPath(job, new Path(outpath));

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file:com.linkedin.cubert.io.rubix.RubixRecordReader.java

License:Open Source License
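
Here setLong stashes the split's block ID and record count in the Configuration so that downstream code can read them back.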

public void initialize(InputSplit split, Configuration conf) throws IOException, InterruptedException {
    @SuppressWarnings("unchecked")
    RubixInputSplit<K, V> rsplit = (RubixInputSplit<K, V>) split;

    SerializationFactory serializationFactory = new SerializationFactory(conf);
    switch (rsplit.getBlockSerializationType()) {
    case DEFAULT:
        valueDeserializer = serializationFactory.getDeserializer(rsplit.getValueClass());
        break;
    case COMPACT:
        BlockSchema schema = rsplit.getSchema();
        valueDeserializer = new CompactDeserializer<V>(schema);
        break;
    }

    key = rsplit.getKey();

    // store the blockid and partition key in the conf
    conf.setLong("MY_BLOCK_ID", rsplit.getBlockId());
    conf.setLong("MY_NUM_RECORDS", rsplit.getNumRecords());
    ByteArrayOutputStream tmpOut = new ByteArrayOutputStream();
    ((Tuple) key).write(new DataOutputStream(tmpOut));
    String keySerialized = SerializerUtils.serializeToString(tmpOut.toByteArray());
    conf.set("MY_PARTITION_KEY", keySerialized);

    Path path = rsplit.getFilename();
    offset = rsplit.getOffset();
    length = rsplit.getLength();

    FileSystem fs = path.getFileSystem(conf);
    FSDataInputStream fsin = fs.open(path);
    fsin.seek(offset);

    in = new BlockInputStream(fsin, length);
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
    if (codec != null) {
        print.f("codec is not null and it is %s", codec.getClass().toString());
        in = codec.createInputStream(in);
    } else {
        print.f("codec is null");
    }

    valueDeserializer.open(in);
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License
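
Here setLong zeroes the initial block-report delay when it is unset, and sets the simulated capacity of each DataNode when simulated capacities are supplied.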

/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If the minicluster's name node is null, assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // for the minicluster, the default initial delay for block reports (BRs) is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If the minicluster's name node is null, assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}