Example usage for org.apache.hadoop.conf Configuration getInt

List of usage examples for org.apache.hadoop.conf Configuration getInt

Introduction

This page collects usage examples for org.apache.hadoop.conf.Configuration getInt.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int. If no such property exists, the provided default value is returned; if the stored value is not a valid int, an error is thrown.
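
For orientation, the minimal sketch below shows the defaulting behavior. The property names and values are hypothetical, not keys used by any of the examples on this page.

// Minimal sketch (hypothetical property names):
Configuration conf = new Configuration();
conf.setInt("example.batch.size", 25);
int batchSize = conf.getInt("example.batch.size", 10);   // property is set: returns 25
int maxRetries = conf.getInt("example.max.retries", 3);  // property is absent: returns the default, 3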

Usage

From source file:com.marklogic.mapreduce.ContentOutputFormat.java

License:Apache License

protected LinkedMapWritable getForestStatusMap(Configuration conf) throws IOException {
    String forestHost = conf.get(OUTPUT_FOREST_HOST);
    if (forestHost != null) {
        //Restores the object from the configuration.
        LinkedMapWritable fhmap = DefaultStringifier.load(conf, OUTPUT_FOREST_HOST, LinkedMapWritable.class);
        // must be in fast load mode, otherwise won't reach here
        String s = conf.get(ASSIGNMENT_POLICY);
        //EXECUTION_MODE must have a value in mlcp;
        //default is "distributed" in hadoop connector
        String mode = conf.get(EXECUTION_MODE, MODE_DISTRIBUTED);
        if (MODE_DISTRIBUTED.equals(mode)) {
            AssignmentPolicy.Kind policy = AssignmentPolicy.Kind.forName(s);
            am.initialize(policy, fhmap, conf.getInt(BATCH_SIZE, 10));
        }
        return fhmap;
    } else {
        try {
            // try getting a connection
            ContentSource cs = InternalUtilities.getOutputContentSource(conf, conf.get(OUTPUT_HOST));
            //get policy
            initialize(cs.newSession());
            // query forest status mapping
            return queryForestInfo(cs);
        } catch (Exception ex) {
            throw new IOException(ex);
        }
    }
}

From source file:com.marklogic.mapreduce.ContentWriter.java

License:Apache License

public ContentWriter(Configuration conf, Map<String, ContentSource> forestSourceMap, boolean fastLoad,
        AssignmentManager am) {
    super(conf, null);

    this.fastLoad = fastLoad;

    this.forestSourceMap = forestSourceMap;

    this.am = am;

    permsMap = new HashMap<String, ContentPermission[]>();

    int srcMapSize = forestSourceMap.size();
    forestIds = new String[srcMapSize];
    // key order in the key set is guaranteed by LinkedHashMap,
    // i.e., keys appear in insertion order
    forestIds = forestSourceMap.keySet().toArray(forestIds);
    hostId = (int) (Math.random() * srcMapSize);

    // arraySize is the number of forests in fast load mode; 1 otherwise.
    int arraySize = fastLoad ? srcMapSize : 1;
    sessions = new Session[arraySize];
    stmtCounts = new int[arraySize];

    outputDir = conf.get(OUTPUT_DIRECTORY);
    batchSize = conf.getInt(BATCH_SIZE, DEFAULT_BATCH_SIZE);

    pendingUris = new HashMap[arraySize];
    for (int i = 0; i < arraySize; i++) {
        pendingUris[i] = new HashMap<Content, DocumentURI>();
    }

    if (fastLoad && (am.getPolicy().getPolicyKind() == AssignmentPolicy.Kind.STATISTICAL
            || am.getPolicy().getPolicyKind() == AssignmentPolicy.Kind.RANGE)) {
        countBased = true;
        if (batchSize > 1) {
            forestContents = new Content[1][batchSize];
            counts = new int[1];
        }
        sfId = -1;
    } else {
        if (batchSize > 1) {
            forestContents = new Content[arraySize][batchSize];
            counts = new int[arraySize];
        }
        sfId = 0;
    }

    String[] perms = conf.getStrings(OUTPUT_PERMISSION);
    List<ContentPermission> permissions = null;
    if (perms != null && perms.length > 0) {
        int i = 0;
        while (i + 1 < perms.length) {
            String roleName = perms[i++];
            if (roleName == null || roleName.isEmpty()) {
                LOG.error("Illegal role name: " + roleName);
                continue;
            }
            String perm = perms[i].trim();
            ContentCapability capability = null;
            if (perm.equalsIgnoreCase(ContentCapability.READ.toString())) {
                capability = ContentCapability.READ;
            } else if (perm.equalsIgnoreCase(ContentCapability.EXECUTE.toString())) {
                capability = ContentCapability.EXECUTE;
            } else if (perm.equalsIgnoreCase(ContentCapability.INSERT.toString())) {
                capability = ContentCapability.INSERT;
            } else if (perm.equalsIgnoreCase(ContentCapability.UPDATE.toString())) {
                capability = ContentCapability.UPDATE;
            } else {
                LOG.error("Illegal permission: " + perm);
            }
            if (capability != null) {
                if (permissions == null) {
                    permissions = new ArrayList<ContentPermission>();
                }
                permissions.add(new ContentPermission(capability, roleName));
            }
            i++;
        }
    }

    options = new ContentCreateOptions();
    String[] collections = conf.getStrings(OUTPUT_COLLECTION);
    if (collections != null) {
        for (int i = 0; i < collections.length; i++) {
            collections[i] = collections[i].trim();
        }
        options.setCollections(collections);
    }

    options.setQuality(conf.getInt(OUTPUT_QUALITY, 0));
    if (permissions != null) {
        options.setPermissions(permissions.toArray(new ContentPermission[permissions.size()]));
    }
    String contentTypeStr = conf.get(CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
    ContentType contentType = ContentType.valueOf(contentTypeStr);
    if (contentType == ContentType.UNKNOWN) {
        formatNeeded = true;
    } else {
        options.setFormat(contentType.getDocumentFormat());
    }

    options.setLanguage(conf.get(OUTPUT_CONTENT_LANGUAGE));
    String repairLevel = conf.get(OUTPUT_XML_REPAIR_LEVEL, DEFAULT_OUTPUT_XML_REPAIR_LEVEL).toLowerCase();
    options.setNamespace(conf.get(OUTPUT_CONTENT_NAMESPACE));
    if (DocumentRepairLevel.DEFAULT.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.DEFAULT);
    } else if (DocumentRepairLevel.NONE.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.NONE);
    } else if (DocumentRepairLevel.FULL.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.FULL);
    }

    streaming = conf.getBoolean(OUTPUT_STREAMING, false);
    tolerateErrors = conf.getBoolean(OUTPUT_TOLERATE_ERRORS, false);

    String encoding = conf.get(MarkLogicConstants.OUTPUT_CONTENT_ENCODING);
    if (encoding != null) {
        options.setEncoding(encoding);
    }

    options.setTemporalCollection(conf.get(TEMPORAL_COLLECTION));

    needCommit = txnSize > 1 || (batchSize > 1 && tolerateErrors);
    if (needCommit) {
        commitUris = new ArrayList[arraySize];
        for (int i = 0; i < arraySize; i++) {
            commitUris[i] = new ArrayList<DocumentURI>(txnSize * batchSize);
        }
    }
}

From source file:com.marklogic.mapreduce.ContentWriter.java

License:Apache License

@Override
public int getTransactionSize(Configuration conf) {
    // return the specified txn size
    if (conf.get(TXN_SIZE) != null) {
        int txnSize = conf.getInt(TXN_SIZE, 0);
        return txnSize <= 0 ? 1 : txnSize;
    }
    return 1000 / conf.getInt(BATCH_SIZE, DEFAULT_BATCH_SIZE);
}
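
When TXN_SIZE is not set, the fallback above derives the transaction size from the batch size so that one transaction covers roughly 1000 documents: with an illustrative batch size of 100 it returns 1000 / 100 = 10, and with a batch size of 1 it returns 1000. An explicit TXN_SIZE of zero or less is clamped to 1.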

From source file:com.marklogic.mapreduce.MarkLogicRecordWriter.java

License:Apache License

public int getTransactionSize(Configuration conf) {
    return conf.getInt(TXN_SIZE, 1000);
}

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} as its RetryInvocationHandler. Otherwise one of nameNodeUri or
 * rpcNamenode
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
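
In this constructor, getInt decides whether the hedged-read thread pool is created at all: initThreadsNumForHedgedReads is only called when the configured pool size is positive. A hedged sketch of enabling it before the client is built follows; the literal key string is assumed to match DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE.

// Hedged sketch: the key string is assumed to equal
// DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE.
Configuration conf = new HdfsConfiguration();
conf.setInt("dfs.client.hedged.read.threadpool.size", 8);
// The constructor above then reads 8 via getInt and sizes the hedged-read pool accordingly.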

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

private void initMiniDFSCluster(Configuration conf, int numDataNodes, StorageType storageType, boolean format,
        boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
        boolean manageDataDfsDirs, StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
        String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode,
        boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
        boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays)
        throws IOException {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
        instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling checkpointing in the Standby node "
                + "since no HTTP ports have been specified.");
        conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
        LOG.info("MiniDFSCluster disabling log-roll triggering in the "
                + "Standby node since no IPC ports have been specified.");
        conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    federation = nnTopology.isFederated();
    try {
        createNameNodesAndSetConf(nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
                enableManagedDfsDirsRedundancy, format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
        LOG.error("IOE creating namenodes. Permissions dump:\n" + createPermissionsDiagnosisString(data_dir));
        throw ioe;
    }
    if (format) {
        if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
            throw new IOException(
                    "Cannot remove data directory: " + data_dir + createPermissionsDiagnosisString(data_dir));
        }
    }

    if (startOpt == StartupOption.RECOVER) {
        return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
            dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, simulatedCapacities, setupHostsFile,
            checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
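
The replication handling above is a common read-modify-write pattern built on getInt/setInt: read the configured value with a default, cap it against a runtime limit, and write the result back so later readers see the adjusted value. The same pattern in isolation (the literal key is assumed to equal DFS_REPLICATION_KEY):

// Read with a default, cap against a runtime limit, write back.
// "dfs.replication" is assumed to be the string behind DFS_REPLICATION_KEY.
int replication = conf.getInt("dfs.replication", 3);
conf.setInt("dfs.replication", Math.min(replication, numDataNodes));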

From source file:com.mellanox.r4h.MiniDFSCluster.java

License:Apache License

/**
 * Modify the config and start up additional DataNodes. The info port for
 * DataNodes is guaranteed to use a free port.
 *
 * Data nodes can run with the name node in the mini cluster or
 * a real name node. For example, running with a real name node is useful
 * when running simulated data nodes with a real name node.
 * If minicluster's name node is null assume that the conf has been
 * set with the right address:port of the name node.
 *
 * @param conf
 *            the base configuration to use in starting the DataNodes. This
 *            will be modified as necessary.
 * @param numDataNodes
 *            Number of DataNodes to start; may be zero
 * @param manageDfsDirs
 *            if true, the data directories for DataNodes will be
 *            created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be
 *            set in the conf
 * @param operation
 *            the operation with which to start the DataNodes. If null
 *            or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
 * @param racks
 *            array of strings indicating the rack that each DataNode is on
 * @param hosts
 *            array of strings indicating the hostnames for each DataNode
 * @param simulatedCapacities
 *            array of capacities of the simulated data nodes
 * @param setupHostsFile
 *            add new nodes to dfs hosts files
 * @param checkDataNodeAddrConfig
 *            if true, only set DataNode port addresses if not already set in config
 * @param checkDataNodeHostConfig
 *            if true, only set DataNode hostname key if not already set in config
 * @param dnConfOverlays
 *            An array of {@link Configuration} objects that will overlay the
 *            global MiniDFSCluster Configuration for the corresponding DataNode.
 * @throws IllegalStateException
 *             if NameNode has been shutdown
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType storageType,
        boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts,
        long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig,
        boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }

    int curDatanodesNum = dataNodes.size();
    // for the minicluster, the default initialDelay for block reports (BRs) is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // If minicluster's name node is null assume that the conf has been
    // set with the right address:port of the name node.
    //
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }

    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length
                + "] is less than the number of datanodes [" + numDataNodes + "].");
    }

    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null
            : new String[] { operation.getName() };

    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageType);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
                    simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: "
                    + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        Configuration newconf = new HdfsConfiguration(dnConf); // save config
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }

        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
                IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // Work around issue testing security where rapidly starting multiple
                // DataNodes using the same principal gets rejected by the KDC as a
                // replay attack.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        // since the HDFS does things based on host|ip:port, we need to add the
        // mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
}

From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License:Apache License

public static int getLimit(final Configuration conf) {
    return conf.getInt(INPUT_LIMIT, 0);
}

From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License:Apache License

public static int getSkip(final Configuration conf) {
    return conf.getInt(INPUT_SKIP, 0);
}

From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License:Apache License

public static int getSplitSize(final Configuration conf) {
    return conf.getInt(INPUT_SPLIT_SIZE, DEFAULT_SPLIT_SIZE);
}
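
These three wrappers share one shape: getInt with a constant key and a fixed default. A hedged usage sketch follows; it assumes INPUT_LIMIT, INPUT_SKIP and INPUT_SPLIT_SIZE are the public property-name constants defined on MapredMongoConfigUtil.

// Hedged usage sketch for the wrappers above.
Configuration conf = new Configuration();
conf.setInt(MapredMongoConfigUtil.INPUT_LIMIT, 500);
int limit = MapredMongoConfigUtil.getLimit(conf);         // 500: INPUT_LIMIT was set
int skip = MapredMongoConfigUtil.getSkip(conf);           // 0: INPUT_SKIP not set, default used
int splitSize = MapredMongoConfigUtil.getSplitSize(conf); // DEFAULT_SPLIT_SIZE: not set, default used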