Example usage for java.net.InetSocketAddress.getHostName()

List of usage examples for java.net.InetSocketAddress.getHostName()

Introduction

On this page you can find example usages of java.net.InetSocketAddress.getHostName().

Prototype

public final String getHostName() 

Document

Gets the hostname. Note that this method may trigger a reverse name lookup if the address was created with a literal IP address.
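
Before the full-size examples below, here is a minimal, self-contained sketch of the method's behavior. The host name example.org and the ports used are placeholders chosen purely for illustration: getHostName() returns the hostname the address was created with, and may trigger a reverse name lookup when the address was built from a literal IP address.

import java.net.InetSocketAddress;

public class GetHostNameExample {
    public static void main(String[] args) {
        // Created from a hostname: getHostName() simply returns that name.
        InetSocketAddress byName = new InetSocketAddress("example.org", 80);
        System.out.println(byName.getHostName()); // example.org

        // Created from a literal IP: getHostName() may perform a reverse
        // DNS lookup to find a name for the address.
        InetSocketAddress byIp = new InetSocketAddress("127.0.0.1", 80);
        System.out.println(byIp.getHostName()); // e.g. localhost

        // Unresolved address: no lookup is attempted at creation time and
        // the hostname is returned exactly as supplied.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.org", 80);
        System.out.println(unresolved.getHostName()); // example.org
    }
}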

Usage

From source file:com.alibaba.wasp.master.FMaster.java

/**
 * Initializes the FMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize FMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they run
 * in their own thread rather than within the context of the constructor.
 * 
 * @throws InterruptedException
 */
public FMaster(final Configuration conf) throws IOException, KeeperException, InterruptedException {
    this.conf = new Configuration(conf);
    // Set how many times to retry talking to another server over HConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);
    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.master.dns.interface", "default"),
                    conf.get("wasp.master.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.MASTER_PORT, FConstants.DEFAULT_MASTER_PORT);
    // Creation of an ISA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    this.rpcServer = WaspRPC.getServer(FMaster.class, this,
            new Class<?>[] { FMasterMonitorProtocol.class, FMasterAdminProtocol.class,
                    FServerStatusProtocol.class, FMetaServerProtocol.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();
    this.serverName = new ServerName(this.isa.getHostName(), this.isa.getPort(), System.currentTimeMillis());

    // set the thread name now we have an address
    setName(MASTER + "-" + this.serverName.toString());

    this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);

    // metrics interval: using the same property as fserver.
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.java

/**
 * For generating datanode reports.
 */
public List<DatanodeDescriptor> getDatanodeListForReport(final DatanodeReportType type) {
    final boolean listLiveNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.LIVE;
    final boolean listDeadNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DEAD;
    final boolean listDecommissioningNodes = type == DatanodeReportType.ALL
            || type == DatanodeReportType.DECOMMISSIONING;

    ArrayList<DatanodeDescriptor> nodes;
    final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet();
    final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes();
    final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();

    synchronized (datanodeMap) {
        nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
        for (DatanodeDescriptor dn : datanodeMap.values()) {
            final boolean isDead = isDatanodeDead(dn);
            final boolean isDecommissioning = dn.isDecommissionInProgress();
            if ((listLiveNodes && !isDead) || (listDeadNodes && isDead)
                    || (listDecommissioningNodes && isDecommissioning)) {
                nodes.add(dn);
            }
            foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
        }
    }

    if (listDeadNodes) {
        for (InetSocketAddress addr : includedNodes) {
            if (foundNodes.matchedBy(addr) || excludedNodes.match(addr)) {
                continue;
            }
            // The remaining nodes are ones that are referenced by the hosts
            // files but that we do not know about, i.e. that we have never
            // heard from. E.g. an entry that is no longer part of the cluster
            // or a bogus entry that was given in the hosts files.
            //
            // If the host file entry specified the xferPort, we use that.
            // Otherwise, we guess that it is the default xfer port.
            // We can't ask the DataNode what it had configured, because it's
            // dead.
            DatanodeDescriptor dn = new DatanodeDescriptor(this.storageMap,
                    new DatanodeID(addr.getAddress().getHostAddress(), addr.getHostName(), "",
                            addr.getPort() == 0 ? defaultXferPort : addr.getPort(), defaultInfoPort,
                            defaultInfoSecurePort, defaultIpcPort));
            dn.setLastUpdate(0); // Consider this node dead for reporting
            nodes.add(dn);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("getDatanodeListForReport with " + "includedNodes = " + hostFileManager.getIncludes()
                + ", excludedNodes = " + hostFileManager.getExcludes() + ", foundNodes = " + foundNodes
                + ", nodes = " + nodes);
    }
    return nodes;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestCheckpoint.java

/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * 
 * @throws IOException
 */
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String nameserviceId1 = "ns1";
    String nameserviceId2 = "ns2";
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1 + "," + nameserviceId2);
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary1 = null;
    SecondaryNameNode secondary2 = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
                .build();
        Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
        Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
        InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).getNameNodeAddress();
        InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).getNameNodeAddress();
        String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
        String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

        // Set the Service Rpc address to empty to make sure the node specific
        // setting works
        snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
        snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

        // Set the nameserviceIds
        snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
                nn1);
        snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
                nn2);

        secondary1 = startSecondaryNameNode(snConf1);
        secondary2 = startSecondaryNameNode(snConf2);

        // make sure the two secondary namenodes are talking to correct namenodes.
        assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
        assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
        assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());

        // both should checkpoint.
        secondary1.doCheckpoint();
        secondary2.doCheckpoint();
    } finally {
        cleanup(secondary1);
        secondary1 = null;
        cleanup(secondary2);
        secondary2 = null;
        cleanup(cluster);
        cluster = null;
    }
}

From source file:com.alibaba.wasp.fserver.FServer.java

/**
 * Starts a FServer at the default location
 *
 * @param conf
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public FServer(Configuration conf) throws IOException, InterruptedException {
    this.conf = conf;
    this.isOnline = false;
    // Set how many times to retry talking to another server over FConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);

    // Config'ed params
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.sleeper = new Sleeper(this.msgInterval, this);

    this.numEntityGroupsToReport = conf.getInt("wasp.fserver.numentitygroupstoreport", 10);

    this.rpcTimeout = conf.getInt(FConstants.WASP_RPC_TIMEOUT_KEY, FConstants.DEFAULT_WASP_RPC_TIMEOUT);

    this.abortRequested = false;
    this.stopped = false;
    this.actionManager = new StorageActionManager(conf);

    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.fserver.dns.interface", "default"),
                    conf.get("wasp.fserver.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.FSERVER_PORT, FConstants.DEFAULT_FSERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = WaspRPC.getServer(FServer.class, this,
            new Class<?>[] { ClientProtocol.class, AdminProtocol.class, WaspRPCErrorHandler.class,
                    OnlineEntityGroups.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();

    this.leases = new Leases(conf.getInt(FConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    this.startcode = System.currentTimeMillis();

    int maxThreads = conf.getInt("wasp.transaction.threads.max", 150);

    this.pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
            new DaemonThreadFactory("thread factory"));
    ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);

    this.scannerLeaseTimeoutPeriod = conf.getInt(FConstants.WASP_CLIENT_SCANNER_TIMEOUT_PERIOD,
            FConstants.DEFAULT_WASP_CLIENT_SCANNER_TIMEOUT_PERIOD);

    this.driver = new BaseDriver(this);
    this.splitThread = new SplitThread(this);
    this.globalEntityGroup = new GlobalEntityGroup(this);
}

From source file:org.apache.hadoop.gateway.GatewayBasicFuncTest.java

@Test
public void testBasicOutboundHeaderUseCase() throws IOException {
    String root = "/tmp/GatewayBasicFuncTest/testBasicOutboundHeaderUseCase";
    String username = "hdfs";
    String password = "hdfs-password";
    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];

    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .header("Host", driver.getRealAddr("WEBHDFS")).queryParam("op", "CREATE")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_TEMPORARY_REDIRECT)
            .header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs");
    Response response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "CREATE").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_TEMPORARY_REDIRECT).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    String location = response.getHeader("Location");
    //System.out.println( location );
    log.debug("Redirect location: " + response.getHeader("Location"));
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
        MatcherAssert.assertThat(location, containsString("?_="));
    }
    MatcherAssert.assertThat(location, not(containsString("host=")));
    MatcherAssert.assertThat(location, not(containsString("port=")));
}

From source file:org.cloudata.core.master.CloudataMaster.java

public void init(CloudataConf conf) throws IOException {
    InetSocketAddress serverAddress = NetworkUtil.getLocalAddress(conf.getInt("masterServer.port", 7000));

    this.hostName = serverAddress.getHostName() + ":" + serverAddress.getPort();

    this.threadGroup = new ThreadGroup("CloudataMaster_" + hostName);

    this.conf = conf;

    this.fs = CloudataFileSystem.get(conf);
    if (this.fs == null) {
        LOG.fatal("FileSystem is not ready. CloudataMaster shutdown");
        shutdown();
    }

    this.zk = LockUtil.getZooKeeper(conf, hostName, this);
    this.schemaMap = new TableSchemaMap(conf, zk);

    this.server = CRPC.getServer(zk, this, serverAddress.getHostName(), serverAddress.getPort(),
            conf.getInt("masterServer.handler.count", 10), false, conf);

    this.server.start();

    LOG.info("Netune master started at " + hostName);
}

From source file:org.cloudata.core.master.CloudataMaster.java

/**
 * Performed after the master lock has been acquired.
 * 
 * @throws IOException
 */
private void masterInit() throws IOException {
    if (!fs.isReady()) {
        LOG.fatal("FileSystem is not ready. " + "check " + conf.get("cloudata.root")
                + " directory. CloudataMaster shutdown");
        shutdown();
    }
    masterInitTime = new Date();
    masterElected = true;
    setClusterReady(false);

    masterMetrics = new CloudataMasterMetrics(conf);
    (new Thread(threadGroup, new UpdateMetricsThread())).start();

    addLockEventHandler();

    try {
        loadAllTableSchemas(conf);
        synchronized (Constants.SC_LOCK_PATH) {
            LockUtil.createNodes(zk, LockUtil.getZKPath(conf, Constants.SC_LOCK_PATH), "0".getBytes(),
                    CreateMode.PERSISTENT, true);
            LockUtil.createNodes(zk, LockUtil.getZKPath(conf, Constants.MC_LOCK_PATH), "0".getBytes(),
                    CreateMode.PERSISTENT, true);
        }
    } catch (IOException e) {
        LOG.fatal("CloudataMaster shutdown cause:" + e.getMessage(), e);
        shutdown();
        return;
    }

    Thread rootTabletAssignmentThread = new Thread(threadGroup, new RootTabletAssignmentThread());
    rootTabletAssignmentThread.start();

    InetSocketAddress infoServerAddress = NetworkUtil
            .getAddress(conf.get("masterServer.info.address", "0.0.0.0:57000"));

    try {
        this.infoServer = new CStatusHttpServer("master", infoServerAddress.getHostName(),
                infoServerAddress.getPort());
        this.infoServer.start();
        LOG.info("Info Http Server started: " + infoServerAddress.toString());
    } catch (Exception e) {
        LOG.warn("Error while info server init:" + e.getMessage());
    }

    // Resume any table drop operations that were not completed previously.
    List<String> dropTables = null;
    try {
        dropTables = zk.getChildren(LockUtil.getZKPath(conf, Constants.TABLE_DROP), false);
    } catch (NoNodeException e) {
    } catch (Exception e) {
        throw new IOException(e);
    }
    if (dropTables != null) {
        for (String eachDropTable : dropTables) {
            asyncTaskManager.runAsyncTask(new TableDropTask(eachDropTable));
        }
    }
}

From source file:skewtune.mapreduce.STJobTracker.java

@SuppressWarnings("unchecked")
STJobTracker(final JobConf conf, String jobtrackerIndentifier) throws IOException, InterruptedException {
    // find the owner of the process
    // get the desired principal to load
    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
    UserGroupInformation.setConfiguration(conf);
    if (keytabFilename != null) {
        String desiredUser = conf.get(JTConfig.JT_USER_NAME, System.getProperty("user.name"));
        UserGroupInformation.loginUserFromKeytab(desiredUser, keytabFilename);
        mrOwner = UserGroupInformation.getLoginUser();
    } else {
        mrOwner = UserGroupInformation.getCurrentUser();
    }

    supergroup = conf.get(MR_SUPERGROUP, "supergroup");
    LOG.info("Starting jobtracker with owner as " + mrOwner.getShortUserName() + " and supergroup as "
            + supergroup);

    long secretKeyInterval = conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    //
    // Grab some static constants
    //

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // whether to dump or not every heartbeat message even when DEBUG is enabled
    dumpHeartbeat = conf.getBoolean(JT_HEARTBEATS_DUMP, false);

    // This is a directory of temporary submission files. We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();

    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
    this.interTrackerServer = RPC.getServer(SkewTuneClientProtocol.class, this, addr.getHostName(),
            addr.getPort(), handlerCount, false, conf, secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get(JT_HTTP_ADDRESS, String.format("%s:0", this.localMachine)));
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = System.currentTimeMillis();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.addServlet("jobcompletion", "/completion", JobCompletionServlet.class);
    infoServer.addServlet("taskspeculation", "/speculation", SpeculationEventServlet.class);
    infoServer.addServlet("skewreport", "/skew", SkewReportServlet.class);
    infoServer.addServlet("tasksplit", "/split/*", SplitTaskServlet.class);
    infoServer.addServlet("tasksplitV2", "/splitV2/*", SplitTaskV2Servlet.class);
    infoServer.start();

    this.trackerIdentifier = jobtrackerIndentifier;

    // The rpc/web-server ports can be ephemeral ports...
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set(JT_HTTP_ADDRESS, infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());
    this.defaultNotificationUrl = String.format("http://%s:%d/completion?jobid=$jobId&status=$jobStatus",
            infoBindAddress, this.infoPort);
    LOG.info("JobTracker completion URI: " + defaultNotificationUrl);
    //        this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?taskid=$taskId&remainTime=$taskRemainTime",infoBindAddress,this.infoPort);
    this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?jobid=$jobId", infoBindAddress,
            this.infoPort);
    LOG.info("JobTracker speculation event URI: " + defaultSpeculationEventUrl);
    this.defaultSkewReportUrl = String.format("http://%s:%d/skew", infoBindAddress, this.infoPort);
    LOG.info("JobTracker skew report event URI: " + defaultSkewReportUrl);
    this.trackerHttp = String.format("http://%s:%d", infoBindAddress, this.infoPort);

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }

            // clean up the system dir, which will only work if hdfs is out
            // of safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(mrOwner.getShortUserName())) {
                    throw new AccessControlException(
                            "The systemdir " + systemDir + " is not owned by " + mrOwner.getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                } else {
                    break;
                }
            } catch (FileNotFoundException fnf) {
            } // ignore
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") because of permissions.");
            LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ");
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // initialize cluster variable
    cluster = new Cluster(this.conf);

    // now create a job client proxy
    jtClient = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
            JobTracker.getAddress(conf), mrOwner, this.conf,
            NetUtils.getSocketFactory(conf, ClientProtocol.class));

    new SpeculativeScheduler().start();

    // initialize task event fetcher
    new TaskCompletionEventFetcher().start();

    // Same with 'localDir' except it's always on the local disk.
    asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(conf), conf.getLocalDirs());
    asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR);

    // keep at least one asynchronous worker per CPU core
    int numProcs = Runtime.getRuntime().availableProcessors();
    LOG.info("# of available processors = " + numProcs);
    int maxFactor = conf.getInt(JT_MAX_ASYNC_WORKER_FACTOR, 2);
    asyncWorkers = new ThreadPoolExecutor(numProcs, numProcs * maxFactor, 30, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(true), new ThreadPoolExecutor.CallerRunsPolicy());

    speculativeSplit = conf.getBoolean(JT_SPECULATIVE_SPLIT, false);
}

From source file:org.cloudata.core.tabletserver.TabletServer.java

public void init(CloudataConf conf) throws IOException {
    this.serverStartTime = new Date();
    this.testMode = conf.getBoolean("testmode", false);
    this.conf = conf;

    this.maxMajorCompactionThread = this.conf.getInt("tabletServer.maxMajorCompactionThread", 5);
    this.maxSplitThread = this.conf.getInt("tabletServer.maxSplitThread", 5);

    this.compactionExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxMajorCompactionThread);
    this.splitExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxSplitThread);
    this.actionExecutor = (ThreadPoolExecutor) Executors
            .newFixedThreadPool(this.conf.getInt("tabletServer.maxMinorCompactionThread", 10));

    this.maxTabletCount = conf.getInt("tabletServer.max.tablet.count", 2000);

    this.maxResultRecord = conf.getInt("client.max.resultRecord", 5000);

    this.maxMemoryCacheCapacity = conf.getLong("memory.maxColumnCacheCapacity", 200) * 1024 * 1024;

    this.fs = CloudataFileSystem.get(conf);

    if (fs == null || !fs.isReady()) {
        LOG.fatal("FileSystem is not ready. TabletServer shutdown");
        shutdown();
    }

    InetSocketAddress serverAddress = NetworkUtil.getAddress(
            InetAddress.getLocalHost().getHostName() + ":" + conf.getInt("tabletServer.port", 7001));

    this.hostName = serverAddress.getHostName() + ":" + serverAddress.getPort();

    this.threadGroup = new ThreadGroup("TabletServer_" + hostName);

    this.leaseHolder = new LeaseHolder(threadGroup);

    this.tabletServerLockPath = Constants.SERVER + "/" + hostName;

    this.zk = LockUtil.getZooKeeper(conf, hostName, this);

    //<Split Lock >
    try {
        LockUtil.delete(zk, LockUtil.getZKPath(conf, Constants.TABLETSERVER_SPLIT + "/" + hostName), true);
    } catch (Exception e) {
        throw new IOException(e);
    }
    //</Split Lock >

    schemaMap = new TableSchemaMap(conf, zk);

    tabletServerMetrics = new TabletServerMetrics(conf, this);

    this.server = CRPC.getServer(zk, this, serverAddress.getHostName(), serverAddress.getPort(),
            conf.getInt("tabletServer.handler.count", 10), false, conf, tabletServerMetrics);

    ServerSocket ss = null;
    int port = conf.getInt("tabletServer.scanner.port", 50100);
    String bindAddress = "0.0.0.0";

    try {
        //      LOG.info("Opened Scanner Handler at " + hostName  + ", port=" + port);
        ss = new ServerSocket(port, 0, InetAddress.getByName(bindAddress));
        ss.setReuseAddress(true);
    } catch (IOException ie) {
        LOG.error("Could not open scanner server at " + port + ", stop server and Stop tablet server", ie);
        exit();
    }
    this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));

    try {
        LockUtil.createNodes(zk, LockUtil.getZKPath(conf, tabletServerLockPath), hostName.getBytes(),
                CreateMode.EPHEMERAL);
        LOG.info("TableServer lock created:" + LockUtil.getZKPath(conf, tabletServerLockPath));
    } catch (Exception e) {
        LOG.fatal("TabletServer stopped. Can't server lock:" + tabletServerLockPath, e);
        exit();
    }

    if (tabletDistributionMode.get()) {
        LOG.info("Turn on tablet distribution mode");
    }

    heartbeatThread = new HeartbeatThread();
    heartbeatThread.setDaemon(true);
    heartbeatThread.start();
}