Usage examples for java.net.ServerSocket.setReceiveBufferSize, collected from open-source projects
public synchronized void setReceiveBufferSize(int size) throws SocketException
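Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: create the ServerSocket unbound, request the receive buffer size, then bind, so that sockets returned by accept() inherit the buffer. The port and the 256 KB size are illustrative values only.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class ReceiveBufferExample {
    public static void main(String[] args) throws IOException {
        // Create the socket unbound so the buffer can be requested before bind();
        // per the javadoc, buffer sizes above 64K take effect on accepted sockets
        // only when requested before the ServerSocket is bound.
        try (ServerSocket server = new ServerSocket()) {
            server.setReceiveBufferSize(256 * 1024); // illustrative size
            server.bind(new InetSocketAddress(8080)); // illustrative port
            // The requested size is a hint; query what the OS actually granted.
            System.out.println("Granted receive buffer: " + server.getReceiveBufferSize());
        }
    }
}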
From source file: io.github.kitarek.elasthttpd.server.networking.HttpConfiguredServerSocket.java
private static void setSocketReceiveBufferSizeWhenProvided(final SocketConfiguration socketConfiguration,
        final ServerSocket serverSocket) {
    socketConfiguration.getSocketReceiveBufferSizeInBytes().map(new OptionalMapper<Integer>() {
        public void present(Integer receiveBufferSize) {
            try {
                serverSocket.setReceiveBufferSize(receiveBufferSize);
            } catch (SocketException e) {
                throw new IllegalStateException("Cannot set socket receive buffer size", e);
            }
        }
    });
}
From source file: com.l2jfree.network.mmocore.AcceptorThread.java
public void openServerSocket(InetAddress address, int port) throws IOException {
    ServerSocketChannel selectable = ServerSocketChannel.open();
    selectable.configureBlocking(false);

    ServerSocket ss = selectable.socket();
    ss.setReuseAddress(true);
    ss.setReceiveBufferSize(getBufferSize());

    if (address == null) {
        ss.bind(new InetSocketAddress(port));
    } else {
        ss.bind(new InetSocketAddress(address, port));
    }

    selectable.register(getSelector(), SelectionKey.OP_ACCEPT);
}
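The l2jfree example above configures the buffer through the legacy socket() adapter of ServerSocketChannel. For comparison, on Java 7+ the same request can be made directly on the channel; this is a hedged sketch under that assumption, not part of the l2jfree source.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.StandardSocketOptions;
import java.nio.channels.ServerSocketChannel;

// Sketch only: channel-level equivalent of the example above (Java 7+).
static ServerSocketChannel openNonBlocking(int port, int rcvBuf) throws IOException {
    ServerSocketChannel channel = ServerSocketChannel.open();
    channel.configureBlocking(false);
    channel.setOption(StandardSocketOptions.SO_REUSEADDR, true);
    // Request the receive buffer before bind() so sockets accepted from this
    // channel inherit it; the OS treats the value as a hint.
    channel.setOption(StandardSocketOptions.SO_RCVBUF, rcvBuf);
    channel.bind(new InetSocketAddress(port));
    return channel;
}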
From source file: common.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *    if conf's CONFIG_PROPERTY_SIMULATED property is set
 *    then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}
From source file: org.apache.geode.internal.net.SocketCreator.java
public ServerSocket createServerSocket(int nport, int backlog, InetAddress bindAddr,
        List<GatewayTransportFilter> transportFilters, int socketBufferSize) throws IOException {
    if (transportFilters.isEmpty()) {
        return createServerSocket(nport, backlog, bindAddr, socketBufferSize);
    } else {
        printConfig();
        ServerSocket result = new TransportFilterServerSocket(transportFilters);
        result.setReuseAddress(true);
        // Set the receive buffer size before binding the socket so
        // that large buffers will be allocated on accepted sockets (see
        // java.net.ServerSocket.setReceiveBufferSize javadocs)
        result.setReceiveBufferSize(socketBufferSize);
        try {
            result.bind(new InetSocketAddress(bindAddr, nport), backlog);
        } catch (BindException e) {
            BindException throwMe = new BindException(
                    LocalizedStrings.SocketCreator_FAILED_TO_CREATE_SERVER_SOCKET_ON_0_1
                            .toLocalizedString(new Object[] { bindAddr, Integer.valueOf(nport) }));
            throwMe.initCause(e);
            throw throwMe;
        }
        return result;
    }
}
From source file: org.apache.geode.internal.net.SocketCreator.java
private ServerSocket createServerSocket(int nport, int backlog, InetAddress bindAddr, int socketBufferSize,
        boolean sslConnection) throws IOException {
    printConfig();
    if (sslConnection) {
        if (this.sslContext == null) {
            throw new GemFireConfigException("SSL not configured correctly, Please look at previous error");
        }
        ServerSocketFactory ssf = this.sslContext.getServerSocketFactory();
        SSLServerSocket serverSocket = (SSLServerSocket) ssf.createServerSocket();
        serverSocket.setReuseAddress(true);
        // If necessary, set the receive buffer size before binding the socket so
        // that large buffers will be allocated on accepted sockets (see
        // java.net.ServerSocket.setReceiveBufferSize javadocs)
        if (socketBufferSize != -1) {
            serverSocket.setReceiveBufferSize(socketBufferSize);
        }
        serverSocket.bind(new InetSocketAddress(bindAddr, nport), backlog);
        finishServerSocket(serverSocket);
        return serverSocket;
    } else {
        // log.info("Opening server socket on " + nport, new Exception("SocketCreation"));
        ServerSocket result = new ServerSocket();
        result.setReuseAddress(true);
        // If necessary, set the receive buffer size before binding the socket so
        // that large buffers will be allocated on accepted sockets (see
        // java.net.ServerSocket.setReceiveBufferSize javadocs)
        if (socketBufferSize != -1) {
            result.setReceiveBufferSize(socketBufferSize);
        }
        try {
            result.bind(new InetSocketAddress(bindAddr, nport), backlog);
        } catch (BindException e) {
            BindException throwMe = new BindException(
                    LocalizedStrings.SocketCreator_FAILED_TO_CREATE_SERVER_SOCKET_ON_0_1
                            .toLocalizedString(new Object[] { bindAddr, Integer.valueOf(nport) }));
            throwMe.initCause(e);
            throw throwMe;
        }
        return result;
    }
}
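The Geode comments above stress setting the buffer before bind(); it is also worth remembering that the requested size is only a hint to the operating system. Below is a small sketch (not from Geode; the method name and values are hypothetical) that checks what was actually granted.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

// Sketch only: setReceiveBufferSize() is a hint, so verify the granted size.
static ServerSocket bindWithBuffer(int port, int requested) throws IOException {
    ServerSocket server = new ServerSocket(); // created unbound on purpose
    server.setReceiveBufferSize(requested);   // must precede bind() for buffers above 64K
    server.bind(new InetSocketAddress(port));
    int granted = server.getReceiveBufferSize();
    if (granted < requested) {
        // Many kernels cap SO_RCVBUF (e.g. net.core.rmem_max on Linux).
        System.err.println("Receive buffer capped: requested " + requested + ", got " + granted);
    }
    return server;
}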
From source file: org.apache.hadoop.dfs.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *    if conf's CONFIG_PROPERTY_SIMULATED property is set
 *    then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);

    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    this.socketTimeout = conf.getInt("dfs.socket.timeout", FSConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", FSConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port",
            "dfs.datanode.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils
                    .newInstance(Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); // 30s
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
    this.threadGroup = new ThreadGroup("dataXceiveServer");
    this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    this.balancingThrottler = new BlockBalanceThrottler(
            conf.getLong("dfs.balance.bandwidthPerSec", 1024L * 1024));

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress", "dfs.datanode.info.port",
            "dfs.datanode.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}
From source file: org.apache.hadoop.hdfs.server.datanode.CachingDataNode.java
private void reconfigureDataXceiver(final Configuration conf) throws IOException {
    final ServerSocket ss = ((DataXceiverServer) this.dataNode.dataXceiverServer.getRunnable()).ss;

    // Shut down old dataXceiverServer and replace it with our version
    if (this.dataNode.dataXceiverServer != null) {
        ((DataXceiverServer) this.dataNode.dataXceiverServer.getRunnable()).kill();
        this.dataNode.dataXceiverServer.interrupt();

        // wait for all data receiver threads to exit
        if (this.dataNode.threadGroup != null) {
            int sleepMs = 2;
            while (true) {
                this.dataNode.threadGroup.interrupt();
                LOG.info("Waiting for threadgroup to exit, active threads is "
                        + this.dataNode.threadGroup.activeCount());
                if (this.dataNode.threadGroup.activeCount() == 0) {
                    break;
                }
                try {
                    Thread.sleep(sleepMs);
                } catch (InterruptedException e) {
                }
                sleepMs = sleepMs * 3 / 2; // exponential backoff
                if (sleepMs > 1000) {
                    sleepMs = 1000;
                }
            }
        }
        // wait for dataXceiveServer to terminate
        try {
            this.dataNode.dataXceiverServer.join();
        } catch (InterruptedException ie) {
        }
    }

    // find free port or use privileged port provided
    final ServerSocket newServerSocket = (dnConf.socketWriteTimeout > 0) ? ServerSocketChannel.open().socket()
            : new ServerSocket();
    newServerSocket.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
    Server.bind(newServerSocket, (InetSocketAddress) ss.getLocalSocketAddress(), 0);
    newServerSocket.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);

    this.dataNode.threadGroup = new ThreadGroup("cachingDataXceiverServer");
    this.dataNode.dataXceiverServer = new Daemon(this.dataNode.threadGroup,
            new CachingDataXceiverServer(newServerSocket, conf, this.dataNode, this.blockCache));
    this.dataNode.threadGroup.setDaemon(true); // auto destroy when empty
}
From source file: org.apache.hadoop.hdfs.server.datanode.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *    if conf's CONFIG_PROPERTY_SIMULATED property is set
 *    then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 * @throws MalformedObjectNameException
 * @throws MBeanRegistrationException
 * @throws InstanceAlreadyExistsException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, SecureResources resources)
        throws IOException {
    if (UserGroupInformation.isSecurityEnabled() && resources == null)
        throw new RuntimeException("Cannot start secure cluster without " + "privileged resources.");

    this.secureResources = resources;
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // register datanode MXBean
    this.registerMXBean(conf); // register the MXBean for DataNode

    // Allow configuration to delay block reports to find bugs
    artificialBlockReceivedDelay = conf.getInt("dfs.datanode.artificialBlockReceivedDelay", 0);

    // find free port or use privileged port provided
    ServerSocket ss;
    if (secureResources == null) {
        ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
        Server.bind(ss, socAddr, 0);
    } else {
        ss = resources.getStreamingSocket();
    }
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = (secureResources == null)
            ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN))
            : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN), secureResources.getListener());
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new Configuration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode", this);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
        infoServer.addJerseyResourcePackage(
                DatanodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(),
                WebHdfsFileSystem.PATH_PREFIX + "/*");
    }
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = DataNodeInstrumentation.create(conf, dnRegistration.getStorageID());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // BlockTokenSecretManager is created here, but it shouldn't be
    // used until it is initialized in register().
    this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf, blockTokenSecretManager);
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}