List of usage examples for java.nio.channels.ServerSocketChannel.open()
public static ServerSocketChannel open() throws IOException
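Before the project-specific examples below, here is a minimal, self-contained sketch of the typical call sequence: open() returns an unbound channel in blocking mode, which is then bound to a local address and can accept connections. This is not from any of the projects listed here; the class name and the port are arbitrary illustrative choices, assuming Java 7 or later.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

public class OpenExample {
    public static void main(String[] args) throws IOException {
        // open() returns an unbound channel; it is in blocking mode by default.
        try (ServerSocketChannel server = ServerSocketChannel.open()) {
            server.bind(new InetSocketAddress(8080)); // arbitrary example port
            // accept() blocks until a client connects (blocking mode).
            try (SocketChannel client = server.accept()) {
                System.out.println("Accepted connection from " + client.getRemoteAddress());
            }
        }
    }
}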
From source file: org.openhab.binding.irtrans.handler.EthernetBridgeHandler.java
private void configureListener(String listenerPort) {
    try {
        listenerChannel = ServerSocketChannel.open();
        listenerChannel.socket().bind(new InetSocketAddress(Integer.parseInt(listenerPort)));
        listenerChannel.configureBlocking(false);

        logger.info("Listening for incoming connections on {}", listenerChannel.getLocalAddress());

        synchronized (selector) {
            selector.wakeup();
            try {
                listenerKey = listenerChannel.register(selector, SelectionKey.OP_ACCEPT);
            } catch (ClosedChannelException e1) {
                logger.debug("An exception occurred while registering a selector: '{}'", e1.getMessage());
                updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR, e1.getMessage());
            }
        }
    } catch (IOException e3) {
        logger.error("An exception occurred while configuring the listener channel on port number {}: '{}'",
                Integer.parseInt(listenerPort), e3.getMessage());
    }
}
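The example above follows a pattern that recurs throughout this list: open the channel, bind it, switch to non-blocking mode, and register it with a Selector for OP_ACCEPT. The following is a self-contained distillation of that pattern, not taken from any of the projects listed here; the class name and the port are illustrative assumptions.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;

public class NonBlockingAcceptSketch {
    public static void main(String[] args) throws IOException {
        Selector selector = Selector.open();
        ServerSocketChannel listener = ServerSocketChannel.open();
        listener.bind(new InetSocketAddress(9000)); // arbitrary example port
        listener.configureBlocking(false);          // must precede register()
        listener.register(selector, SelectionKey.OP_ACCEPT);

        while (true) {
            selector.select(); // blocks until at least one channel is ready
            Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
            while (keys.hasNext()) {
                SelectionKey key = keys.next();
                keys.remove();
                if (key.isAcceptable()) {
                    // accept() may return null in non-blocking mode; guard against it.
                    SocketChannel client = ((ServerSocketChannel) key.channel()).accept();
                    if (client != null) {
                        client.configureBlocking(false);
                        client.register(selector, SelectionKey.OP_READ);
                    }
                }
                // OP_READ handling omitted; this sketch only shows the accept path.
            }
        }
    }
}

Note that configureBlocking(false) must be called before register(); registering a channel that is still in blocking mode throws IllegalBlockingModeException.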
From source file: com.sun.grizzly.http.jk.common.ChannelNioSocket.java
/**
 * jmx:managed-operation
 */
@Override
public void init() throws IOException {
    // Find a port.
    if (startPort == 0) {
        port = 0;
        LoggerUtils.getLogger().info("JK: ajp13 disabling channelNioSocket");
        running = true;
        return;
    }
    if (maxPort < startPort) {
        maxPort = startPort;
    }
    ServerSocketChannel ssc = ServerSocketChannel.open();
    ssc.configureBlocking(false);
    for (int i = startPort; i <= maxPort; i++) {
        try {
            InetSocketAddress iddr = null;
            if (inet == null) {
                iddr = new InetSocketAddress(i);
            } else {
                iddr = new InetSocketAddress(inet, i);
            }
            sSocket = ssc.socket();
            sSocket.bind(iddr);
            port = i;
            break;
        } catch (IOException ex) {
            LoggerUtils.getLogger().info("Port busy " + i + " " + ex.toString());
            sSocket = null;
        }
    }
    if (sSocket == null) {
        LoggerUtils.getLogger().log(Level.SEVERE, "Can't find free port " + startPort + " " + maxPort);
        return;
    }
    LoggerUtils.getLogger().info("JK: ajp13 listening on " + getAddress() + ":" + port);

    selector = Utils.openSelector();
    ssc.register(selector, SelectionKey.OP_ACCEPT);

    // If this is not the base port and we are the 'main' channelSocket and
    // SHM didn't already set the localId - we'll set the instance id
    if ("channelNioSocket".equals(name) && port != startPort && (wEnv.getLocalId() == 0)) {
        wEnv.setLocalId(port - startPort);
    }

    // XXX Reverse it -> this is a notification generator !!
    if (next == null && wEnv != null) {
        if (nextName != null) {
            setNext(wEnv.getHandler(nextName));
        }
        if (next == null) {
            next = wEnv.getHandler("dispatch");
        }
        if (next == null) {
            next = wEnv.getHandler("request");
        }
    }
    JMXRequestNote = wEnv.getNoteId(WorkerEnv.ENDPOINT_NOTE, "requestNote");
    running = true;

    // Run a thread that will accept connections.
    // XXX Try to find a thread first - not sure how...
    if (this.domain != null) {
        try {
            tpOName = new ObjectName(domain + ":type=ThreadPool,name=" + getChannelName());
            Registry.getRegistry(null, null).registerComponent(tp, tpOName, null);

            rgOName = new ObjectName(domain + ":type=GlobalRequestProcessor,name=" + getChannelName());
            Registry.getRegistry(null, null).registerComponent(global, rgOName, null);
        } catch (Exception e) {
            LoggerUtils.getLogger().log(Level.SEVERE, "Can't register threadpool");
        }
    }
    tp.start();
    Poller pollAjp = new Poller();
    tp.runIt(pollAjp);
}
From source file: org.apache.nifi.processors.standard.ListenTCPRecord.java
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    this.port = context.getProperty(PORT).evaluateAttributeExpressions().asInteger();

    final int readTimeout = context.getProperty(READ_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
    final int maxSocketBufferSize = context.getProperty(MAX_SOCKET_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    final int maxConnections = context.getProperty(MAX_CONNECTIONS).asInteger();
    final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER)
            .asControllerService(RecordReaderFactory.class);

    // if the Network Interface Property wasn't provided then a null InetAddress will indicate to bind to all interfaces
    final InetAddress nicAddress;
    final String nicAddressStr = context.getProperty(NETWORK_INTF_NAME).evaluateAttributeExpressions().getValue();
    if (!StringUtils.isEmpty(nicAddressStr)) {
        NetworkInterface netIF = NetworkInterface.getByName(nicAddressStr);
        nicAddress = netIF.getInetAddresses().nextElement();
    } else {
        nicAddress = null;
    }

    SSLContext sslContext = null;
    SslContextFactory.ClientAuth clientAuth = null;
    final SSLContextService sslContextService = context.getProperty(SSL_CONTEXT_SERVICE)
            .asControllerService(SSLContextService.class);
    if (sslContextService != null) {
        final String clientAuthValue = context.getProperty(CLIENT_AUTH).getValue();
        sslContext = sslContextService.createSSLContext(SSLContextService.ClientAuth.valueOf(clientAuthValue));
        clientAuth = SslContextFactory.ClientAuth.valueOf(clientAuthValue);
    }

    // create a ServerSocketChannel in non-blocking mode and bind to the given address and port
    final ServerSocketChannel serverSocketChannel = ServerSocketChannel.open();
    serverSocketChannel.configureBlocking(false);
    serverSocketChannel.bind(new InetSocketAddress(nicAddress, port));

    this.dispatcher = new SocketChannelRecordReaderDispatcher(serverSocketChannel, sslContext, clientAuth,
            readTimeout, maxSocketBufferSize, maxConnections, recordReaderFactory, socketReaders, getLogger());

    // start a thread to run the dispatcher
    final Thread readerThread = new Thread(dispatcher);
    readerThread.setName(getClass().getName() + " [" + getIdentifier() + "]");
    readerThread.setDaemon(true);
    readerThread.start();
}
From source file: org.apache.hadoop.dfs.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *          if conf's CONFIG_PROPERTY_SIMULATED property is set
 *          then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);

    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    this.socketTimeout = conf.getInt("dfs.socket.timeout", FSConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", FSConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port",
            "dfs.datanode.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    int tmpPort = socAddr.getPort();

    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils
                    .newInstance(Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); // 30s
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
    this.threadGroup = new ThreadGroup("dataXceiveServer");
    this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    this.balancingThrottler = new BlockBalanceThrottler(
            conf.getLong("dfs.balance.bandwidthPerSec", 1024L * 1024));

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress", "dfs.datanode.info.port",
            "dfs.datanode.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}
From source file: x10.x10rt.yarn.ApplicationMaster.java
private void setup() throws IOException, YarnException {
    LOG.info("Starting ApplicationMaster");

    // Remove the AM->RM token so that containers cannot access it.
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    UserGroupInformation appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    resourceManager = AMRMClientAsync.createAMRMClientAsync(1000, new RMCallbackHandler());
    resourceManager.init(conf);
    resourceManager.start();

    nodeManager = new NMClientAsyncImpl(new NMCallbackHandler(this));
    nodeManager.init(conf);
    nodeManager.start();

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = resourceManager.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    {
        int slash = appMasterHostname.indexOf('/');
        if (slash != -1)
            appMasterHostname = appMasterHostname.substring(0, slash);
    }

    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    // TODO: should we reject instead of modifying to fit?
    if (memoryPerPlaceInMb > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + memoryPerPlaceInMb + ", max=" + maxMem);
        memoryPerPlaceInMb = maxMem;
    }
    if (coresPerPlace > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + coresPerPlace + ", max=" + maxVCores);
        coresPerPlace = maxVCores;
    } else if (coresPerPlace == 0) {
        LOG.info("Container virtual cores specified as auto (X10_NTHREADS=0)." + " Using max value."
                + ", specified=" + coresPerPlace + ", max=" + maxVCores);
        coresPerPlace = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());
    int numTotalContainersToRequest = initialNumPlaces - previousAMRunningContainers.size();

    // open a local port for X10rt management, and register it with the selector
    launcherChannel = ServerSocketChannel.open();
    //launcherChannel.bind(new InetSocketAddress(appMasterHostname, 0)); // bind to the visible network hostname and random port
    launcherChannel.bind(null);
    launcherChannel.configureBlocking(false);
    appMasterPort = launcherChannel.socket().getLocalPort();
    launcherChannel.register(selector, SelectionKey.OP_ACCEPT);

    numRequestedContainers.set(initialNumPlaces);
    // Send request for containers to RM
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        Resource capability = Resource.newInstance(memoryPerPlaceInMb, coresPerPlace);
        ContainerRequest request = new ContainerRequest(capability, null, null, Priority.newInstance(0));
        LOG.info("Requested container ask: " + request.toString());
        resourceManager.addContainerRequest(request);
        pendingRequests.add(request);
    }
}
From source file: common.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *          if conf's CONFIG_PROPERTY_SIMULATED property is set
 *          then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);

    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}
From source file: com.springrts.springls.ServerThread.java
public boolean startServer() {
    context.starting();

    Configuration configuration = getContext().getService(Configuration.class);
    int port = configuration.getInt(ServerConfiguration.PORT);

    try {
        context.getServer().setCharset("ISO-8859-1");

        // open a non-blocking server socket channel
        sSockChan = ServerSocketChannel.open();
        sSockChan.configureBlocking(false);

        // bind to localhost on designated port
        //***InetAddress addr = InetAddress.getLocalHost();
        //***sSockChan.socket().bind(new InetSocketAddress(addr, port));
        sSockChan.socket().bind(new InetSocketAddress(port));

        // get a selector for multiplexing the client channels
        readSelector = Selector.open();
    } catch (IOException ex) {
        LOG.error("Could not listen on port: " + port, ex);
        return false;
    }
    LOG.info("Listening for connections on TCP port {} ...", port);

    context.started();
    return true;
}
From source file: org.apache.hadoop.hdfs.server.datanode.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *          if conf's CONFIG_PROPERTY_SIMULATED property is set
 *          then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 * @throws MalformedObjectNameException
 * @throws MBeanRegistrationException
 * @throws InstanceAlreadyExistsException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, SecureResources resources)
        throws IOException {
    if (UserGroupInformation.isSecurityEnabled() && resources == null)
        throw new RuntimeException("Cannot start secure cluster without " + "privileged resources.");

    this.secureResources = resources;

    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            //   this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // register datanode MXBean
    this.registerMXBean(conf); // register the MXBean for DataNode

    // Allow configuration to delay block reports to find bugs
    artificialBlockReceivedDelay = conf.getInt("dfs.datanode.artificialBlockReceivedDelay", 0);

    // find free port or use privileged port provided
    ServerSocket ss;
    if (secureResources == null) {
        ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
        Server.bind(ss, socAddr, 0);
    } else {
        ss = resources.getStreamingSocket();
    }
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY) * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = (secureResources == null)
            ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN))
            : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN), secureResources.getListener());
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new Configuration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode", this);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
        infoServer.addJerseyResourcePackage(
                DatanodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(),
                WebHdfsFileSystem.PATH_PREFIX + "/*");
    }
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = DataNodeInstrumentation.create(conf, dnRegistration.getStorageID());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // BlockTokenSecretManager is created here, but it shouldn't be
    // used until it is initialized in register().
    this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf, blockTokenSecretManager);
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}
From source file: org.apache.hadoop.mapred.buffer.Manager.java
public void open() throws IOException {
    Configuration conf = tracker.conf();
    int maxMaps = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
    int maxReduces = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 1);

    InetSocketAddress serverAddress = getServerAddress(conf);
    this.server = RPC.getServer(this, serverAddress.getHostName(), serverAddress.getPort(),
            maxMaps + maxReduces, false, conf);
    this.server.start();

    this.requestTransfer.setPriority(Thread.MAX_PRIORITY);
    this.requestTransfer.start();

    /** The server socket and selector registration */
    InetSocketAddress controlAddress = getControlAddress(conf);
    this.controlPort = controlAddress.getPort();
    this.channel = ServerSocketChannel.open();
    this.channel.socket().bind(controlAddress);

    this.acceptor = new Thread() {
        @Override
        public void run() {
            while (!isInterrupted()) {
                SocketChannel connection = null;
                try {
                    connection = channel.accept();
                    DataInputStream in = new DataInputStream(connection.socket().getInputStream());
                    int numRequests = in.readInt();
                    for (int i = 0; i < numRequests; i++) {
                        BufferRequest request = BufferRequest.read(in);
                        if (request instanceof ReduceBufferRequest) {
                            add((ReduceBufferRequest) request);
                            LOG.info("add new request " + request);
                        } else if (request instanceof MapBufferRequest) {
                            add((MapBufferRequest) request);
                        }
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } finally {
                    try {
                        if (connection != null)
                            connection.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    };
    this.acceptor.setDaemon(true);
    this.acceptor.setPriority(Thread.MAX_PRIORITY);
    this.acceptor.start();

    this.serviceQueue = new Thread() {
        public void run() {
            List<OutputFile> service = new ArrayList<OutputFile>();
            while (!isInterrupted()) {
                try {
                    OutputFile o = queue.take();
                    service.add(o);
                    queue.drainTo(service);
                    for (OutputFile file : service) {
                        try {
                            if (file != null)
                                add(file);
                        } catch (Throwable t) {
                            t.printStackTrace();
                            LOG.error("Error servicing file: " + file + ". " + t);
                        }
                    }
                } catch (Throwable t) {
                    t.printStackTrace();
                    LOG.error(t);
                } finally {
                    service.clear();
                }
            }
            LOG.info("Service queue thread exit.");
        }
    };
    this.serviceQueue.setPriority(Thread.MAX_PRIORITY);
    this.serviceQueue.setDaemon(true);
    this.serviceQueue.start();
}
From source file: org.openhab.binding.tcp.AbstractSocketChannelBinding.java
protected void configureListenerChannel() {
    // open the listener port
    try {
        listenerChannel = ServerSocketChannel.open();
        listenerChannel.socket().bind(new InetSocketAddress(listenerPort));
        listenerChannel.configureBlocking(false);

        logger.info("Listening for incoming connections on {}", listenerChannel.getLocalAddress());

        synchronized (selector) {
            selector.wakeup();
            try {
                listenerKey = listenerChannel.register(selector, SelectionKey.OP_ACCEPT);
            } catch (ClosedChannelException e1) {
                logger.error("An exception occurred while registering a selector: {}", e1.getMessage());
            }
        }
    } catch (Exception e3) {
        logger.error("An exception occurred while creating the Listener Channel on port number {} ({})",
                listenerPort, e3.getMessage());
    }
}