List of usage examples for java.net.InetSocketAddress.getHostName()
public final String getHostName()
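Before the project examples below, here is a minimal standalone sketch of the call itself; the host "example.org" and port 8080 are arbitrary placeholder values. Keep in mind that getHostName() may trigger a reverse DNS lookup when the address was built from a literal IP, whereas getHostString() (Java 7+) returns the stored string without resolving.

import java.net.InetSocketAddress;

public class GetHostNameExample {
    public static void main(String[] args) {
        // Resolved endpoint: the supplied hostname is kept, so no reverse lookup is needed.
        InetSocketAddress byName = new InetSocketAddress("example.org", 8080);
        System.out.println(byName.getHostName() + ":" + byName.getPort());

        // Unresolved endpoint: getHostName() still returns the hostname that was supplied.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.org", 8080);
        System.out.println(unresolved.getHostName() + ":" + unresolved.getPort());
    }
}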
From source file:org.apache.hama.bsp.GroomServer.java
public synchronized void initialize() throws IOException {
    if (this.conf.get(Constants.PEER_HOST) != null) {
        this.localHostname = conf.get(Constants.PEER_HOST);
    }
    if (localHostname == null) {
        this.localHostname = DNS.getDefaultHost(conf.get("bsp.dns.interface", "default"),
                conf.get("bsp.dns.nameserver", "default"));
    }

    // check local disk
    checkLocalDirs(getLocalDirs());
    deleteLocalFiles(SUBDIR);

    // Clear out state tables
    this.tasks.clear();
    this.runningJobs = new TreeMap<BSPJobID, RunningJob>();
    this.runningTasks = new ConcurrentHashMap<TaskAttemptID, TaskInProgress>();
    this.finishedTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
    this.conf.set(Constants.PEER_HOST, localHostname);
    this.conf.set(Constants.GROOM_RPC_HOST, localHostname);
    this.maxCurrentTasks = conf.getInt(Constants.MAX_TASKS_PER_GROOM, 3);
    this.assignedPeerNames = new HashMap<TaskAttemptID, Integer>(2 * this.maxCurrentTasks);

    int rpcPort = -1;
    String rpcAddr = null;
    if (false == this.initialized) {
        rpcAddr = conf.get(Constants.GROOM_RPC_HOST, Constants.DEFAULT_GROOM_RPC_HOST);
        rpcPort = conf.getInt(Constants.GROOM_RPC_PORT, Constants.DEFAULT_GROOM_RPC_PORT);
        if (-1 == rpcPort || null == rpcAddr)
            throw new IllegalArgumentException("Error rpc address " + rpcAddr + " port" + rpcPort);
        this.workerServer = RPC.getServer(this, rpcAddr, rpcPort, conf);
        this.workerServer.start();
        this.rpcServer = rpcAddr + ":" + rpcPort;
    }

    server = new HttpServer("groomserver", rpcAddr,
            conf.getInt("bsp.http.groomserver.port", Constants.DEFAULT_GROOM_INFO_SERVER), true, conf);
    FileSystem local = FileSystem.getLocal(conf);
    server.setAttribute("groom.server", this);
    server.setAttribute("local.file.system", local);
    server.setAttribute("conf", conf);
    server.setAttribute("log", LOG);
    server.addServlet("taskLog", "/tasklog", TaskLogServlet.class);
    LOG.info("starting webserver: " + rpcAddr);
    server.start();

    String address = BSPNetUtils.getServerAddress(conf, "bsp.groom.report.bindAddress",
            "bsp.groom.report.port", "bsp.groom.report.address");
    InetSocketAddress socAddr = BSPNetUtils.createSocketAddr(address);
    String bindAddress = socAddr.getHostName();
    int tmpPort = socAddr.getPort();

    // RPC initialization
    this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort,
            conf.getInt("hama.groom.report.server.handler.threads.num", 5), false, this.conf);
    this.taskReportServer.start();

    // get the assigned address
    this.taskReportAddress = taskReportServer.getListenerAddress();
    this.conf.set("bsp.groom.report.address",
            taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
    LOG.info("TaskReportServer up at: " + this.taskReportAddress);

    this.groomHostName = rpcAddr;
    this.groomServerName = "groomd_" + this.rpcServer.replace(':', '_');
    LOG.info("Starting groom: " + this.rpcServer);

    // establish the communication link to bsp master
    this.masterClient = (MasterProtocol) RPC.waitForProxy(MasterProtocol.class,
            HamaRPCProtocolVersion.versionID, bspMasterAddr, conf);

    this.instructor = new Instructor();
    this.instructor.bind(DispatchTasksDirective.class, new DispatchTasksHandler());
    instructor.start();

    if (this.taskMonitorService == null) {
        this.taskMonitorService = Executors.newScheduledThreadPool(1);
        long monitorPeriod = this.conf.getLong(Constants.GROOM_PING_PERIOD,
                Constants.DEFAULT_GROOM_PING_PERIOD);
        if (monitorPeriod > 0) {
            this.taskMonitorService.scheduleWithFixedDelay(new BSPTasksMonitor(), 1000, monitorPeriod,
                    TimeUnit.MILLISECONDS);
        }
    }

    if (conf.getBoolean("bsp.monitor.enabled", false)) {
        new Monitor(conf, zk, this.groomServerName).start();
    }

    if (conf.getBoolean("bsp.monitor.fd.enabled", false)) {
        this.sensor.set(FDProvider.createSensor(
                conf.getClass("bsp.monitor.fd.sensor.class", UDPSensor.class, Sensor.class),
                (HamaConfiguration) conf));
        this.sensor.get().start();
    }

    // enroll in bsp master once the GroomServer is ready to accept tasks
    if (-1 == rpcPort || null == rpcAddr)
        throw new IllegalArgumentException("Error rpc address " + rpcAddr + " port" + rpcPort);

    if (!this.masterClient.register(new GroomServerStatus(groomServerName, cloneAndResetRunningTaskStatuses(),
            failures, maxCurrentTasks, this.rpcServer, groomHostName))) {
        LOG.error("There is a problem in establishing communication" + " link with BSPMaster");
        throw new IOException("There is a problem in establishing" + " communication link with BSPMaster.");
    }

    this.running = true;
    this.initialized = true;
}
From source file:com.thejoshwa.ultrasonic.androidapp.service.ssl.SSLSocketFactory.java
/**
 * @since 4.1
 */
public Socket connectSocket(final Socket sock, final InetSocketAddress remoteAddress,
        final InetSocketAddress localAddress, final HttpParams params)
        throws IOException, UnknownHostException, ConnectTimeoutException {
    if (remoteAddress == null) {
        throw new IllegalArgumentException("Remote address may not be null");
    }
    if (params == null) {
        throw new IllegalArgumentException("HTTP parameters may not be null");
    }
    SSLSocket sslsock = (SSLSocket) (sock != null ? sock : createSocket());
    if (localAddress != null) {
        // sslsock.setReuseAddress(HttpConnectionParams.getSoReuseaddr(params));
        sslsock.bind(localAddress);
    }

    int connTimeout = HttpConnectionParams.getConnectionTimeout(params);
    int soTimeout = HttpConnectionParams.getSoTimeout(params);

    try {
        sslsock.connect(remoteAddress, connTimeout);
    } catch (SocketTimeoutException ex) {
        throw new ConnectTimeoutException(
                "Connect to " + remoteAddress.getHostName() + "/" + remoteAddress.getAddress() + " timed out");
    }
    sslsock.setSoTimeout(soTimeout);
    if (this.hostnameVerifier != null) {
        try {
            this.hostnameVerifier.verify(remoteAddress.getHostName(), sslsock);
            // verifyHostName() didn't blow up - good!
        } catch (IOException iox) {
            // close the socket before re-throwing the exception
            try {
                sslsock.close();
            } catch (Exception x) {
                /* ignore */
            }
            throw iox;
        }
    }
    return sslsock;
}
From source file:org.apache.hadoop.hdfs.server.namenode.NameNode.java
@SuppressWarnings("deprecation") private void startHttpServer(final Configuration conf) throws IOException { final String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress", "dfs.info.port", "dfs.http.address"); final InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr); if (UserGroupInformation.isSecurityEnabled()) { String httpsUser = SecurityUtil.getServerPrincipal( conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoSocAddr.getHostName()); if (httpsUser == null) { LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY + " not defined in config. Starting http server as " + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), serverAddress.getHostName()) + ": Kerberized SSL may be not function correctly."); } else {//ww w . j a v a 2s.c o m // Kerberized SSL servers must be run from the host principal... LOG.info("Logging in as " + httpsUser + " to start http server."); SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoSocAddr.getHostName()); } } UserGroupInformation ugi = UserGroupInformation.getLoginUser(); try { this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() { @Override public HttpServer run() throws IOException, InterruptedException { String infoHost = infoSocAddr.getHostName(); int infoPort = infoSocAddr.getPort(); httpServer = new HttpServer("hdfs", infoHost, infoPort, infoPort == 0, conf, SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN)) { { if (WebHdfsFileSystem.isEnabled(conf, LOG)) { //add SPNEGO authentication filter for webhdfs final String name = "SPNEGO"; final String classname = AuthFilter.class.getName(); final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; Map<String, String> params = getAuthFilterParams(conf); defineFilter(webAppContext, name, classname, params, new String[] { pathSpec }); LOG.info("Added filter '" + name + "' (class=" + classname + ")"); // add webhdfs packages addJerseyResourcePackage(NamenodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(), pathSpec); } } private Map<String, String> getAuthFilterParams(Configuration conf) throws IOException { Map<String, String> params = new HashMap<String, String>(); String principalInConf = conf .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); if (principalInConf != null && !principalInConf.isEmpty()) { params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SecurityUtil .getServerPrincipal(principalInConf, serverAddress.getHostName())); } String httpKeytab = conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY); if (httpKeytab != null && !httpKeytab.isEmpty()) { params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, httpKeytab); } return params; } }; boolean certSSL = conf.getBoolean("dfs.https.enable", false); boolean useKrb = UserGroupInformation.isSecurityEnabled(); if (certSSL || useKrb) { boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); InetSocketAddress secInfoSocAddr = NetUtils .createSocketAddr(infoHost + ":" + conf.get("dfs.https.port", infoHost + ":" + 0)); Configuration sslConf = new Configuration(false); if (certSSL) { sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml")); } httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, useKrb); // assume same ssl port for all datanodes InetSocketAddress datanodeSslPort = NetUtils 
.createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475)); httpServer.setAttribute("datanode.https.port", datanodeSslPort.getPort()); } httpServer.setAttribute("name.node", NameNode.this); httpServer.setAttribute("name.node.address", getNameNodeAddress()); httpServer.setAttribute("name.system.image", getFSImage()); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); httpServer.addInternalServlet("getDelegationToken", GetDelegationTokenServlet.PATH_SPEC, GetDelegationTokenServlet.class, true); httpServer.addInternalServlet("renewDelegationToken", RenewDelegationTokenServlet.PATH_SPEC, RenewDelegationTokenServlet.class, true); httpServer.addInternalServlet("cancelDelegationToken", CancelDelegationTokenServlet.PATH_SPEC, CancelDelegationTokenServlet.class, true); httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true); httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class, true); httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class, false); httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class, false); httpServer.addInternalServlet("checksum", "/fileChecksum/*", FileChecksumServlets.RedirectServlet.class, false); httpServer.addInternalServlet("contentSummary", "/contentSummary/*", ContentSummaryServlet.class, false); httpServer.start(); // The web-server port can be ephemeral... ensure we have the correct info infoPort = httpServer.getPort(); httpAddress = new InetSocketAddress(infoHost, infoPort); conf.set("dfs.http.address", infoHost + ":" + infoPort); LOG.info("Web-server up at: " + infoHost + ":" + infoPort); return httpServer; } }); } catch (InterruptedException e) { throw new IOException(e); } finally { if (UserGroupInformation.isSecurityEnabled() && conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) { // Go back to being the correct Namenode principal LOG.info("Logging back in as " + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), serverAddress.getHostName()) + " following http server start."); SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, serverAddress.getHostName()); } } }
From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNodeNew.java
/**
 * Initialize AvatarNode
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
    InetSocketAddress socAddr = AvatarNodeNew.getAddress(conf);
    int handlerCount = conf.getInt("hdfs.avatarnode.handler.count", 3);
    try {
        UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
    } catch (LoginException e) {
    }

    // create rpc server
    this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, conf);

    // The rpc-server port can be ephemeral... ensure we have the
    // correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("AvatarNode up at: " + this.serverAddress);

    this.server.start();
}
From source file:org.apache.hadoop.mapred.ThriftJobTrackerPlugin.java
@Override
public void start(Object service) {
    LOG.info("Starting ThriftJobTrackerPlugin");
    this.jobTracker = (JobTracker) service;
    try {
        InetSocketAddress address = NetUtils
                .createSocketAddr(conf.get(THRIFT_ADDRESS_PROPERTY, DEFAULT_THRIFT_ADDRESS));
        this.thriftServer = new ThriftPluginServer(address, new ProcessorFactory());
        thriftServer.setConf(conf);
        thriftServer.start();
        // The port may have been 0, so we update it.
        conf.set(THRIFT_ADDRESS_PROPERTY, address.getHostName() + ":" + thriftServer.getPort());
    } catch (java.io.IOException ioe) {
        LOG.warn("Cannot start Thrift jobtracker plug-in", ioe);
        throw new RuntimeException("Cannot start Thrift jobtracker plug-in", ioe);
    }
}
From source file:net.spy.memcached.CouchbaseConnection.java
private List<CouchbaseNode> createConnections(List<InetSocketAddress> addrs) throws IOException {
    List<CouchbaseNode> nodeList = new LinkedList<CouchbaseNode>();

    for (InetSocketAddress a : addrs) {
        HttpParams params = new SyncBasicHttpParams();
        params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
                .setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 5000)
                .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
                .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
                .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
                .setParameter(CoreProtocolPNames.USER_AGENT, "Spymemcached Client/1.1");

        HttpProcessor httpproc = new ImmutableHttpProcessor(new HttpRequestInterceptor[] {
                new RequestContent(), new RequestTargetHost(), new RequestConnControl(),
                new RequestUserAgent(), new RequestExpectContinue(), });

        AsyncNHttpClientHandler protocolHandler = new AsyncNHttpClientHandler(httpproc,
                new MyHttpRequestExecutionHandler(), new DefaultConnectionReuseStrategy(),
                new DirectByteBufferAllocator(), params);
        protocolHandler.setEventListener(new EventLogger());

        AsyncConnectionManager connMgr = new AsyncConnectionManager(
                new HttpHost(a.getHostName(), a.getPort()), NUM_CONNS, protocolHandler, params);
        getLogger().info("Added %s to connect queue", a);

        CouchbaseNode node = connFactory.createCouchDBNode(a, connMgr);
        node.init();
        nodeList.add(node);
    }
    return nodeList;
}
From source file:org.apache.synapse.transport.nhttp.HttpCoreNIOListener.java
/**
 * Start specific endpoints given by an InetSocketAddress list
 *
 * @param endpointsClosed InetSocketAddresses of endpoints to be started
 * @throws AxisFault
 */
private void startSpecificEndpoints(List<InetSocketAddress> endpointsClosed) throws AxisFault {
    Queue<ListenerEndpoint> endpoints = new LinkedList<ListenerEndpoint>();

    // Ensure simple but stable order
    List<InetSocketAddress> addressList = endpointsClosed;
    Collections.sort(addressList, new Comparator<InetSocketAddress>() {
        public int compare(InetSocketAddress a1, InetSocketAddress a2) {
            String s1 = a1.toString();
            String s2 = a2.toString();
            return s1.compareTo(s2);
        }
    });

    for (InetSocketAddress address : addressList) {
        endpoints.add(ioReactor.listen(address));
    }

    // Wait for the endpoint to become ready, i.e. for the listener to start accepting
    // requests.
    while (!endpoints.isEmpty()) {
        ListenerEndpoint endpoint = endpoints.remove();
        try {
            endpoint.waitFor();
            if (log.isInfoEnabled()) {
                InetSocketAddress address = (InetSocketAddress) endpoint.getAddress();
                if (!address.isUnresolved()) {
                    log.info(name + " started on " + address.getHostName() + ":" + address.getPort());
                } else {
                    log.info(name + " started on " + address);
                }
            }
        } catch (InterruptedException e) {
            log.warn("Listener startup was interrupted");
            break;
        }
    }
}
From source file:org.apache.synapse.transport.nhttp.HttpCoreNIOListener.java
private void startEndpoints() throws AxisFault {
    Queue<ListenerEndpoint> endpoints = new LinkedList<ListenerEndpoint>();

    Set<InetSocketAddress> addressSet = new HashSet<InetSocketAddress>();
    addressSet.addAll(connFactory.getBindAddresses());
    if (NHttpConfiguration.getInstance().getMaxActiveConnections() != -1) {
        addMaxConnectionCountController(NHttpConfiguration.getInstance().getMaxActiveConnections());
    }
    if (listenerContext.getBindAddress() != null) {
        addressSet.add(new InetSocketAddress(listenerContext.getBindAddress(), listenerContext.getPort()));
    }
    if (addressSet.isEmpty()) {
        addressSet.add(new InetSocketAddress(listenerContext.getPort()));
    }

    // Ensure simple but stable order
    List<InetSocketAddress> addressList = new ArrayList<InetSocketAddress>(addressSet);
    Collections.sort(addressList, new Comparator<InetSocketAddress>() {
        public int compare(InetSocketAddress a1, InetSocketAddress a2) {
            String s1 = a1.toString();
            String s2 = a2.toString();
            return s1.compareTo(s2);
        }
    });

    for (InetSocketAddress address : addressList) {
        endpoints.add(ioReactor.listen(address));
    }

    // Wait for the endpoint to become ready, i.e. for the listener to start accepting
    // requests.
    while (!endpoints.isEmpty()) {
        ListenerEndpoint endpoint = endpoints.remove();
        try {
            endpoint.waitFor();
            if (log.isInfoEnabled()) {
                InetSocketAddress address = (InetSocketAddress) endpoint.getAddress();
                if (!address.isUnresolved()) {
                    log.info(name + " started on " + address.getHostName() + ":" + address.getPort());
                } else {
                    log.info(name + " started on " + address);
                }
            }
        } catch (InterruptedException e) {
            log.warn("Listener startup was interrupted");
            break;
        }
    }
}
From source file:org.apache.hadoop.hdfs.server.namenode.ClusterJspHelper.java
/**
 * JSP helper function that generates the cluster health report. When an
 * exception is encountered while getting Namenode status, the exception will
 * be listed on the page with the corresponding stack trace.
 */
ClusterStatus generateClusterHealthReport() {
    ClusterStatus cs = new ClusterStatus();
    Configuration conf = new Configuration();
    List<ConfiguredNNAddress> nns = null;
    try {
        nns = DFSUtil.flattenAddressMap(DFSUtil.getNNServiceRpcAddresses(conf));
    } catch (Exception e) {
        // Could not build cluster status
        cs.setError(e);
        return cs;
    }

    // Process each namenode and add it to ClusterStatus
    for (ConfiguredNNAddress cnn : nns) {
        InetSocketAddress isa = cnn.getAddress();
        NamenodeMXBeanHelper nnHelper = null;
        try {
            nnHelper = new NamenodeMXBeanHelper(isa, conf);
            String mbeanProps = queryMbean(nnHelper.httpAddress, conf);
            NamenodeStatus nn = nnHelper.getNamenodeStatus(mbeanProps);
            if (cs.clusterid.isEmpty() || cs.clusterid.equals("")) { // Set clusterid only once
                cs.clusterid = nnHelper.getClusterId(mbeanProps);
            }
            cs.addNamenodeStatus(nn);
        } catch (Exception e) {
            // track exceptions encountered when connecting to namenodes
            cs.addException(isa.getHostName(), e);
            continue;
        }
    }
    return cs;
}