List of usage examples for java.net.InetSocketAddress.getPort()
public final int getPort()
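Before the real-world examples below, a minimal self-contained sketch of the method's behavior (the class name here is illustrative, not from any of the projects listed): getPort() returns the port number the address was constructed with; no network lookup is involved, so it works even for unresolved addresses.

import java.net.InetSocketAddress;

public class GetPortDemo {
    public static void main(String[] args) {
        // The port is stored at construction time; no resolution is needed to read it.
        InetSocketAddress resolved = new InetSocketAddress("localhost", 8080);
        System.out.println(resolved.getPort()); // prints 8080

        // getPort() also works for addresses whose host was never resolved.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("some.host.invalid", 9090);
        System.out.println(unresolved.getPort()); // prints 9090
    }
}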
From source file:org.apache.hadoop.mapred.resourceutilization.Collector.java
protected void initialize(Configuration conf) throws IOException {
    InetSocketAddress socAddr = Collector.getAddress(conf);
    int handlerCount = conf.getInt("mapred.resourceutilization.handler.count", 10);

    // create rpc server
    this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, conf);

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("Collector up at: " + this.serverAddress);

    // start RPC server
    this.server.start();

    // How long until TaskTracker reports expire
    timeLimit = conf.getLong("mapred.resourceutilization.timelimit", DEFAULT_TIME_LIMIT);

    // How long after a job stops before we consider it finished
    stopTimeLimit = conf.getLong("mapred.resourceutilization.stoptimelimit", DEFAULT_STOP_TIME_LIMIT);

    // How often we aggregate the reports
    aggregatePeriod = conf.getLong("mapred.resourceutilization.aggregateperiod", DEFAULT_AGGREGATE_SLEEP_TIME);

    // Start the daemon thread to aggregate the TaskTracker reports
    this.aggregateDaemon = new Daemon(new AggregateRun());
    this.aggregateDaemon.start();
}
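The "ephemeral" comment above reflects a common java.net pattern that several examples on this page rely on: bind to port 0, let the OS pick a free port, then read the actual port back via getPort(). A minimal sketch of that pattern using a plain ServerSocket rather than Hadoop's RPC server (all names here are illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortDemo {
    public static void main(String[] args) throws IOException {
        // Bind to port 0 so the OS assigns a free ephemeral port.
        try (ServerSocket server = new ServerSocket()) {
            server.bind(new InetSocketAddress("localhost", 0));
            // The requested port (0) tells clients nothing; read the real one back.
            InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress();
            System.out.println("Actually listening on port " + bound.getPort());
        }
    }
}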
From source file:de.javakaffee.web.msm.integration.MemcachedFailoverIntegrationTest.java
private String toString(final String nodeId, final InetSocketAddress address) {
    return nodeId + ":" + address.getHostName() + ":" + address.getPort();
}
From source file:com.devoteam.srit.xmlloader.http.nio.NIOSocketServerListener.java
public void acceptReady() {
    try {
        SocketChannel socketChannel = this.channel.accept();

        NIOSocketServerHttp socketServerHttp = new NIOSocketServerHttp();
        HybridSocket socket = new HybridSocket(socketServerHttp);

        InetSocketAddress remoteInetSocketAddress = (InetSocketAddress) socketChannel.socket()
                .getRemoteSocketAddress();
        InetSocketAddress localInetSocketAddress = (InetSocketAddress) socketChannel.socket()
                .getLocalSocketAddress();

        String connectionName = "HTTPServerConnection" + Stack.nextTransactionId();
        String remoteHost = remoteInetSocketAddress.getAddress().getHostAddress();
        String remotePort = Integer.toString(remoteInetSocketAddress.getPort());
        String localHost = localInetSocketAddress.getAddress().getHostAddress();
        String localPort = Integer.toString(localInetSocketAddress.getPort());

        NIOChannelHttp channelHTTP = new NIOChannelHttp(connectionName, localHost, localPort, remoteHost,
                remotePort, StackFactory.PROTOCOL_HTTP, secure);

        DefaultHttpServerConnection serverConnection = new DefaultHttpServerConnection();
        socketServerHttp.init(serverConnection, channelHTTP);
        channelHTTP.setSocketServerHttp(socketServerHttp);

        StackFactory.getStack(StackFactory.PROTOCOL_HTTP).openChannel(channelHTTP);

        if (socketChannel instanceof SSLSocketChannel) {
            StackHttp.ioReactor.openTLS((SSLSocketChannel) socketChannel, socket);
        } else {
            StackHttp.ioReactor.openTCP(socketChannel, socket);
        }

        serverConnection.bind(socket, new BasicHttpParams());
    } catch (Exception e) {
        GlobalLogger.instance().getApplicationLogger().error(TextEvent.Topic.PROTOCOL, e,
                "Exception in SocketServerListener secure=", secure);
        e.printStackTrace();
    }
}
From source file:org.apache.hadoop.hdfs.qjournal.server.JournalNodeJournalSyncer.java
/**
 * Fetch manifest from a single given journal node over http.
 */
private List<EditLogFile> getManifest(InetSocketAddress jn, Journal journal, long minTxId) throws IOException {
    String m = DFSUtil.getHTMLContentWithTimeout(
            new URL("http", jn.getAddress().getHostAddress(), jn.getPort(),
                    GetJournalManifestServlet.buildPath(journal.getJournalId(), minTxId,
                            journal.getJournalStorage())),
            httpConnectReadTimeoutMs, httpConnectReadTimeoutMs);
    return convertJsonToListManifest(m);
}
From source file:org.apache.hadoop.mapred.UtilizationCollector.java
protected void initialize(Configuration conf) throws IOException {
    InetSocketAddress socAddr = UtilizationCollector.getAddress(conf);
    int handlerCount = conf.getInt("mapred.resourceutilization.handler.count", 10);

    // create rpc server
    this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, conf);

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("Collector up at: " + this.serverAddress);

    // start RPC server
    this.server.start();

    // How long until TaskTracker reports expire
    timeLimit = conf.getLong("mapred.resourceutilization.timelimit", DEFAULT_TIME_LIMIT);

    // How long after a job stops before we consider it finished
    stopTimeLimit = conf.getLong("mapred.resourceutilization.stoptimelimit", DEFAULT_STOP_TIME_LIMIT);

    // How often we aggregate the reports
    aggregatePeriod = conf.getLong("mapred.resourceutilization.aggregateperiod", DEFAULT_AGGREGATE_SLEEP_TIME);

    // Start the daemon thread to aggregate the TaskTracker reports
    this.aggregateDaemon = new Daemon(new AggregateRun());
    this.aggregateDaemon.start();
}
From source file:org.apache.hadoop.yarn.ipc.HadoopYarnRPC.java
@Override
public Server getServer(Class protocol, Object instance, InetSocketAddress addr, Configuration conf,
        SecretManager<? extends TokenIdentifier> secretManager, int numHandlers) {
    LOG.info("Creating a HadoopYarnRpc server for protocol " + protocol + " with " + numHandlers + " handlers");
    LOG.info("Configured SecurityInfo class name is " + conf.get(YarnConfiguration.YARN_SECURITY_INFO));
    RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
    final RPC.Server hadoopServer;
    try {
        hadoopServer = RPC.getServer(protocol, instance, addr.getHostName(), addr.getPort(), numHandlers,
                false, conf, secretManager);
    } catch (IOException e) {
        throw new YarnException(e);
    }
    Server server = new Server() {
        @Override
        public void close() {
            hadoopServer.stop();
        }

        @Override
        public int getPort() {
            return hadoopServer.getListenerAddress().getPort();
        }

        @Override
        public void join() throws InterruptedException {
            hadoopServer.join();
        }

        @Override
        public void start() {
            hadoopServer.start();
        }
    };
    return server;
}
From source file:org.apache.hadoop.minikdc.MiniKdc.java
private void initKDCServer() throws Exception {
    String orgName = conf.getProperty(ORG_NAME);
    String orgDomain = conf.getProperty(ORG_DOMAIN);
    String bindAddress = conf.getProperty(KDC_BIND_ADDRESS);
    final Map<String, String> map = new HashMap<String, String>();
    map.put("0", orgName.toLowerCase(Locale.ENGLISH));
    map.put("1", orgDomain.toLowerCase(Locale.ENGLISH));
    map.put("2", orgName.toUpperCase(Locale.ENGLISH));
    map.put("3", orgDomain.toUpperCase(Locale.ENGLISH));
    map.put("4", bindAddress);

    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    InputStream is1 = cl.getResourceAsStream("minikdc.ldiff");

    SchemaManager schemaManager = ds.getSchemaManager();
    LdifReader reader = null;
    try {
        final String content = StrSubstitutor.replace(IOUtils.toString(is1), map);
        reader = new LdifReader(new StringReader(content));
        for (LdifEntry ldifEntry : reader) {
            ds.getAdminSession().add(new DefaultEntry(schemaManager, ldifEntry.getEntry()));
        }
    } finally {
        IOUtils.closeQuietly(reader);
        IOUtils.closeQuietly(is1);
    }

    KerberosConfig kerberosConfig = new KerberosConfig();
    kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
    kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
    kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName, orgDomain));
    kerberosConfig.setPaEncTimestampRequired(false);
    kdc = new KdcServer(kerberosConfig);
    kdc.setDirectoryService(ds);

    // transport
    String transport = conf.getProperty(TRANSPORT);
    AbstractTransport absTransport;
    if (transport.trim().equals("TCP")) {
        absTransport = new TcpTransport(bindAddress, port, 3, 50);
    } else if (transport.trim().equals("UDP")) {
        absTransport = new UdpTransport(port);
    } else {
        throw new IllegalArgumentException("Invalid transport: " + transport);
    }
    kdc.addTransports(absTransport);
    kdc.setServiceName(conf.getProperty(INSTANCE));
    kdc.start();

    // if using ephemeral port, update port number for binding
    if (port == 0) {
        InetSocketAddress addr = (InetSocketAddress) absTransport.getAcceptor().getLocalAddress();
        port = addr.getPort();
    }

    StringBuilder sb = new StringBuilder();
    InputStream is2 = cl.getResourceAsStream("minikdc-krb5.conf");
    BufferedReader r = null;
    try {
        r = new BufferedReader(new InputStreamReader(is2, StandardCharsets.UTF_8));
        String line = r.readLine();
        while (line != null) {
            sb.append(line).append("{3}");
            line = r.readLine();
        }
    } finally {
        IOUtils.closeQuietly(r);
        IOUtils.closeQuietly(is2);
    }

    krb5conf = new File(workDir, "krb5.conf").getAbsoluteFile();
    FileUtils.writeStringToFile(krb5conf, MessageFormat.format(sb.toString(), getRealm(), getHost(),
            Integer.toString(getPort()), System.getProperty("line.separator")));

    System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5conf.getAbsolutePath());
    System.setProperty(SUN_SECURITY_KRB5_DEBUG, conf.getProperty(DEBUG, "false"));

    // refresh the config
    Class<?> classRef;
    if (System.getProperty("java.vendor").contains("IBM")) {
        classRef = Class.forName("com.ibm.security.krb5.internal.Config");
    } else {
        classRef = Class.forName("sun.security.krb5.Config");
    }
    Method refreshMethod = classRef.getMethod("refresh", new Class[0]);
    refreshMethod.invoke(classRef, new Object[0]);

    LOG.info("MiniKdc listening at port: {}", getPort());
    LOG.info("MiniKdc setting JVM krb5.conf to: {}", krb5conf.getAbsolutePath());
}
From source file:org.apache.hadoop.raid.TestRaidUI.java
@Test
public void testRaidUI() throws Exception {
    Configuration localConf = new Configuration(conf);
    cnode = RaidNode.createRaidNode(null, localConf);
    InetSocketAddress infoSocAddr = dfsCluster.getNameNode().getHttpAddress();
    InjectionHandler h = new TestRaidHTTPInjectionHandler();
    InjectionHandler.set(h);

    LOG.info("First call will fail with timeout because RaidNode UI will "
            + "hang for 10 seconds. Check TestRaidHTTPInjectionHandler when "
            + "counter == 1");
    long stime = System.currentTimeMillis();
    String httpContent = DFSUtil.getHTMLContent(
            new URI("http", null, infoSocAddr.getHostName(), infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
    LOG.info("Output1: " + httpContent);
    long duration = System.currentTimeMillis() - stime;
    long expectTimeout = JspHelper.RAID_UI_CONNECT_TIMEOUT + JspHelper.RAID_UI_READ_TIMEOUT;
    assertTrue("Should take less than " + expectTimeout + "ms actual time: " + duration,
            duration < expectTimeout + 1000);
    assertTrue("Should get timeout error", httpContent.contains("Raidnode didn't response"));
    assertFalse("Shouldn't get right result", httpContent.contains("WARNING Corrupt files"));

    LOG.info("Second call will fail with error because RaidNode UI throw "
            + "an IOException. Check TestRaidHTTPInjectionHandler when counter == 2");
    httpContent = DFSUtil.getHTMLContent(
            new URI("http", null, infoSocAddr.getHostName(), infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
    LOG.info("Output2: " + httpContent);
    assertTrue("Should get error", httpContent.contains("Raidnode is unreachable"));
    assertFalse("Shouldn't get right result", httpContent.contains("WARNING Corrupt files"));

    LOG.info("Third call will succeed");
    httpContent = DFSUtil.getHTMLContent(
            new URI("http", null, infoSocAddr.getHostName(), infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
    LOG.info("Output3: " + httpContent);
    assertTrue("Should get right result", httpContent.contains("WARNING Corrupt files"));
}
From source file:org.jenkinsci.plugins.GitLabSecurityRealm.java
/**
 * Returns the proxy to be used when connecting to the given URI.
 */
private HttpHost getProxy(HttpUriRequest method) throws URIException {
    Jenkins jenkins = Jenkins.getInstance();
    if (jenkins == null) {
        return null; // defensive check
    }
    ProxyConfiguration proxy = jenkins.proxy;
    if (proxy == null) {
        return null; // defensive check
    }
    Proxy p = proxy.createProxy(method.getURI().getHost());
    switch (p.type()) {
    case DIRECT:
        return null; // no proxy
    case HTTP:
        InetSocketAddress sa = (InetSocketAddress) p.address();
        return new HttpHost(sa.getHostName(), sa.getPort());
    case SOCKS:
    default:
        return null; // not supported yet
    }
}
From source file:org.elasticsearch.xpack.security.authc.saml.SamlAuthenticationIT.java
private URI getWebServerUri() {
    final InetSocketAddress address = httpServer.getAddress();
    final String host = address.getHostString();
    final int port = address.getPort();
    try {
        return new URI("http", null, host, port, "/", null, null);
    } catch (URISyntaxException e) {
        throw new ElasticsearchException("Cannot construct URI for httpServer @ {}:{}", e, host, port);
    }
}