List of usage examples for java.net.InetSocketAddress.getPort()
public final int getPort()
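Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the source files listed; the host name and port are arbitrary) illustrating that getPort() simply returns the port the InetSocketAddress was constructed with, including a port of 0:

import java.net.InetSocketAddress;

public class GetPortExample {
    public static void main(String[] args) {
        // getPort() returns the port supplied at construction time.
        InetSocketAddress addr = new InetSocketAddress("example.com", 8080);
        System.out.println(addr.getPort()); // prints 8080

        // A wildcard address built with port 0 reports 0 as well; a socket bound
        // to it would receive an ephemeral port, but this InetSocketAddress
        // object itself does not change.
        InetSocketAddress wildcard = new InetSocketAddress(0);
        System.out.println(wildcard.getPort()); // prints 0
    }
}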
From source file: org.apache.hadoop.security.TestSecurityUtil.java

private void verifyValues(InetSocketAddress addr, String host, String ip, int port) {
    assertTrue(!addr.isUnresolved());
    // don't know what the standard resolver will return for hostname.
    // should be host for host; host or ip for ip is ambiguous
    if (!SecurityUtil.useIpForTokenService) {
        assertEquals(host, addr.getHostName());
        assertEquals(host, addr.getAddress().getHostName());
    }
    assertEquals(ip, addr.getAddress().getHostAddress());
    assertEquals(port, addr.getPort());
}
From source file: org.apache.hama.bsp.TestBSPTaskFaults.java

@Override
protected void setUp() throws Exception {
    super.setUp();
    conf = new HamaConfiguration();
    conf.setInt(Constants.GROOM_PING_PERIOD, 200);
    conf.setClass("bsp.work.class", FaulTestBSP.class, BSP.class);
    conf.setClass(SyncServiceFactory.SYNC_PEER_CLASS, LocalBSPRunner.LocalSyncClient.class, SyncClient.class);

    int port = BSPNetUtils.getFreePort(4321 + incrementTestNumber());
    try {
        InetSocketAddress inetAddress = new InetSocketAddress(port);
        groom = new MinimalGroomServer(conf);
        workerServer = RPC.getServer(groom, inetAddress.getHostName(), inetAddress.getPort(), conf);
        workerServer.start();
        LOG.info("Started RPC server");
        conf.setInt("bsp.groom.rpc.port", inetAddress.getPort());

        umbilical = (BSPPeerProtocol) RPC.getProxy(BSPPeerProtocol.class, HamaRPCProtocolVersion.versionID,
                inetAddress, conf);
        LOG.info("Started the proxy connections");

        this.testBSPTaskService = Executors.newScheduledThreadPool(1);
    } catch (BindException be) {
        LOG.info(be);
    }
}
From source file: org.apache.hadoop.hdfs.server.namenode.SnapshotNode.java

/**
 * Initialize SnapshotNode
 * @throws IOException
 */
private void init() throws IOException {
    ssDir = conf.get("fs.snapshot.dir", "/.SNAPSHOT");
    tempDir = conf.get("fs.snapshot.tempdir", "/tmp/snapshot");
    fileServer = getImageServer();
    dfs = FileSystem.get(conf);

    Path ssPath = new Path(ssDir);
    if (!dfs.exists(ssPath)) {
        dfs.mkdirs(ssPath);
    }

    maxLeaseUpdateThreads = conf.getInt("fs.snapshot.leaseupdatethreads", 100);

    // Waiting room purge thread
    purgeThread = new Daemon((new WaitingRoom(conf)).getPurger());
    purgeThread.start();

    // Get namenode rpc connection
    nameNodeAddr = NameNode.getAddress(conf);
    namenode = (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class, NamenodeProtocol.versionID,
            nameNodeAddr, conf);

    // Snapshot RPC Server
    InetSocketAddress socAddr = SnapshotNode.getAddress(conf);
    int handlerCount = conf.getInt("fs.snapshot.handler.count", 10);
    server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, conf);
    // The rpc-server port can be ephemeral... ensure we have the correct info
    serverAddress = server.getListenerAddress();
    LOG.info("SnapshotNode up at: " + serverAddress);

    server.start(); // start rpc server
}
From source file: org.apache.hadoop.mapred.ProxyJobTracker.java

public void shutdown() throws Exception {
    infoServer.stop();
    rpcServer.stop();
    thriftServer.stop();
    // Do a dummy connect to the thrift server port. This will cause a thrift
    // exception and move the server beyond the blocking accept.
    // Thread.interrupt() does not help.
    String target = conf.getProxyJobTrackerThriftAddress();
    InetSocketAddress addr = NetUtils.createSocketAddr(target);
    try {
        new Socket(addr.getAddress(), addr.getPort()).close();
    } catch (IOException e) {
    }
}
From source file: org.apache.http.impl.nio.conn.TestPoolingHttpClientAsyncConnectionManager.java

@Test
public void testResolveLocalAddress() throws Exception {
    final InternalAddressResolver addressResolver = new InternalAddressResolver(schemePortResolver, dnsResolver);
    final HttpHost target = new HttpHost("localhost");
    final byte[] ip = new byte[] { 10, 0, 0, 10 };
    final HttpRoute route = new HttpRoute(target, InetAddress.getByAddress(ip), false);

    final InetSocketAddress address = (InetSocketAddress) addressResolver.resolveLocalAddress(route);
    Assert.assertNotNull(address);
    Assert.assertEquals(InetAddress.getByAddress(ip), address.getAddress());
    Assert.assertEquals(0, address.getPort());
}
From source file: org.apache.http.impl.nio.conn.TestPoolingHttpClientAsyncConnectionManager.java

@Test
public void testResolveRemoteAddress() throws Exception {
    final InternalAddressResolver addressResolver = new InternalAddressResolver(schemePortResolver, dnsResolver);
    final HttpHost target = new HttpHost("somehost", 80);
    final HttpRoute route = new HttpRoute(target);
    Mockito.when(schemePortResolver.resolve(target)).thenReturn(123);
    final byte[] ip = new byte[] { 10, 0, 0, 10 };
    Mockito.when(dnsResolver.resolve("somehost"))
            .thenReturn(new InetAddress[] { InetAddress.getByAddress(ip) });

    final InetSocketAddress address = (InetSocketAddress) addressResolver.resolveRemoteAddress(route);
    Assert.assertNotNull(address);
    Assert.assertEquals(InetAddress.getByAddress(ip), address.getAddress());
    Assert.assertEquals(123, address.getPort());
}
From source file: org.apache.http.impl.nio.conn.TestPoolingHttpClientAsyncConnectionManager.java

@Test
public void testResolveRemoteAddressViaProxy() throws Exception {
    final InternalAddressResolver addressResolver = new InternalAddressResolver(schemePortResolver, dnsResolver);
    final HttpHost target = new HttpHost("somehost", 80);
    final HttpHost proxy = new HttpHost("someproxy");
    final HttpRoute route = new HttpRoute(target, null, proxy, false);
    Mockito.when(schemePortResolver.resolve(proxy)).thenReturn(8888);
    final byte[] ip = new byte[] { 10, 0, 0, 10 };
    Mockito.when(dnsResolver.resolve("someproxy"))
            .thenReturn(new InetAddress[] { InetAddress.getByAddress(ip) });

    final InetSocketAddress address = (InetSocketAddress) addressResolver.resolveRemoteAddress(route);
    Assert.assertNotNull(address);
    Assert.assertEquals(InetAddress.getByAddress(ip), address.getAddress());
    Assert.assertEquals(8888, address.getPort());
}
From source file: org.apache.accumulo.minicluster.MiniAccumuloCluster.java

/**
 * @param config
 *          initial configuration
 */
public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {
    this.config = config.initialize();

    config.getConfDir().mkdirs();
    config.getAccumuloDir().mkdirs();
    config.getZooKeeperDir().mkdirs();
    config.getLogDir().mkdirs();
    config.getWalogDir().mkdirs();
    config.getLibDir().mkdirs();

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        nn.mkdirs();
        File dn = new File(config.getAccumuloDir(), "dn");
        dn.mkdirs();
        File dfs = new File(config.getAccumuloDir(), "dfs");
        dfs.mkdirs();

        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());

        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster(conf, 1, true, null);
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();

        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else {
        dfsUri = "file://";
    }

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    FileWriter fileWriter = new FileWriter(siteFile);
    fileWriter.append("<configuration>\n");
    for (Entry<String, String> entry : config.getSiteConfig().entrySet())
        fileWriter.append("<property><name>" + entry.getKey() + "</name><value>" + entry.getValue()
                + "</value></property>\n");
    fileWriter.append("</configuration>\n");
    fileWriter.close();

    zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
    fileWriter = new FileWriter(zooCfgFile);

    // zookeeper uses Properties to read its config, so use that to write in order
    // to properly escape things like Windows paths
    Properties zooCfg = new Properties();
    zooCfg.setProperty("tickTime", "2000");
    zooCfg.setProperty("initLimit", "10");
    zooCfg.setProperty("syncLimit", "5");
    zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
    zooCfg.setProperty("maxClientCnxns", "1000");
    zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
    zooCfg.store(fileWriter, null);
    fileWriter.close();

    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
    nativeMap.mkdirs();
    File testRoot = new File(
            new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap")
                    .getAbsolutePath());

    if (testRoot.exists()) {
        for (String file : testRoot.list()) {
            File src = new File(testRoot, file);
            if (src.isFile() && file.startsWith("libNativeMap"))
                FileUtils.copyFile(src, new File(nativeMap, file));
        }
    }
}
From source file: org.apache.hadoop.mapred.StatusHttpServer.java

/**
 * Configure an ssl listener on the server.
 * @param addr address to listen on
 * @param keystore location of the keystore
 * @param storPass password for the keystore
 * @param keyPass password for the key
 */
public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass)
        throws IOException {
    if (sslListener != null || webServer.isStarted()) {
        throw new IOException("Failed to add ssl listener");
    }
    sslListener = new SslListener();
    sslListener.setHost(addr.getHostName());
    sslListener.setPort(addr.getPort());
    sslListener.setKeystore(keystore);
    sslListener.setPassword(storPass);
    sslListener.setKeyPassword(keyPass);
    webServer.addListener(sslListener);
}
From source file: org.apache.hadoop.hdfs.DFSUtil.java

/**
 * Substitute a default host in the case that an address has been configured
 * with a wildcard. This is used, for example, when determining the HTTP
 * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
 * substitute the hostname from the filesystem URI rather than trying to
 * connect to 0.0.0.0.
 *
 * @param configuredAddress
 *          the address found in the configuration
 * @param defaultHost
 *          the host to substitute with, if configuredAddress
 *          is a local/wildcard address.
 * @return the substituted address
 * @throws IOException
 *           if it is a wildcard address and security is enabled
 */
@VisibleForTesting
public static String substituteForWildcardAddress(String configuredAddress, String defaultHost)
        throws IOException {
    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost + ":0");
    final InetAddress addr = sockAddr.getAddress();
    if (addr != null && addr.isAnyLocalAddress()) {
        if (UserGroupInformation.isSecurityEnabled() && defaultSockAddr.getAddress().isAnyLocalAddress()) {
            throw new IOException("Cannot use a wildcard address with security. "
                    + "Must explicitly set bind address for Kerberos");
        }
        return defaultHost + ":" + sockAddr.getPort();
    } else {
        return configuredAddress;
    }
}