List of usage examples for java.net.InetSocketAddress.getPort()
public final int getPort()
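Before the project examples below, a minimal, self-contained sketch of the call itself: getPort() returns the port that was supplied when the InetSocketAddress was created, whether or not the host was resolved. The host name and port values here are illustrative only.

import java.net.InetSocketAddress;

public class GetPortExample {
    public static void main(String[] args) {
        // Resolved address: getPort() returns the port passed to the constructor.
        InetSocketAddress resolved = new InetSocketAddress("example.com", 8080);
        System.out.println(resolved.getPort()); // 8080

        // Unresolved address: the port is still available even though no DNS lookup was done.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 443);
        System.out.println(unresolved.getPort()); // 443

        // Wildcard bind address with port 0: getPort() reports the value given at construction.
        InetSocketAddress wildcard = new InetSocketAddress(0);
        System.out.println(wildcard.getPort()); // 0
    }
}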
From source file:com.vmware.photon.controller.common.thrift.ClientPoolUtils.java
public static <C extends TAsyncClient> C createNewClient(InetSocketAddress address,
        TProtocolFactory protocolFactory, ClientPoolOptions options, ThriftFactory thriftFactory,
        TAsyncClientFactory<C> clientFactory, Map<C, TTransport> clientTransportMap)
        throws IOException, TTransportException {
    TTransport socket = null;
    if (!isKeyStoreUsed(options.getKeyStorePath())) {
        // Auth is not enabled
        socket = new TNonblockingSocket(address.getHostString(), address.getPort());
    } else {
        TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters();
        params.setTrustStore(options.getKeyStorePath(), options.getKeyStorePassword());
        socket = TSSLTransportFactory.getClientSocket(address.getHostString(), address.getPort(),
                (options.getTimeoutMs() == 0L) ? 10000 : (int) options.getTimeoutMs(), params);
    }
    if (StringUtils.isNotBlank(options.getServiceName())) {
        protocolFactory = thriftFactory.create(options.getServiceName());
    }
    C client = clientFactory.create(protocolFactory, socket);
    clientTransportMap.put(client, socket);
    logger.debug("created new client {} for {}", client, address);
    return client;
}
From source file:io.servicecomb.serviceregistry.RegistryUtils.java
private static IpPort genPublishIpPort(String schema, IpPort ipPort) {
    String publicAddressSetting = DynamicPropertyFactory.getInstance()
            .getStringProperty(PUBLISH_ADDRESS, "").get();
    publicAddressSetting = publicAddressSetting.trim();
    if (publicAddressSetting.isEmpty()) {
        InetSocketAddress socketAddress = ipPort.getSocketAddress();
        if (socketAddress.getAddress().isAnyLocalAddress()) {
            String host = NetUtils.getHostAddress();
            LOGGER.warn("address {}, auto select a host address to publish {}:{}, maybe not the correct one",
                    socketAddress, host, socketAddress.getPort());
            return new IpPort(host, ipPort.getPort());
        }
        return ipPort;
    }

    if (publicAddressSetting.startsWith("{") && publicAddressSetting.endsWith("}")) {
        publicAddressSetting = NetUtils
                .ensureGetInterfaceAddress(
                        publicAddressSetting.substring(1, publicAddressSetting.length() - 1))
                .getHostAddress();
    }

    String publishPortKey = PUBLISH_PORT.replace("{transport_name}", schema);
    int publishPortSetting = DynamicPropertyFactory.getInstance().getIntProperty(publishPortKey, 0).get();
    int publishPort = publishPortSetting == 0 ? ipPort.getPort() : publishPortSetting;
    return new IpPort(publicAddressSetting, publishPort);
}
From source file:org.apache.atlas.ha.AtlasServerIdSelector.java
/**
 * Return the ID corresponding to this Atlas instance.
 *
 * The match is done by looking for an ID configured in {@link HAConfiguration#ATLAS_SERVER_IDS} key
 * that has a host:port entry for the key {@link HAConfiguration#ATLAS_SERVER_ADDRESS_PREFIX}+ID where
 * the host is a local IP address and port is set in the system property
 * {@link AtlasConstants#SYSTEM_PROPERTY_APP_PORT}.
 *
 * @param configuration
 * @return
 * @throws AtlasException if no ID is found that maps to a local IP Address or port
 */
public static String selectServerId(Configuration configuration) throws AtlasException {
    // ids are already trimmed by this method
    String[] ids = configuration.getStringArray(HAConfiguration.ATLAS_SERVER_IDS);
    String matchingServerId = null;
    int appPort = Integer.parseInt(System.getProperty(AtlasConstants.SYSTEM_PROPERTY_APP_PORT));
    for (String id : ids) {
        String hostPort = configuration.getString(HAConfiguration.ATLAS_SERVER_ADDRESS_PREFIX + id);
        if (!StringUtils.isEmpty(hostPort)) {
            InetSocketAddress socketAddress;
            try {
                socketAddress = NetUtils.createSocketAddr(hostPort);
            } catch (Exception e) {
                LOG.warn("Exception while trying to get socket address for {}", hostPort, e);
                continue;
            }
            if (!socketAddress.isUnresolved() && NetUtils.isLocalAddress(socketAddress.getAddress())
                    && appPort == socketAddress.getPort()) {
                LOG.info("Found matched server id {} with host port: {}", id, hostPort);
                matchingServerId = id;
                break;
            }
        } else {
            LOG.info("Could not find matching address entry for id: {}", id);
        }
    }
    if (matchingServerId == null) {
        String msg = String.format("Could not find server id for this instance. "
                        + "Unable to find IDs matching any local host and port binding among %s",
                StringUtils.join(ids, ","));
        throw new AtlasException(msg);
    }
    return matchingServerId;
}
From source file:org.eclipse.mylyn.commons.repositories.http.core.HttpUtil.java
public static void configureProxy(AbstractHttpClient client, RepositoryLocation location) {
    Assert.isNotNull(client);
    Assert.isNotNull(location);
    String url = location.getUrl();
    Assert.isNotNull(url, "The location url must not be null"); //$NON-NLS-1$
    String host = NetUtil.getHost(url);
    Proxy proxy;
    if (NetUtil.isUrlHttps(url)) {
        proxy = location.getProxyForHost(host, IProxyData.HTTPS_PROXY_TYPE);
    } else {
        proxy = location.getProxyForHost(host, IProxyData.HTTP_PROXY_TYPE);
    }
    if (proxy != null && !Proxy.NO_PROXY.equals(proxy)) {
        InetSocketAddress address = (InetSocketAddress) proxy.address();
        client.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY,
                new HttpHost(address.getHostName(), address.getPort()));
        if (proxy instanceof AuthenticatedProxy) {
            AuthenticatedProxy authProxy = (AuthenticatedProxy) proxy;
            Credentials credentials = getCredentials(authProxy.getUserName(), authProxy.getPassword(),
                    address.getAddress(), false);
            if (credentials instanceof NTCredentials) {
                AuthScope proxyAuthScopeNTLM = new AuthScope(address.getHostName(), address.getPort(),
                        AuthScope.ANY_REALM, AuthPolicy.NTLM);
                client.getCredentialsProvider().setCredentials(proxyAuthScopeNTLM, credentials);

                AuthScope proxyAuthScopeAny = new AuthScope(address.getHostName(), address.getPort(),
                        AuthScope.ANY_REALM);
                Credentials usernamePasswordCredentials = getCredentials(authProxy.getUserName(),
                        authProxy.getPassword(), address.getAddress(), true);
                client.getCredentialsProvider().setCredentials(proxyAuthScopeAny, usernamePasswordCredentials);
            } else {
                AuthScope proxyAuthScope = new AuthScope(address.getHostName(), address.getPort(),
                        AuthScope.ANY_REALM);
                client.getCredentialsProvider().setCredentials(proxyAuthScope, credentials);
            }
        }
    } else {
        client.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY, null);
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.TestDatanodeJsp.java
private static void testViewingFile(MiniDFSCluster cluster, String filePath) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path(filePath);
    if (!fs.exists(testPath)) {
        DFSTestUtil.writeFile(fs, testPath, FILE_DATA);
    }

    InetSocketAddress nnIpcAddress = cluster.getNameNode().getNameNodeAddress();
    InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
    String base = JspHelper.Url.url("http", cluster.getDataNodes().get(0).getDatanodeId());

    URL url = new URL(base + "/" + "browseDirectory.jsp"
            + JspHelper.getUrlParam("dir", URLEncoder.encode(testPath.toString(), "UTF-8"), true)
            + JspHelper.getUrlParam("namenodeInfoPort", Integer.toString(nnHttpAddress.getPort()))
            + JspHelper.getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));

    viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(url));

    assertTrue("page should show preview of file contents, got: " + viewFilePage,
            viewFilePage.contains(FILE_DATA));
    assertTrue("page should show link to download file", viewFilePage.contains(
            "/streamFile" + ServletUtil.encodePath(filePath) + "?nnaddr=localhost:" + nnIpcAddress.getPort()));

    // check whether able to tail the file
    String regex = "<a.+href=\"(.+?)\">Tail\\s*this\\s*file\\<\\/a\\>";
    assertFileContents(regex, "Tail this File");

    // check whether able to 'Go Back to File View' after tailing the file
    regex = "<a.+href=\"(.+?)\">Go\\s*Back\\s*to\\s*File\\s*View\\<\\/a\\>";
    assertFileContents(regex, "Go Back to File View");

    regex = "<a href=\"///" + nnHttpAddress.getHostName() + ":" + nnHttpAddress.getPort()
            + "/dfshealth.jsp\">Go back to DFS home</a>";
    assertTrue("page should generate DFS home scheme without explicit scheme", viewFilePage.contains(regex));
}
From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNodeZkUtil.java
public static void printZookeeperEntries(Configuration originalConf, Configuration conf, String serviceName,
        PrintStream outputStream) throws IOException, KeeperException, InterruptedException {
    String connection = conf.get(FSConstants.FS_HA_ZOOKEEPER_QUORUM);
    if (connection == null)
        return;
    AvatarZooKeeperClient zk = new AvatarZooKeeperClient(conf, null);
    outputStream.println("ZooKeeper entries:");

    // client protocol
    InetSocketAddress defaultAddr = NameNode.getClientProtocolAddress(originalConf);
    String defaultName = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
    outputStream.println("Default name is " + defaultName);
    String registration = zk.getPrimaryAvatarAddress(defaultName, new Stat(), false);
    outputStream.println("Primary node according to ZooKeeper: " + registration);

    // datanode protocol
    defaultAddr = NameNode.getDNProtocolAddress(originalConf);
    defaultName = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
    registration = zk.getPrimaryAvatarAddress(defaultName, new Stat(), false);
    outputStream.println("Primary node DN protocol : " + registration);

    // http address
    defaultAddr = NetUtils.createSocketAddr(originalConf.get("dfs.http.address"));
    defaultName = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
    registration = zk.getPrimaryAvatarAddress(defaultName, new Stat(), false);
    outputStream.println("Primary node http address : " + registration);

    for (Avatar anAvatar : Avatar.avatars) {
        outputStream.println(anAvatar + " entries: ");
        for (ZookeeperKey key : ZookeeperKey.values()) {
            String keyInZookeeper = getZnodeName(conf, serviceName, anAvatar, key);
            outputStream.println(
                    keyInZookeeper + " : " + zk.getPrimaryAvatarAddress(keyInZookeeper, new Stat(), false));
        }
    }
}
From source file:com.pinterest.terrapin.TerrapinUtil.java
/**
 * Attempt to load data (already in HDFS on a correct directory) into an already locked fileset.
 * The data is assumed to already have been placed in the correct directory on the terrapin
 * cluster. This is being called by the Terrapin loader jobs. The @fsInfo object is the same
 * as the locked fsInfo object.
 */
public static void loadFileSetData(ZooKeeperManager zkManager, FileSetInfo fsInfo, Options options)
        throws Exception {
    InetSocketAddress controllerSockAddress = zkManager.getControllerLeader();
    LOG.info("Connecting to controller at " + controllerSockAddress.getHostName() + ":"
            + controllerSockAddress.getPort());
    LOG.info("Load timeout " + Constants.LOAD_TIMEOUT_SECONDS + " seconds.");

    Service<ThriftClientRequest, byte[]> service = ClientBuilder.safeBuild(ClientBuilder.get()
            .hosts(controllerSockAddress)
            .codec(new ThriftClientFramedCodecFactory(Option.<ClientId>empty()))
            .retries(1)
            .connectTimeout(Duration.fromMilliseconds(1000))
            .requestTimeout(Duration.fromSeconds(Constants.LOAD_TIMEOUT_SECONDS))
            .hostConnectionLimit(100)
            .failFast(false));
    TerrapinController.ServiceIface iface = new TerrapinController.ServiceToClient(service,
            new TBinaryProtocol.Factory());

    TerrapinLoadRequest request = new TerrapinLoadRequest();
    request.setHdfsDirectory(fsInfo.servingInfo.hdfsPath);
    request.setOptions(options);
    request.setFileSet(fsInfo.fileSetName);
    request.setExpectedNumPartitions(fsInfo.servingInfo.numPartitions);

    LOG.info("Loading file set " + fsInfo.fileSetName + " at " + fsInfo.servingInfo.hdfsPath);
    long startTimeSeconds = System.currentTimeMillis() / 1000;
    int numTriesLeft = 5;
    boolean done = false;
    Exception e = null;
    while (numTriesLeft > 0) {
        try {
            iface.loadFileSet(request).get();
            done = true;
            break;
        } catch (Throwable t) {
            LOG.error("Swap failed with exception.", t);
            e = new Exception(t);
            numTriesLeft--;
        }
        LOG.info("Retrying in 10 seconds.");
        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
            LOG.error("Interrupted.");
            break;
        }
    }
    if (done) {
        LOG.info("Load successful. Swap took " + ((System.currentTimeMillis() / 1000) - startTimeSeconds)
                + " seconds.");
    } else {
        LOG.error("Load failed !!!.");
        throw new Exception(e);
    }
}
From source file:org.apache.hadoop.thriftfs.ThriftUtils.java
/**
 * Creates a Thrift name node client.
 *
 * @param conf the HDFS instance
 * @return a Thrift name node client.
 */
public static Namenode.Client createNamenodeClient(Configuration conf) throws Exception {
    String s = conf.get(NamenodePlugin.THRIFT_ADDRESS_PROPERTY, NamenodePlugin.DEFAULT_THRIFT_ADDRESS);
    // TODO(todd) use fs.default.name here if set to 0.0.0.0 - but share this with the code in
    // SecondaryNameNode that does the same
    InetSocketAddress addr = NetUtils.createSocketAddr(s);
    // If the NN thrift server is listening on the wildcard address (0.0.0.0),
    // use the external IP from the NN configuration, but with the port listed
    // in the thrift config.
    if (addr.getAddress().isAnyLocalAddress()) {
        InetSocketAddress nnAddr = NameNode.getAddress(conf);
        addr = new InetSocketAddress(nnAddr.getAddress(), addr.getPort());
    }
    TTransport t = new TSocket(addr.getHostName(), addr.getPort());
    t.open();
    TProtocol p = new TBinaryProtocol(t);
    return new Namenode.Client(p);
}
From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNodeZkUtil.java
public static void clearZookeeper(Configuration originalConf, Configuration conf, String serviceName)
        throws IOException {
    String connection = conf.get(FSConstants.FS_HA_ZOOKEEPER_QUORUM);
    if (connection == null) {
        return;
    }
    AvatarZooKeeperClient zk = new AvatarZooKeeperClient(conf, null);

    // Clear NameNode address in ZK
    InetSocketAddress defaultAddr;
    String[] aliases;

    defaultAddr = NameNode.getClientProtocolAddress(originalConf);
    String defaultName = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
    LOG.info("Clear Client Address information in ZooKeeper: " + defaultName);
    zk.clearPrimary(defaultName);
    aliases = conf.getStrings(FSConstants.FS_NAMENODE_ALIASES);
    if (aliases != null) {
        for (String alias : aliases) {
            zk.clearPrimary(alias);
        }
    }

    LOG.info("Clear Service Address information in ZooKeeper");
    defaultAddr = NameNode.getDNProtocolAddress(originalConf);
    if (defaultAddr != null) {
        String defaultServiceName = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
        zk.clearPrimary(defaultServiceName);
    }
    aliases = conf.getStrings(FSConstants.DFS_NAMENODE_DN_ALIASES);
    if (aliases != null) {
        for (String alias : aliases) {
            zk.clearPrimary(alias);
        }
    }

    LOG.info("Clear Http Address information in ZooKeeper");
    // Clear http address in ZK
    // Stolen from NameNode so we have the same code in both places
    defaultAddr = NetUtils.createSocketAddr(originalConf.get(FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    String defaultHttpAddress = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
    zk.clearPrimary(defaultHttpAddress);
    aliases = conf.getStrings(FSConstants.DFS_HTTP_ALIASES);
    if (aliases != null) {
        for (String alias : aliases) {
            zk.clearPrimary(alias);
        }
    }

    for (Avatar avatar : Avatar.avatars) {
        for (ZookeeperKey key : ZookeeperKey.values()) {
            zk.clearPrimary(getZnodeName(conf, serviceName, avatar, key));
        }
    }
}
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer.java
/**
 * Adds the ContainerLocalizer arguments for a {@link ShellCommandExecutor},
 * as expected by ContainerLocalizer.main
 * @param command the current ShellCommandExecutor command line
 * @param user localization user
 * @param appId localized app id
 * @param locId localizer id
 * @param nmAddr nodemanager address
 * @param localDirs list of local dirs
 */
public static void buildMainArgs(List<String> command, String user, String appId, String locId,
        InetSocketAddress nmAddr, List<String> localDirs, String userFolder) {
    command.add(ContainerLocalizer.class.getName());
    command.add(user);
    command.add(userFolder);
    command.add(appId);
    command.add(locId);
    command.add(nmAddr.getHostName());
    command.add(Integer.toString(nmAddr.getPort()));
    for (String dir : localDirs) {
        command.add(dir);
    }
}