List of usage examples for java.net.InetSocketAddress.getHostName()
public final String getHostName()
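Before the examples from real projects, here is a minimal sketch of the method's basic behavior: getHostName() returns the hostname the address was constructed with, and when the address was built from a literal IP it may trigger a reverse DNS lookup, whereas getHostString() never performs a lookup. The class name and the addresses below are illustrative, and the reverse-lookup output depends on the local resolver.

import java.net.InetSocketAddress;

public class GetHostNameDemo {
    public static void main(String[] args) {
        // Created from a host name: getHostName() returns the name that was supplied.
        InetSocketAddress byName = new InetSocketAddress("example.com", 80);
        System.out.println(byName.getHostName());   // example.com

        // Created from a literal IP: getHostString() returns the literal without
        // any lookup, while getHostName() may perform a reverse DNS lookup.
        InetSocketAddress byIp = new InetSocketAddress("192.0.2.1", 80);
        System.out.println(byIp.getHostString());   // 192.0.2.1
        System.out.println(byIp.getHostName());     // result depends on reverse DNS
    }
}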
From source file:org.apache.htrace.impl.PackedBufferManager.java
private SelectionKey doConnect() throws IOException {
    SocketChannel sock = SocketChannel.open();
    SelectionKey sockKey = null;
    boolean success = false;
    try {
        if (sock.isBlocking()) {
            sock.configureBlocking(false);
        }
        InetSocketAddress resolvedEndpoint =
            new InetSocketAddress(conf.endpoint.getHostString(), conf.endpoint.getPort());
        resolvedEndpoint.getHostName(); // trigger DNS resolution
        sock.connect(resolvedEndpoint);
        sockKey = sock.register(selector, SelectionKey.OP_CONNECT, sock);
        long startMs = TimeUtil.nowMs();
        long remainingMs = conf.connectTimeoutMs;
        while (true) {
            selector.select(remainingMs);
            for (SelectionKey key : selector.keys()) {
                if (key.isConnectable()) {
                    SocketChannel s = (SocketChannel) key.attachment();
                    s.finishConnect();
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Successfully connected to " + conf.endpointStr + ".");
                    }
                    success = true;
                    return sockKey;
                }
            }
            remainingMs = updateRemainingMs(startMs, conf.connectTimeoutMs);
            if (remainingMs == 0) {
                throw new IOException("Attempt to connect to " + conf.endpointStr
                    + " timed out after " + TimeUtil.deltaMs(startMs, TimeUtil.nowMs()) + " ms.");
            }
        }
    } finally {
        if (!success) {
            if (sockKey != null) {
                sockKey.cancel();
            }
            sock.close();
        }
    }
}
From source file:org.apache.hadoop.hdfs.qjournal.server.JournalNode.java
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
    Preconditions.checkState(!isStarted(), "JN already running");

    validateAndCreateJournalDir(localDir);

    DefaultMetricsSystem.initialize("JournalNode");
    JvmMetrics.create("JournalNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
        DefaultMetricsSystem.instance());

    InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
    SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());

    registerJNMXBean();

    httpServer = new JournalNodeHttpServer(conf, this);
    httpServer.start();

    httpServerURI = httpServer.getServerURI().toString();

    rpcServer = new JournalNodeRpcServer(conf, this);
    rpcServer.start();
}
From source file:org.apache.tajo.client.TajoAdmin.java
private void processMasters(Writer writer)
        throws ParseException, IOException, ServiceException, SQLException {
    tajoClient = TajoHAClientUtil.getTajoClient(tajoConf, tajoClient);

    if (tajoConf.getBoolVar(TajoConf.ConfVars.TAJO_MASTER_HA_ENABLE)) {
        List<String> list = HAServiceUtil.getMasters(tajoConf);
        int i = 0;
        for (String master : list) {
            if (i > 0) {
                writer.write(" ");
            }
            writer.write(master);
            i++;
        }
        writer.write("\n");
    } else {
        String confMasterServiceAddr = tajoClient.getConf()
            .getVar(TajoConf.ConfVars.TAJO_MASTER_UMBILICAL_RPC_ADDRESS);
        InetSocketAddress masterAddress = NetUtils.createSocketAddr(confMasterServiceAddr);
        writer.write(masterAddress.getHostName());
        writer.write("\n");
    }
}
From source file:org.jolokia.jvmagent.handler.JolokiaHttpHandlerTest.java
@Test
public void restrictorWithReverseDnsLookup() throws URISyntaxException, IOException, ParseException {
    Configuration config = getConfig(
        ConfigKey.RESTRICTOR_CLASS, TestReverseDnsLookupRestrictor.class.getName(),
        ConfigKey.ALLOW_DNS_REVERSE_LOOKUP, "true");
    InetSocketAddress address = new InetSocketAddress(8080);
    TestReverseDnsLookupRestrictor.expectedRemoteHostsToCheck =
        new String[] { address.getHostName(), address.getAddress().getHostAddress() };
    JSONObject resp = simpleMemoryGetReadRequest(config);
    assertFalse(resp.containsKey("error"));
}
From source file:com.bigdata.rdf.sail.remoting.GraphRepositoryClient.java
private ProxyHost getProxyHost() {
    ProxyHost theProxyHost = null;
    ProxySelector ps = ProxySelector.getDefault();
    List<Proxy> p = null;
    // select the proxy for the URI of this repository
    try {
        if (ps != null) {
            // log.info("Getting Proxy List.");
            p = ps.select(new java.net.URI(this.servletURL));
        }
    } catch (Exception e) {
        // log.warn("Exception getting proxy: " + e.toString());
    }
    if (p == null) {
        // log.warn("No proxy information available.");
    } else {
        // log.info("Received proxy list: " + p.toString());
        Iterator<Proxy> proxies = p.iterator();
        // just take the first for now
        if (proxies != null && proxies.hasNext()) {
            Proxy theProxy = (Proxy) proxies.next();
            // log.info("Proxy set to: " + theProxy.toString());
            if (!Proxy.NO_PROXY.equals(theProxy)) {
                InetSocketAddress theSock = (InetSocketAddress) theProxy.address();
                theProxyHost = new ProxyHost(theSock.getHostName(), theSock.getPort());
            }
        } else {
            // log.warn("Proxy list has zero members.");
        }
    }
    return theProxyHost;
}
From source file:org.apache.hadoop.hdfs.server.namenode.StandbyNew.java
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {
    nameNodeAddr = AvatarNode.getRemoteNamenodeAddress(conf);
    this.primaryNamenode = (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
        NamenodeProtocol.versionID, nameNodeAddr, conf);
    fsName = AvatarNode.getRemoteNamenodeHttpName(conf);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.secondary.info.bindAddress",
        "dfs.secondary.info.port", "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", fsImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " + "(" + checkpointPeriod / 60 + " min)");
    LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " + "(" + checkpointSize / 1024 + " KB)");
}
From source file:com.hortonworks.hbase.replication.bridge.ReplicationBridgeServer.java
/**
 * Starts a HRegionServer at the default location
 *
 * @param conf
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws ZkConnectException
 */
public ReplicationBridgeServer(Configuration conf) throws IOException, InterruptedException, KeeperException {
    this.conf = conf;

    // Set how many times to retry talking to another server over HConnection.
    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);

    // Server to handle client requests.
    String hostname = conf.get("hbase.regionserver.ipc.address",
        Strings.domainNamePointerToHostName(DNS.getDefaultHost(
            conf.get("hbase.regionserver.dns.interface", "default"),
            conf.get("hbase.regionserver.dns.nameserver", "default"))));
    port = conf.getInt("hbase.bridge.server.port", BRIDGE_SERVER_PORT);

    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = HBaseRPC.getServer(this, new Class<?>[] { HRegionInterface.class },
        initialIsa.getHostName(), // BindAddress is IP we got for this server.
        initialIsa.getPort(),
        conf.getInt("hbase.regionserver.handler.count", 10),
        conf.getInt("hbase.regionserver.metahandler.count", 10),
        conf.getBoolean("hbase.rpc.verbose", false), conf, HConstants.QOS_THRESHOLD);
}
From source file:com.joyent.manta.http.MantaConnectionFactory.java
/**
 * Finds the host of the proxy server that was configured as part of the
 * JVM settings.
 *
 * @return proxy server as {@link HttpHost}, if no proxy then null
 */
protected HttpHost findProxyServer() {
    final ProxySelector proxySelector = ProxySelector.getDefault();
    List<Proxy> proxies = proxySelector.select(URI.create(config.getMantaURL()));

    if (!proxies.isEmpty()) {
        /* The Apache HTTP Client doesn't understand the concept of multiple
         * proxies, so we use only the first one returned. */
        final Proxy proxy = proxies.get(0);

        switch (proxy.type()) {
        case DIRECT:
            return null;
        case SOCKS:
            throw new ConfigurationException("SOCKS proxies are unsupported");
        default:
            // do nothing and fall through
        }

        if (proxy.address() instanceof InetSocketAddress) {
            InetSocketAddress sa = (InetSocketAddress) proxy.address();
            return new HttpHost(sa.getHostName(), sa.getPort());
        } else {
            String msg = String.format(
                "Expecting proxy to be instance of InetSocketAddress. " + " Actually: %s",
                proxy.address());
            throw new ConfigurationException(msg);
        }
    } else {
        return null;
    }
}
From source file:com.cloudera.util.StatusHttpServer.java
/**
 * Configure an ssl listener on the server.
 *
 * @param addr address to listen on
 * @param keystore location of the keystore
 * @param storPass password for the keystore
 * @param keyPass password for the key
 */
public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass)
        throws IOException {
    if (sslConnector != null || webServer.isStarted()) {
        throw new IOException("Failed to add ssl listener");
    }
    sslConnector = new SslSocketConnector();
    sslConnector.setHost(addr.getHostName());
    sslConnector.setPort(addr.getPort());
    sslConnector.setKeystore(keystore);
    sslConnector.setPassword(storPass);
    sslConnector.setKeyPassword(keyPass);
    webServer.addConnector(sslConnector);
}
From source file:org.apache.hadoop.hdfs.server.datanode.TestDataNodeUUID.java
/**
 * This test makes sure that we have a valid
 * Node ID after the checkNodeUUID is done.
 */
@Test
public void testDatanodeUuid() throws Exception {
    final InetSocketAddress NN_ADDR = new InetSocketAddress("localhost", 5020);
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    FileSystem.setDefaultUri(conf, "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
    ArrayList<StorageLocation> locations = new ArrayList<>();

    DataNode dn = new DataNode(conf, locations, null);

    // Assert that Node ID is null
    String nullString = null;
    assertEquals(dn.getDatanodeUuid(), nullString);

    // CheckDataNodeUUID will create an UUID if UUID is null
    dn.checkDatanodeUuid();

    // Make sure that we have a valid DataNodeUUID at that point of time.
    assertNotEquals(dn.getDatanodeUuid(), nullString);
}