List of usage examples for java.net.InetSocketAddress.getPort()
public final int getPort()
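getPort() returns the port number this socket address was created with; it never touches the network. Before the longer excerpts below, here is a minimal, self-contained sketch (the host names and ports are illustrative, not taken from any of the sources) of the three common cases:

    import java.net.InetSocketAddress;

    public class GetPortDemo {
        public static void main(String[] args) {
            // A concrete host:port pair; getPort() simply returns the port it was built with.
            InetSocketAddress addr = new InetSocketAddress("example.com", 8080);
            System.out.println(addr.getPort()); // 8080

            // Port 0 is the conventional "let the OS pick an ephemeral port" value;
            // getPort() still returns 0 here -- the real port only exists after a bind.
            InetSocketAddress ephemeral = new InetSocketAddress(0);
            System.out.println(ephemeral.getPort()); // 0

            // getPort() works on unresolved addresses too; no DNS lookup is involved.
            InetSocketAddress unresolved = InetSocketAddress.createUnresolved("no-such-host.invalid", 9000);
            System.out.println(unresolved.getPort()); // 9000
        }
    }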
From source file:com.github.hrpc.rpc.Server.java
public static void bind(ServerSocket socket, InetSocketAddress address, int backlog, Option conf,
        String rangeConf) throws IOException {
    try {
        IntegerRanges range = null;
        if (rangeConf != null) {
            range = conf.getRange(rangeConf, "");
        }
        if (range == null || range.isEmpty() || (address.getPort() != 0)) {
            socket.bind(address, backlog);
        } else {
            for (Integer port : range) {
                if (socket.isBound())
                    break;
                try {
                    InetSocketAddress temp = new InetSocketAddress(address.getAddress(), port);
                    socket.bind(temp, backlog);
                } catch (BindException e) {
                    // Ignored
                }
            }
            if (!socket.isBound()) {
                throw new BindException("Could not find a free port in " + range);
            }
        }
    } catch (SocketException e) {
        throw NetUtils.wrapException(null, 0, address.getHostName(), address.getPort(), e);
    }
}
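The example above only scans a range of candidate ports when the requested port is 0, i.e. when getPort() signals "no fixed port was asked for". A stripped-down, self-contained version of that decision, using only JDK types (the explicit lowPort/highPort bounds stand in for the project's IntegerRanges helper), might look like:

    import java.io.IOException;
    import java.net.BindException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public final class PortRangeBinder {
        // Binds directly when a fixed port was requested (getPort() != 0),
        // otherwise walks the candidate range until one bind succeeds.
        static void bind(ServerSocket socket, InetSocketAddress address,
                         int lowPort, int highPort) throws IOException {
            if (address.getPort() != 0) {
                socket.bind(address);
                return;
            }
            for (int port = lowPort; port <= highPort && !socket.isBound(); port++) {
                try {
                    socket.bind(new InetSocketAddress(address.getAddress(), port));
                } catch (BindException ignored) {
                    // Port taken; try the next candidate.
                }
            }
            if (!socket.isBound()) {
                throw new BindException("No free port in " + lowPort + "-" + highPort);
            }
        }
    }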
From source file:org.apache.flink.runtime.jobmanager.JobManager.java
public JobManager(ExecutionMode executionMode) throws Exception {
    final String ipcAddressString = GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY,
            null);
    InetAddress ipcAddress = null;
    if (ipcAddressString != null) {
        try {
            ipcAddress = InetAddress.getByName(ipcAddressString);
        } catch (UnknownHostException e) {
            throw new Exception("Cannot convert " + ipcAddressString + " to an IP address: " + e.getMessage(),
                    e);
        }
    }
    final int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
            ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);

    // Read the suggested client polling interval
    this.recommendedClientPollingInterval = GlobalConfiguration.getInteger(
            ConfigConstants.JOBCLIENT_POLLING_INTERVAL_KEY, ConfigConstants.DEFAULT_JOBCLIENT_POLLING_INTERVAL);

    // read the default number of times that failed tasks should be re-executed
    this.defaultExecutionRetries = GlobalConfiguration.getInteger(ConfigConstants.DEFAULT_EXECUTION_RETRIES_KEY,
            ConfigConstants.DEFAULT_EXECUTION_RETRIES);

    // delay between retries should be one heartbeat timeout
    this.delayBetweenRetries = 2 * GlobalConfiguration.getLong(
            ConfigConstants.JOB_MANAGER_DEAD_TASKMANAGER_TIMEOUT_KEY,
            ConfigConstants.DEFAULT_JOB_MANAGER_DEAD_TASKMANAGER_TIMEOUT);

    // Load the job progress collector
    this.eventCollector = new EventCollector(this.recommendedClientPollingInterval);

    this.libraryCacheManager = new BlobLibraryCacheManager(new BlobServer(),
            GlobalConfiguration.getConfiguration());

    // Register simple job archive
    int archived_items = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_WEB_ARCHIVE_COUNT,
            ConfigConstants.DEFAULT_JOB_MANAGER_WEB_ARCHIVE_COUNT);
    if (archived_items > 0) {
        this.archive = new MemoryArchivist(archived_items);
        this.eventCollector.registerArchivist(archive);
    } else {
        this.archive = null;
    }

    this.currentJobs = new ConcurrentHashMap<JobID, ExecutionGraph>();

    // Create the accumulator manager, with same archiving limit as web
    // interface. We need to store the accumulators for at least one job.
    // Otherwise they might be deleted before the client requested the
    // accumulator results.
    this.accumulatorManager = new AccumulatorManager(Math.min(1, archived_items));

    // Determine own RPC address
    final InetSocketAddress rpcServerAddress = new InetSocketAddress(ipcAddress, ipcPort);

    // Start job manager's IPC server
    try {
        final int handlerCount = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_HANDLERS_KEY,
                ConfigConstants.DEFAULT_JOB_MANAGER_IPC_HANDLERS);
        this.jobManagerServer = RPC.getServer(this, rpcServerAddress.getHostName(), rpcServerAddress.getPort(),
                handlerCount);
        this.jobManagerServer.start();
    } catch (IOException e) {
        throw new Exception("Cannot start RPC server: " + e.getMessage(), e);
    }

    LOG.info("Starting job manager in " + executionMode + " mode");

    // Try to load the instance manager for the given execution mode
    if (executionMode == ExecutionMode.LOCAL) {
        final int numTaskManagers = GlobalConfiguration
                .getInteger(ConfigConstants.LOCAL_INSTANCE_MANAGER_NUMBER_TASK_MANAGER, 1);
        this.instanceManager = new LocalInstanceManager(numTaskManagers);
    } else if (executionMode == ExecutionMode.CLUSTER) {
        this.instanceManager = new InstanceManager();
    } else {
        throw new IllegalArgumentException("ExecutionMode");
    }

    // create the scheduler and make it listen at the availability of new instances
    this.scheduler = new Scheduler(this.executorService);
    this.instanceManager.addInstanceListener(this.scheduler);
}
From source file:net.dv8tion.jda.audio.AudioWebSocket.java
@Override
public void onTextMessage(WebSocket websocket, String message) {
    JSONObject contentAll = new JSONObject(message);
    int opCode = contentAll.getInt("op");
    switch (opCode) {
    case INITIAL_CONNECTION_RESPONSE: {
        JSONObject content = contentAll.getJSONObject("d");
        ssrc = content.getInt("ssrc");
        int port = content.getInt("port");
        int heartbeatInterval = content.getInt("heartbeat_interval");

        // Find our external IP and Port using Discord
        InetSocketAddress externalIpAndPort = null;
        int tries = 0;
        do {
            externalIpAndPort = handleUdpDiscovery(new InetSocketAddress(endpoint, port), ssrc);
            tries++;
            if (externalIpAndPort == null && tries > 5) {
                close(false, UDP_UNABLE_TO_CONNECT);
                return;
            }
        } while (externalIpAndPort == null);

        setupUdpKeepAliveThread();

        send(new JSONObject().put("op", 1)
                .put("d", new JSONObject().put("protocol", "udp").put("data",
                        new JSONObject().put("address", externalIpAndPort.getHostString())
                                .put("port", externalIpAndPort.getPort())
                                .put("mode", "xsalsa20_poly1305") // Discord requires encryption
                )).toString());

        setupKeepAliveThread(heartbeatInterval);
        break;
    }
    case HEARTBEAT_PING_RETURN: {
        if (LOG.getEffectiveLevel().getPriority() <= SimpleLog.Level.TRACE.getPriority()) {
            long timePingSent = contentAll.getLong("d");
            long ping = System.currentTimeMillis() - timePingSent;
            LOG.trace("ping: " + ping + "ms");
        }
        break;
    }
    case CONNECTING_COMPLETED: {
        // secret_key is an array of 32 ints that are less than 256, so they are bytes.
        JSONArray keyArray = contentAll.getJSONObject("d").getJSONArray("secret_key");
        secretKey = new byte[DISCORD_SECRET_KEY_LENGTH];
        for (int i = 0; i < keyArray.length(); i++)
            secretKey[i] = (byte) keyArray.getInt(i);
        LOG.trace("Audio connection has finished connecting!");
        ready = true;
        break;
    }
    case USER_SPEAKING_UPDATE: {
        JSONObject content = contentAll.getJSONObject("d");
        boolean speaking = content.getBoolean("speaking");
        int ssrc = content.getInt("ssrc");
        String userId = content.getString("user_id");
        User user = api.getUserById(userId);
        if (user == null) {
            LOG.warn("Got an Audio USER_SPEAKING_UPDATE for a non-existent User. JSON: " + contentAll);
            return;
        }
        audioConnection.updateUserSSRC(ssrc, userId, speaking);
        if (speaking)
            LOG.log(SimpleLog.Level.ALL, user.getUsername() + " started transmitting audio."); // Replace with event.
        else
            LOG.log(SimpleLog.Level.ALL, user.getUsername() + " stopped transmitting audio."); // Replace with event.
        break;
    }
    default:
        LOG.debug("Unknown Audio OP code.\n" + contentAll.toString(4));
    }
}
From source file:org.jenkinsci.plugins.fod.FoDAPI.java
private HttpClient getHttpClient() {
    final String METHOD_NAME = CLASS_NAME + ".getHttpClient";
    PrintStream logger = FodBuilder.getLogger();
    if (null == this.httpClient) {
        HttpClientBuilder builder = HttpClientBuilder.create();
        if (null != proxyConfig) {
            String fodBaseUrl = null;
            if (null != this.baseUrl && !this.baseUrl.isEmpty()) {
                fodBaseUrl = this.baseUrl;
            } else {
                fodBaseUrl = PUBLIC_FOD_BASE_URL;
            }
            Proxy proxy = proxyConfig.createProxy(fodBaseUrl);
            InetSocketAddress address = (InetSocketAddress) proxy.address();
            HttpHost proxyHttpHost = new HttpHost(address.getHostName(), address.getPort(),
                    proxy.address().toString().indexOf("https") != 0 ? "http" : "https");
            builder.setProxy(proxyHttpHost);
            if (null != proxyConfig.getUserName() && !proxyConfig.getUserName().trim().equals("")
                    && null != proxyConfig.getPassword() && !proxyConfig.getPassword().trim().equals("")) {
                Credentials credentials = new UsernamePasswordCredentials(proxyConfig.getUserName(),
                        proxyConfig.getPassword());
                AuthScope authScope = new AuthScope(address.getHostName(), address.getPort());
                CredentialsProvider credsProvider = new BasicCredentialsProvider();
                credsProvider.setCredentials(authScope, credentials);
                builder.setDefaultCredentialsProvider(credsProvider);
            }
            logger.println(METHOD_NAME + ": using proxy configuration: " + proxyHttpHost.getSchemeName()
                    + "://" + proxyHttpHost.getHostName() + ":" + proxyHttpHost.getPort());
        }
        this.httpClient = builder.build();
    }
    return this.httpClient;
}
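The cast from Proxy.address() to InetSocketAddress in the excerpt above is a common idiom, because java.net.Proxy only exposes the SocketAddress supertype. A small standalone sketch of that extraction, using only JDK types (the proxy host and port here are made up):

    import java.net.InetSocketAddress;
    import java.net.Proxy;

    public class ProxyAddressDemo {
        public static void main(String[] args) {
            Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress("proxy.example.com", 3128));
            // Proxy.address() is declared as SocketAddress; for HTTP and SOCKS
            // proxies it is an InetSocketAddress in practice, hence the cast.
            InetSocketAddress addr = (InetSocketAddress) proxy.address();
            System.out.println(addr.getHostName() + ":" + addr.getPort()); // proxy.example.com:3128
        }
    }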
From source file:org.apache.hama.bsp.BSPMaster.java
BSPMaster(HamaConfiguration conf, String identifier) throws IOException, InterruptedException {
    this.conf = conf;
    this.masterIdentifier = identifier;

    // Create the scheduler and init scheduler services
    Class<? extends TaskScheduler> schedulerClass = conf.getClass("bsp.master.taskscheduler",
            SimpleTaskScheduler.class, TaskScheduler.class);
    this.taskScheduler = ReflectionUtils.newInstance(schedulerClass, conf);

    InetSocketAddress inetSocketAddress = getAddress(conf);
    // inetSocketAddress is null if we are in local mode, then we should start nothing.
    if (inetSocketAddress != null) {
        host = inetSocketAddress.getHostName();
        port = inetSocketAddress.getPort();
        LOG.info("RPC BSPMaster: host " + host + " port " + port);

        startTime = System.currentTimeMillis();
        this.masterServer = RPC.getServer(this, host, port, conf);

        infoPort = conf.getInt("bsp.http.infoserver.port", 40013);

        infoServer = new HttpServer("bspmaster", host, infoPort, true, conf);
        infoServer.setAttribute("bsp.master", this);
        // starting webserver
        infoServer.start();

        if (conf.getBoolean("bsp.monitor.fd.enabled", false)) {
            this.supervisor.set(FDProvider.createSupervisor(
                    conf.getClass("bsp.monitor.fd.supervisor.class", UDPSupervisor.class, Supervisor.class),
                    conf));
        }

        while (!Thread.currentThread().isInterrupted()) {
            try {
                if (fs == null) {
                    fs = FileSystem.get(conf);
                }
            } catch (IOException e) {
                LOG.error("Can't get connection to Hadoop Namenode!", e);
            }
            try {
                // clean up the system dir, which will only work if hdfs is out of safe mode
                if (systemDir == null) {
                    systemDir = new Path(getSystemDir());
                }
                LOG.info("Cleaning up the system directory");
                LOG.info(systemDir);
                fs.delete(systemDir, true);
                if (FileSystem.mkdirs(fs, systemDir, new FsPermission(SYSTEM_DIR_PERMISSION))) {
                    break;
                }
                LOG.error("Mkdirs failed to create " + systemDir);
                LOG.info(SUBDIR);
            } catch (AccessControlException ace) {
                LOG.warn("Failed to operate on bsp.system.dir (" + systemDir + ") because of permissions.");
                LOG.warn("Manually delete the bsp.system.dir (" + systemDir + ") and then start the BSPMaster.");
                LOG.warn("Bailing out ... ");
                throw ace;
            } catch (IOException ie) {
                LOG.info("problem cleaning system directory: " + systemDir, ie);
            }
            Thread.sleep(FS_ACCESS_RETRY_PERIOD);
        }

        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException();
        }

        deleteLocalFiles(SUBDIR);
    } else {
        System.out.println(localModeMessage);
        LOG.info(localModeMessage);
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.RMHAProtocolService.java
protected void startHAAdminServer() throws Exception {
    InetSocketAddress haAdminServiceAddress = conf.getSocketAddr(YarnConfiguration.RM_HA_ADMIN_ADDRESS,
            YarnConfiguration.DEFAULT_RM_HA_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_HA_ADMIN_PORT);

    RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, ProtobufRpcEngine.class);

    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
            new HAServiceProtocolServerSideTranslatorPB(this);
    BlockingService haPbService = HAServiceProtocolProtos.HAServiceProtocolService
            .newReflectiveBlockingService(haServiceProtocolXlator);

    WritableRpcEngine.ensureInitialized();

    String bindHost = haAdminServiceAddress.getHostName();
    int serviceHandlerCount = conf.getInt(YarnConfiguration.RM_HA_ADMIN_CLIENT_THREAD_COUNT,
            YarnConfiguration.DEFAULT_RM_HA_ADMIN_CLIENT_THREAD_COUNT);

    haAdminServer = new RPC.Builder(conf).setProtocol(HAServiceProtocolPB.class).setInstance(haPbService)
            .setBindAddress(bindHost).setPort(haAdminServiceAddress.getPort())
            .setNumHandlers(serviceHandlerCount).setVerbose(false).build();

    // Enable service authorization?
    if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
        haAdminServer.refreshServiceAcl(conf, new RMPolicyProvider());
    }

    haAdminServer.start();
    conf.updateConnectAddr(YarnConfiguration.RM_HA_ADMIN_ADDRESS, haAdminServer.getListenerAddress());
}
From source file:org.apache.flink.client.CliFrontend.java
/**
 * Creates a {@link ClusterClient} object from the given command line options and other parameters.
 * @param options Command line options
 * @param program The program for which to create the client.
 * @throws Exception
 */
protected ClusterClient createClient(CommandLineOptions options, PackagedProgram program) throws Exception {
    // Get the custom command-line (e.g. Standalone/Yarn/Mesos)
    CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(options.getCommandLine());

    ClusterClient client;
    try {
        client = activeCommandLine.retrieveCluster(options.getCommandLine(), config);
        logAndSysout("Cluster configuration: " + client.getClusterIdentifier());
    } catch (UnsupportedOperationException e) {
        try {
            String applicationName = "Flink Application: " + program.getMainClassName();
            client = activeCommandLine.createCluster(applicationName, options.getCommandLine(), config,
                    program.getAllLibraries());
            logAndSysout("Cluster started: " + client.getClusterIdentifier());
        } catch (UnsupportedOperationException e2) {
            throw new IllegalConfigurationException(
                    "The JobManager address is neither provided at the command-line, "
                            + "nor configured in flink-conf.yaml.");
        }
    }

    // Avoid resolving the JobManager Gateway here to prevent blocking until we invoke the user's program.
    final InetSocketAddress jobManagerAddress = client.getJobManagerAddress();
    logAndSysout("Using address " + jobManagerAddress.getHostString() + ":" + jobManagerAddress.getPort()
            + " to connect to JobManager.");
    logAndSysout("JobManager web interface address " + client.getWebInterfaceURL());
    return client;
}
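Note the pairing of getHostString() with getPort() above: unlike getHostName(), getHostString() never triggers a reverse DNS lookup, which matters when logging an address must not block. A small sketch of the difference (the literal IP is arbitrary):

    import java.net.InetSocketAddress;

    public class HostStringDemo {
        public static void main(String[] args) {
            InetSocketAddress addr = new InetSocketAddress("203.0.113.7", 6123);
            // getHostString() returns the literal the address was built with -- no DNS.
            System.out.println(addr.getHostString() + ":" + addr.getPort()); // 203.0.113.7:6123
            // getHostName() may perform a (potentially slow) reverse lookup
            // to turn the IP back into a host name.
            System.out.println(addr.getHostName() + ":" + addr.getPort());
        }
    }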
From source file:org.apache.hadoop.hdfs.server.datanode.CachingDataNode.java
private void reconfigureIpcServer(final Configuration conf) throws IOException {
    this.dataNode.ipcServer.stop();

    final InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));

    // Add all the RPC protocols that the Datanode implements
    RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class, ProtobufRpcEngine.class);
    final ClientDatanodeProtocolServerSideTranslatorPB clientDatanodeProtocolXlator =
            new ClientDatanodeProtocolServerSideTranslatorPB(this);
    BlockingService service = ClientDatanodeProtocolService
            .newReflectiveBlockingService(clientDatanodeProtocolXlator);

    final boolean oldVal = DefaultMetricsSystem.inMiniClusterMode();
    DefaultMetricsSystem.setMiniClusterMode(true);
    this.dataNode.ipcServer = RPC.getServer(ClientDatanodeProtocolPB.class, service, ipcAddr.getHostName(),
            ipcAddr.getPort(), conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, DFS_DATANODE_HANDLER_COUNT_DEFAULT),
            false, conf, this.dataNode.blockPoolTokenSecretManager);
    DefaultMetricsSystem.setMiniClusterMode(oldVal);

    final InterDatanodeProtocolServerSideTranslatorPB interDatanodeProtocolXlator =
            new InterDatanodeProtocolServerSideTranslatorPB(this);
    service = InterDatanodeProtocolService.newReflectiveBlockingService(interDatanodeProtocolXlator);
    DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service, this.dataNode.ipcServer);

    LOG.info("Opened IPC server at " + this.dataNode.ipcServer.getListenerAddress());

    // set service-level authorization security policy
    if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
        this.dataNode.ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
    }
}
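A recurring pattern in these Hadoop-family excerpts is parsing a "host:port" configuration string into an InetSocketAddress and then splitting it back into host and port for an RPC or server builder. Hadoop's NetUtils.createSocketAddr does the parsing there; the sketch below is a plain-JDK approximation of the same round trip, with a hand-rolled parse standing in for that helper:

    import java.net.InetSocketAddress;

    public final class AddrParse {
        // Parses "host:port" into an InetSocketAddress; a simplified stand-in
        // for Hadoop's NetUtils.createSocketAddr in the examples above.
        static InetSocketAddress parse(String hostPort) {
            int colon = hostPort.lastIndexOf(':');
            if (colon < 0) {
                throw new IllegalArgumentException("Expected host:port, got " + hostPort);
            }
            String host = hostPort.substring(0, colon);
            int port = Integer.parseInt(hostPort.substring(colon + 1));
            return new InetSocketAddress(host, port);
        }

        public static void main(String[] args) {
            InetSocketAddress addr = parse("0.0.0.0:50020");
            // Server builders take the pieces separately, which is why the
            // excerpts call getHostName() and getPort() right after parsing.
            System.out.println(addr.getHostName() + " / " + addr.getPort());
        }
    }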
From source file:org.apache.hama.http.HttpServer.java
/**
 * Configure an ssl listener on the server.
 *
 * @param addr address to listen on
 * @param sslConf conf to retrieve ssl options
 * @param needClientAuth whether client authentication is required
 */
public void addSslListener(InetSocketAddress addr, Configuration sslConf, boolean needClientAuth)
        throws IOException {
    if (webServer.isStarted()) {
        throw new IOException("Failed to add ssl listener");
    }
    if (needClientAuth) {
        // setting up SSL truststore for authenticating clients
        System.setProperty("javax.net.ssl.trustStore", sslConf.get("ssl.server.truststore.location", ""));
        System.setProperty("javax.net.ssl.trustStorePassword",
                sslConf.get("ssl.server.truststore.password", ""));
        System.setProperty("javax.net.ssl.trustStoreType", sslConf.get("ssl.server.truststore.type", "jks"));
    }
    SslSocketConnector sslListener = new SslSocketConnector();
    sslListener.setHost(addr.getHostName());
    sslListener.setPort(addr.getPort());
    sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
    sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
    sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
    sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
    sslListener.setNeedClientAuth(needClientAuth);
    webServer.addConnector(sslListener);
}
From source file:common.NameNode.java
private void startHttpServer(Configuration conf) throws IOException {
    InetSocketAddress infoSocAddr = getHttpServerAddress(conf);
    String infoHost = infoSocAddr.getHostName();
    int infoPort = infoSocAddr.getPort();
    this.httpServer = new HttpServer("hdfs", infoHost, infoPort, infoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
        // assume same ssl port for all datanodes
        InetSocketAddress datanodeSslPort = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475));
        this.httpServer.setAttribute("datanode.https.port", datanodeSslPort.getPort());
    }
    this.httpServer.setAttribute("name.node", this);
    this.httpServer.setAttribute("name.node.address", getNameNodeAddress());
    this.httpServer.setAttribute("name.system.image", getFSImage());
    this.httpServer.setAttribute("name.conf", conf);
    this.httpServer.addInternalServlet("getDelegationToken", DelegationTokenServlet.PATH_SPEC,
            DelegationTokenServlet.class);
    this.httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class);
    this.httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    this.httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class);
    this.httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class);
    this.httpServer.addInternalServlet("checksum", "/fileChecksum/*",
            FileChecksumServlets.RedirectServlet.class);
    this.httpServer.addInternalServlet("contentSummary", "/contentSummary/*", ContentSummaryServlet.class);
    this.httpServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = this.httpServer.getPort();
    this.httpAddress = new InetSocketAddress(infoHost, infoPort);
    setHttpServerAddress(conf);
    LOG.info(getRole() + " Web-server up at: " + httpAddress);
}
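The closing comment in the NameNode example ("the web-server port can be ephemeral") points at a general JDK pattern: when you bind to port 0, the InetSocketAddress you started with still reports getPort() == 0, and the real port must be read back from the bound socket. A minimal illustration with a plain ServerSocket:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortDemo {
        public static void main(String[] args) throws IOException {
            InetSocketAddress requested = new InetSocketAddress("localhost", 0);
            try (ServerSocket server = new ServerSocket()) {
                server.bind(requested);
                // The original address object still says 0 ...
                System.out.println("requested port: " + requested.getPort()); // 0
                // ... while the OS-assigned port lives on the bound socket.
                InetSocketAddress actual = (InetSocketAddress) server.getLocalSocketAddress();
                System.out.println("actual port:    " + actual.getPort());
            }
        }
    }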