Usage examples for java.net.InetSocketAddress.getHostName(), collected from open-source projects.
public final String getHostName()
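Before the project examples, a minimal self-contained sketch (not from any of the sources below; the host names are placeholders) of what getHostName() returns depending on how the address was created. Per the Javadoc, the method may trigger a reverse name-service lookup if the address was created from a literal IP address.

import java.net.InetSocketAddress;

public class GetHostNameDemo {
    public static void main(String[] args) {
        // Created from a host name: getHostName() returns it without a lookup.
        InetSocketAddress byName = new InetSocketAddress("example.com", 80);
        System.out.println(byName.getHostName());     // example.com

        // Created unresolved: no DNS resolution is attempted at all.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 80);
        System.out.println(unresolved.getHostName()); // example.com

        // Created from a literal IP: getHostName() may perform a reverse lookup.
        InetSocketAddress byIp = new InetSocketAddress("93.184.216.34", 80);
        System.out.println(byIp.getHostName());       // host name if reverse DNS succeeds
    }
}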
From source file:org.openflamingo.remote.thrift.mapred.ThriftJobTrackerPlugin.java
@Override
public void start(Object service) {
    LOG.info("Starting ThriftJobTrackerPlugin");
    this.jobTracker = (JobTracker) service;
    try {
        InetSocketAddress address = NetUtils
                .createSocketAddr(conf.get(THRIFT_ADDRESS_PROPERTY, DEFAULT_THRIFT_ADDRESS));
        this.thriftServer = new ThriftPluginServer(address, new ProcessorFactory());
        thriftServer.setConf(conf);
        thriftServer.start();
        // The port may have been 0, so we update it.
        conf.set(THRIFT_ADDRESS_PROPERTY, address.getHostName() + ":" + thriftServer.getPort());
    } catch (Exception e) {
        LOG.warn("Cannot start Thrift jobtracker plug-in", e);
        throw new RuntimeException("Cannot start Thrift jobtracker plug-in", e);
    }
}
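A common pattern behind the comment above: binding to port 0 lets the OS pick a free port, so the configuration is rewritten with the port actually bound. A stripped-down sketch of the same idea using only the JDK's ServerSocket (the property key is made up for illustration):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.Properties;

public class EphemeralPortDemo {
    public static void main(String[] args) throws IOException {
        Properties conf = new Properties();
        try (ServerSocket server = new ServerSocket()) {
            // Port 0 asks the OS to pick any free port.
            server.bind(new InetSocketAddress("localhost", 0));
            InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress();
            // Store the real port back, as the plugin above does.
            conf.setProperty("demo.address", bound.getHostName() + ":" + bound.getPort());
            System.out.println(conf.getProperty("demo.address")); // e.g. localhost:53124
        }
    }
}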
From source file:com.couchbase.client.ViewConnection.java
/**
 * Create ViewNode connections and queue them up for connect.
 *
 * This method also defines the connection params for each connection,
 * including the default settings like timeouts and the user agent string.
 *
 * @param addrs addresses of all the nodes it should connect to.
 * @return Returns a list of the ViewNodes.
 * @throws IOException
 */
private List<ViewNode> createConnections(List<InetSocketAddress> addrs) throws IOException {
    List<ViewNode> nodeList = new LinkedList<ViewNode>();
    for (InetSocketAddress a : addrs) {
        HttpParams params = new SyncBasicHttpParams();
        params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
                .setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 5000)
                .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
                .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
                .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
                .setParameter(CoreProtocolPNames.USER_AGENT, "Couchbase Java Client 1.0.2");
        HttpProcessor httpproc = new ImmutableHttpProcessor(new HttpRequestInterceptor[] {
                new RequestContent(), new RequestTargetHost(), new RequestConnControl(),
                new RequestUserAgent(), new RequestExpectContinue(), });
        AsyncNHttpClientHandler protocolHandler = new AsyncNHttpClientHandler(httpproc,
                new MyHttpRequestExecutionHandler(), new DefaultConnectionReuseStrategy(),
                new DirectByteBufferAllocator(), params);
        protocolHandler.setEventListener(new EventLogger());
        AsyncConnectionManager connMgr = new AsyncConnectionManager(
                new HttpHost(a.getHostName(), a.getPort()), NUM_CONNS, protocolHandler, params,
                new RequeueOpCallback(this));
        getLogger().info("Added %s to connect queue", a.getHostName());
        ViewNode node = connFactory.createViewNode(a, connMgr);
        node.init();
        nodeList.add(node);
    }
    return nodeList;
}
From source file:eu.stratosphere.nephele.jobmanager.JobManager.java
public JobManager(ExecutionMode executionMode) throws Exception {

    final String ipcAddressString = GlobalConfiguration.getString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY,
            null);

    InetAddress ipcAddress = null;
    if (ipcAddressString != null) {
        try {
            ipcAddress = InetAddress.getByName(ipcAddressString);
        } catch (UnknownHostException e) {
            throw new Exception("Cannot convert " + ipcAddressString + " to an IP address: " + e.getMessage(),
                    e);
        }
    }

    final int ipcPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
            ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);

    // Read the suggested client polling interval
    this.recommendedClientPollingInterval = GlobalConfiguration.getInteger(
            ConfigConstants.JOBCLIENT_POLLING_INTERVAL_KEY, ConfigConstants.DEFAULT_JOBCLIENT_POLLING_INTERVAL);

    // Load the job progress collector
    this.eventCollector = new EventCollector(this.recommendedClientPollingInterval);

    // Register simple job archive
    int archived_items = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_WEB_ARCHIVE_COUNT,
            ConfigConstants.DEFAULT_JOB_MANAGER_WEB_ARCHIVE_COUNT);
    if (archived_items > 0) {
        this.archive = new MemoryArchivist(archived_items);
        this.eventCollector.registerArchivist(archive);
    } else {
        this.archive = null;
    }

    // Create the accumulator manager, with same archiving limit as web
    // interface. We need to store the accumulators for at least one job.
    // Otherwise they might be deleted before the client requested the
    // accumulator results.
    this.accumulatorManager = new AccumulatorManager(Math.min(1, archived_items));

    // Load the input split manager
    this.inputSplitManager = new InputSplitManager();

    // Determine own RPC address
    final InetSocketAddress rpcServerAddress = new InetSocketAddress(ipcAddress, ipcPort);

    // Start job manager's IPC server
    try {
        final int handlerCount = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_HANDLERS_KEY,
                ConfigConstants.DEFAULT_JOB_MANAGER_IPC_HANDLERS);
        this.jobManagerServer = RPC.getServer(this, rpcServerAddress.getHostName(), rpcServerAddress.getPort(),
                handlerCount);
        this.jobManagerServer.start();
    } catch (IOException e) {
        throw new Exception("Cannot start RPC server: " + e.getMessage(), e);
    }

    LOG.info("Starting job manager in " + executionMode + " mode");

    // Try to load the instance manager for the given execution mode
    if (executionMode == ExecutionMode.LOCAL) {
        try {
            this.instanceManager = new LocalInstanceManager();
        } catch (Throwable t) {
            throw new Exception("Cannot instantiate local instance manager: " + t.getMessage(), t);
        }
    } else {
        final String instanceManagerClassName = JobManagerUtils.getInstanceManagerClassName(executionMode);
        LOG.info("Trying to load " + instanceManagerClassName + " as instance manager");
        this.instanceManager = JobManagerUtils.loadInstanceManager(instanceManagerClassName);
        if (this.instanceManager == null) {
            throw new Exception("Unable to load instance manager " + instanceManagerClassName);
        }
    }

    // Try to load the scheduler for the given execution mode
    final String schedulerClassName = JobManagerUtils.getSchedulerClassName(executionMode);
    LOG.info("Trying to load " + schedulerClassName + " as scheduler");
    this.scheduler = JobManagerUtils.loadScheduler(schedulerClassName, this, this.instanceManager);
    if (this.scheduler == null) {
        throw new Exception("Unable to load scheduler " + schedulerClassName);
    }

    // Load profiler if it should be used
    if (GlobalConfiguration.getBoolean(ProfilingUtils.ENABLE_PROFILING_KEY, false)) {
        final String profilerClassName = GlobalConfiguration.getString(ProfilingUtils.JOBMANAGER_CLASSNAME_KEY,
                "eu.stratosphere.nephele.profiling.impl.JobManagerProfilerImpl");
        this.profiler = ProfilingUtils.loadJobManagerProfiler(profilerClassName, ipcAddress);
        if (this.profiler == null) {
            throw new Exception("Cannot load profiler");
        }
    } else {
        this.profiler = null;
        LOG.debug("Profiler disabled");
    }
}
From source file:com.taobao.adfs.distributed.DistributedServer.java
void createRpcServer() throws IOException {
    InetSocketAddress socAddr = NetUtils.createSocketAddr(serverName);
    int rpcHandlerNumber = conf.getInt("distributed.server.handler.number", 100);
    for (int i = 0; i < 10; ++i) {
        try {
            rpcServer = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), rpcHandlerNumber, false,
                    conf);
            break;
        } catch (IOException e) {
            if (i == 9 || !Utilities.getFirstCause(e).getClass().equals(BindException.class))
                throw e;
            Utilities.sleepAndProcessInterruptedException(1000, logger);
        }
    }
    try {
        rpcServer.start();
        log(Level.INFO, " create rpc server with address=", serverName);
    } catch (Throwable t) {
        log(Level.ERROR, " fail to create rpc server with address=", serverName, t);
        rpcServer = null;
        throw new IOException(t);
    }
}
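The loop above retries only when the root cause is a BindException (for example, the port is still held by a previous process). A minimal sketch of that retry shape with a plain ServerSocket, assuming an arbitrary attempt count and a fixed one-second delay:

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;

public class BindRetryDemo {
    static ServerSocket bindWithRetry(int port, int attempts) throws IOException, InterruptedException {
        for (int i = 0; ; ++i) {
            try {
                return new ServerSocket(port);
            } catch (BindException e) {
                // Give up after the last attempt; otherwise wait and retry.
                if (i == attempts - 1) {
                    throw e;
                }
                Thread.sleep(1000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        try (ServerSocket server = bindWithRetry(8080, 10)) {
            System.out.println("Bound to port " + server.getLocalPort());
        }
    }
}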
From source file:org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.java
@SuppressWarnings("unchecked") @Override// www .j a v a 2s . co m public long renewDelegationToken(final Token<TimelineDelegationTokenIdentifier> timelineDT) throws IOException, YarnException { final boolean isTokenServiceAddrEmpty = timelineDT.getService().toString().isEmpty(); final String scheme = isTokenServiceAddrEmpty ? null : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http"); final InetSocketAddress address = isTokenServiceAddrEmpty ? null : SecurityUtil.getTokenServiceAddr(timelineDT); PrivilegedExceptionAction<Long> renewDTAction = new PrivilegedExceptionAction<Long>() { @Override public Long run() throws Exception { // If the timeline DT to renew is different than cached, replace it. // Token to set every time for retry, because when exception happens, // DelegationTokenAuthenticatedURL will reset it to null; if (!timelineDT.equals(token.getDelegationToken())) { token.setDelegationToken((Token) timelineDT); } DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(authenticator, connConfigurator); // If the token service address is not available, fall back to use // the configured service address. final URI serviceURI = isTokenServiceAddrEmpty ? resURI : new URI(scheme, null, address.getHostName(), address.getPort(), RESOURCE_URI_STR, null, null); return authUrl.renewDelegationToken(serviceURI.toURL(), token, doAsUser); } }; return (Long) operateDelegationToken(renewDTAction); }
From source file:org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.java
@SuppressWarnings("unchecked") @Override//from w ww .jav a 2 s .c o m public void cancelDelegationToken(final Token<TimelineDelegationTokenIdentifier> timelineDT) throws IOException, YarnException { final boolean isTokenServiceAddrEmpty = timelineDT.getService().toString().isEmpty(); final String scheme = isTokenServiceAddrEmpty ? null : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http"); final InetSocketAddress address = isTokenServiceAddrEmpty ? null : SecurityUtil.getTokenServiceAddr(timelineDT); PrivilegedExceptionAction<Void> cancelDTAction = new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { // If the timeline DT to cancel is different than cached, replace it. // Token to set every time for retry, because when exception happens, // DelegationTokenAuthenticatedURL will reset it to null; if (!timelineDT.equals(token.getDelegationToken())) { token.setDelegationToken((Token) timelineDT); } DelegationTokenAuthenticatedURL authUrl = new DelegationTokenAuthenticatedURL(authenticator, connConfigurator); // If the token service address is not available, fall back to use // the configured service address. final URI serviceURI = isTokenServiceAddrEmpty ? resURI : new URI(scheme, null, address.getHostName(), address.getPort(), RESOURCE_URI_STR, null, null); authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser); return null; } }; operateDelegationToken(cancelDTAction); }
From source file:org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.java
protected void doSecureLogin() throws IOException {
    InetSocketAddress socAddr = getBindAddress(conf);
    SecurityUtil.login(this.conf, YarnConfiguration.RM_KEYTAB, YarnConfiguration.RM_PRINCIPAL,
            socAddr.getHostName());

    // if security is enabled, set rmLoginUGI as UGI of loginUser
    if (UserGroupInformation.isSecurityEnabled()) {
        this.rmLoginUGI = UserGroupInformation.getLoginUser();
    }
}
From source file:org.apache.nifi.controller.StandardFlowService.java
private StandardFlowService(final FlowController controller, final NiFiProperties nifiProperties,
        final NodeProtocolSenderListener senderListener, final StringEncryptor encryptor,
        final boolean configuredForClustering, final ClusterCoordinator clusterCoordinator,
        final RevisionManager revisionManager, final Authorizer authorizer) throws IOException {

    this.nifiProperties = nifiProperties;
    this.controller = controller;
    flowXml = Paths.get(nifiProperties.getProperty(NiFiProperties.FLOW_CONFIGURATION_FILE));

    gracefulShutdownSeconds = (int) FormatUtils.getTimeDuration(
            nifiProperties.getProperty(NiFiProperties.FLOW_CONTROLLER_GRACEFUL_SHUTDOWN_PERIOD),
            TimeUnit.SECONDS);
    autoResumeState = nifiProperties.getAutoResumeState();

    dao = new StandardXMLFlowConfigurationDAO(flowXml, encryptor, nifiProperties);

    this.clusterCoordinator = clusterCoordinator;
    if (clusterCoordinator != null) {
        clusterCoordinator.setFlowService(this);
    }
    this.revisionManager = revisionManager;
    this.authorizer = authorizer;

    if (configuredForClustering) {
        this.configuredForClustering = configuredForClustering;

        this.senderListener = senderListener;
        senderListener.addHandler(this);

        final InetSocketAddress nodeApiAddress = nifiProperties.getNodeApiAddress();
        final InetSocketAddress nodeSocketAddress = nifiProperties.getClusterNodeProtocolAddress();

        String nodeUuid = null;
        final StateManager stateManager = controller.getStateManagerProvider()
                .getStateManager(CLUSTER_NODE_CONFIG);
        if (stateManager != null) {
            nodeUuid = stateManager.getState(Scope.LOCAL).get(NODE_UUID);
        }
        if (nodeUuid == null) {
            nodeUuid = UUID.randomUUID().toString();
        }

        // use a random UUID as the proposed node identifier
        this.nodeId = new NodeIdentifier(nodeUuid, nodeApiAddress.getHostName(), nodeApiAddress.getPort(),
                nodeSocketAddress.getHostName(), nodeSocketAddress.getPort(),
                nifiProperties.getRemoteInputHost(), nifiProperties.getRemoteInputPort(),
                nifiProperties.getRemoteInputHttpPort(), nifiProperties.isSiteToSiteSecure());

    } else {
        this.configuredForClustering = false;
        this.senderListener = null;
    }
}
From source file:common.NameNode.java
/**
 * Initialize name-node.
 *
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
    InetSocketAddress socAddr = getRpcServerAddress(conf);
    int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);

    // set service-level authorization security policy
    if (serviceAuthEnabled = conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    NameNode.initMetrics(conf, this.getRole());
    loadNamesystem(conf);

    // create rpc server
    this.server = RPC.getServer(NamenodeProtocols.class, this, socAddr.getHostName(), socAddr.getPort(),
            handlerCount, false, conf, namesystem.getDelegationTokenSecretManager());

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.rpcAddress = this.server.getListenerAddress();
    setRpcServerAddress(conf);

    activate(conf);
    LOG.info(getRole() + " up at: " + rpcAddress);
}
From source file:org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.java
public NameNodeRpcServer(Configuration conf, NameNode nn) throws IOException {
    this.nn = nn;
    this.namesystem = nn.getNamesystem();
    this.metrics = NameNode.getNameNodeMetrics();

    int handlerCount = conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, DFS_NAMENODE_HANDLER_COUNT_DEFAULT);

    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

    ClientNamenodeProtocolServerSideTranslatorPB clientProtocolServerTranslator =
            new ClientNamenodeProtocolServerSideTranslatorPB(this);
    BlockingService clientNNPbService = ClientNamenodeProtocol
            .newReflectiveBlockingService(clientProtocolServerTranslator);

    DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
            new DatanodeProtocolServerSideTranslatorPB(this);
    BlockingService dnProtoPbService = DatanodeProtocolService
            .newReflectiveBlockingService(dnProtoPbTranslator);

    NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator =
            new NamenodeProtocolServerSideTranslatorPB(this);
    BlockingService NNPbService = NamenodeProtocolService.newReflectiveBlockingService(namenodeProtocolXlator);

    RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator =
            new RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(this);
    BlockingService refreshAuthService = RefreshAuthorizationPolicyProtocolService
            .newReflectiveBlockingService(refreshAuthPolicyXlator);

    RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator =
            new RefreshUserMappingsProtocolServerSideTranslatorPB(this);
    BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
            .newReflectiveBlockingService(refreshUserMappingXlator);

    RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator =
            new RefreshCallQueueProtocolServerSideTranslatorPB(this);
    BlockingService refreshCallQueueService = RefreshCallQueueProtocolService
            .newReflectiveBlockingService(refreshCallQueueXlator);

    GenericRefreshProtocolServerSideTranslatorPB genericRefreshXlator =
            new GenericRefreshProtocolServerSideTranslatorPB(this);
    BlockingService genericRefreshService = GenericRefreshProtocolService
            .newReflectiveBlockingService(genericRefreshXlator);

    GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator =
            new GetUserMappingsProtocolServerSideTranslatorPB(this);
    BlockingService getUserMappingService = GetUserMappingsProtocolService
            .newReflectiveBlockingService(getUserMappingXlator);

    HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
            new HAServiceProtocolServerSideTranslatorPB(this);
    BlockingService haPbService = HAServiceProtocolService
            .newReflectiveBlockingService(haServiceProtocolXlator);

    TraceAdminProtocolServerSideTranslatorPB traceAdminXlator =
            new TraceAdminProtocolServerSideTranslatorPB(this);
    BlockingService traceAdminService = TraceAdminService.newReflectiveBlockingService(traceAdminXlator);

    WritableRpcEngine.ensureInitialized();

    InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
    if (serviceRpcAddr != null) {
        String bindHost = nn.getServiceRpcServerBindHost(conf);
        if (bindHost == null) {
            bindHost = serviceRpcAddr.getHostName();
        }
        LOG.info("Service RPC server is binding to " + bindHost + ":" + serviceRpcAddr.getPort());

        int serviceHandlerCount = conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
                DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
        this.serviceRpcServer = new RPC.Builder(conf)
                .setProtocol(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
                .setInstance(clientNNPbService).setBindAddress(bindHost).setPort(serviceRpcAddr.getPort())
                .setNumHandlers(serviceHandlerCount).setVerbose(false)
                .setSecretManager(namesystem.getDelegationTokenSecretManager()).build();

        // Add all the RPC protocols that the namenode implements
        DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, serviceRpcServer);
        DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, serviceRpcServer);
        DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, serviceRpcServer);
        DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService,
                serviceRpcServer);
        DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService,
                serviceRpcServer);
        // We support Refreshing call queue here in case the client RPC queue is full
        DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService,
                serviceRpcServer);
        DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, serviceRpcServer);
        DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, serviceRpcServer);
        DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class, traceAdminService, serviceRpcServer);

        // Update the address with the correct port
        InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
        serviceRPCAddress = new InetSocketAddress(serviceRpcAddr.getHostName(), listenAddr.getPort());
        nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
    } else {
        serviceRpcServer = null;
        serviceRPCAddress = null;
    }

    InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
    String bindHost = nn.getRpcServerBindHost(conf);
    if (bindHost == null) {
        bindHost = rpcAddr.getHostName();
    }
    LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());

    this.clientRpcServer = new RPC.Builder(conf)
            .setProtocol(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
            .setInstance(clientNNPbService).setBindAddress(bindHost).setPort(rpcAddr.getPort())
            .setNumHandlers(handlerCount).setVerbose(false)
            .setSecretManager(namesystem.getDelegationTokenSecretManager()).build();

    // Add all the RPC protocols that the namenode implements
    DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService,
            clientRpcServer);
    DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService,
            clientRpcServer);
    DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, clientRpcServer);
    DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class, traceAdminService, clientRpcServer);

    // set service-level authorization security policy
    if (serviceAuthEnabled = conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
        clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
        if (serviceRpcServer != null) {
            serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
        }
    }

    // The rpc-server port can be ephemeral... ensure we have the correct info
    InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
    clientRpcAddress = new InetSocketAddress(rpcAddr.getHostName(), listenAddr.getPort());
    nn.setRpcServerAddress(conf, clientRpcAddress);

    minimumDataNodeVersion = conf.get(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
            DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);

    // Set terse exception whose stack trace won't be logged
    this.clientRpcServer.addTerseExceptions(SafeModeException.class, FileNotFoundException.class,
            HadoopIllegalArgumentException.class, FileAlreadyExistsException.class, InvalidPathException.class,
            ParentNotDirectoryException.class, UnresolvedLinkException.class,
            AlreadyBeingCreatedException.class, QuotaExceededException.class,
            RecoveryInProgressException.class, AccessControlException.class, InvalidToken.class,
            LeaseExpiredException.class, NSQuotaExceededException.class, DSQuotaExceededException.class,
            AclException.class, FSLimitException.PathComponentTooLongException.class,
            FSLimitException.MaxDirectoryItemsExceededException.class, UnresolvedPathException.class);
}
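A closing caveat when adapting any of the examples above: getHostName() may block on a reverse DNS lookup for an address created from a literal IP, whereas getHostString() (available since Java 7) returns the stored host string without any lookup. A minimal sketch of the difference, using an arbitrary private address:

import java.net.InetSocketAddress;

public class HostNameVsHostString {
    public static void main(String[] args) {
        InetSocketAddress addr = new InetSocketAddress("10.0.0.1", 9000);

        // May block while attempting a reverse DNS lookup of 10.0.0.1.
        System.out.println(addr.getHostName());

        // Never performs a lookup; returns the literal that was passed in.
        System.out.println(addr.getHostString()); // 10.0.0.1
    }
}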