Usage examples for io.netty.channel ChannelOption SO_KEEPALIVE

The examples below show how several open-source projects set ChannelOption.SO_KEEPALIVE, which enables TCP keep-alive probes on the underlying socket. Client bootstraps set it via option(); server bootstraps typically set it via childOption() so that it applies to each accepted connection.
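A minimal sketch of both patterns (the event loop groups, channel classes, and empty initializers are illustrative placeholders, not taken from the projects below):

// Client side: enable keep-alive on the outgoing connection.
Bootstrap client = new Bootstrap();
client.group(new NioEventLoopGroup())
        .channel(NioSocketChannel.class)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // application handlers would be added here
            }
        });

// Server side: enable keep-alive on every accepted connection.
ServerBootstrap server = new ServerBootstrap();
server.group(new NioEventLoopGroup(1), new NioEventLoopGroup())
        .channel(NioServerSocketChannel.class)
        .childOption(ChannelOption.SO_KEEPALIVE, true)
        .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // application handlers would be added here
            }
        });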
From source file: org.apache.flink.runtime.io.network.netty.NettyClient.java
License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty client has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new Bootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);

    // Timeout for new connections
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS,
            config.getClientConnectTimeoutSeconds() * 1000);

    // Pooled allocator for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.option(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // --------------------------------------------------------------------
    // Channel pipeline for outgoing client connections
    // --------------------------------------------------------------------

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getClientChannelHandlers());
        }
    });

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms).", (end - start));
}
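The transport helpers initNioBootstrap() and initEpollBootstrap() are called but not shown. A hypothetical sketch of what the NIO variant might configure (the thread-count accessor below is assumed, and Flink's actual helper may differ):

// Hypothetical sketch only; not taken from Flink's source.
private void initNioBootstrap() {
    // The accessor name for the client thread count is an assumption.
    NioEventLoopGroup nioGroup = new NioEventLoopGroup(config.getClientNumThreads());
    bootstrap.group(nioGroup).channel(NioSocketChannel.class);
}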
From source file: org.apache.flink.runtime.io.network.netty.NettyConnectionManager.java
License: Apache License

@Override
public void start(ChannelManager channelManager) throws IOException {
    LOG.info(String.format("Starting with %d incoming and %d outgoing connection threads.", numInThreads,
            numOutThreads));
    LOG.info(String.format("Setting low water mark to %d and high water mark to %d bytes.", lowWaterMark,
            highWaterMark));
    LOG.info(String.format("Close channels after idle for %d ms.", closeAfterIdleForMs));

    final BufferProviderBroker bufferProviderBroker = channelManager;
    final EnvelopeDispatcher envelopeDispatcher = channelManager;

    int numHeapArenas = 0;
    int numDirectArenas = numInThreads + numOutThreads;
    int pageSize = bufferSize << 1;
    int chunkSize = 16 << 20; // 16 MB

    // shift pageSize maxOrder times to get to chunkSize
    int maxOrder = (int) (Math.log(chunkSize / pageSize) / Math.log(2));

    PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator(true, numHeapArenas,
            numDirectArenas, pageSize, maxOrder);

    String msg = String.format(
            "Instantiated PooledByteBufAllocator with direct arenas: %d, heap arenas: %d, "
                    + "page size (bytes): %d, chunk size (bytes): %d.",
            numDirectArenas, numHeapArenas, pageSize, (pageSize << maxOrder));
    LOG.info(msg);

    // --------------------------------------------------------------------
    // server bootstrap (incoming connections)
    // --------------------------------------------------------------------
    in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(numInThreads)).channel(NioServerSocketChannel.class)
            .localAddress(bindAddress, bindPort).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new InboundEnvelopeDecoder(bufferProviderBroker))
                            .addLast(new InboundEnvelopeDispatcher(envelopeDispatcher));
                }
            }).option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(pageSize))
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator);

    // --------------------------------------------------------------------
    // client bootstrap (outgoing connections)
    // --------------------------------------------------------------------
    out = new Bootstrap();
    out.group(new NioEventLoopGroup(numOutThreads)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new OutboundEnvelopeEncoder());
                }
            }).option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, lowWaterMark)
            .option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, highWaterMark)
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator)
            .option(ChannelOption.TCP_NODELAY, false)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        in.bind().sync();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (LOG.isDebugEnabled()) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                Date date = new Date();

                while (true) {
                    try {
                        Thread.sleep(DEBUG_PRINT_QUEUED_ENVELOPES_EVERY_MS);

                        date.setTime(System.currentTimeMillis());

                        System.out.println(date);
                        System.out.println(getNonZeroNumQueuedEnvelopes());
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
    }
}
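To make the allocator arithmetic concrete, assume bufferSize is 32 KiB (an illustrative value, not given in the snippet). Then pageSize = 64 KiB and maxOrder = log2(16 MiB / 64 KiB) = log2(256) = 8, so pageSize << maxOrder reproduces the 16 MiB chunk size:

// Illustrative check with an assumed bufferSize of 32 KiB:
int bufferSize = 32 * 1024;        // assumption for this example
int pageSize = bufferSize << 1;    // 65536
int chunkSize = 16 << 20;          // 16777216
int maxOrder = (int) (Math.log(chunkSize / pageSize) / Math.log(2)); // 8
assert (pageSize << maxOrder) == chunkSize;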
From source file: org.apache.flink.runtime.io.network.netty.OutboundConnectionQueueTest.java
License: Apache License

/**
 * Verifies that concurrent enqueue and close events are handled
 * correctly.
 */
private void doTestConcurrentEnqueueAndClose(final int numProducers, final int numEnvelopesPerProducer,
        final int minSleepTimeMs, final int maxSleepTimeMs) throws Exception {

    final InetAddress bindHost = InetAddress.getLocalHost();
    final int bindPort = 20000;

    // Testing concurrent enqueue and close requires real TCP channels,
    // because Netty's testing EmbeddedChannel does not implement the
    // same threading model as the NioEventLoopGroup (for example there
    // is no difference between being IN and OUTSIDE of the event loop
    // thread).
    final ServerBootstrap in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(1)).channel(NioServerSocketChannel.class).localAddress(bindHost, bindPort)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelInboundHandlerAdapter());
                }
            });

    final Bootstrap out = new Bootstrap();
    out.group(new NioEventLoopGroup(1)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelOutboundHandlerAdapter());
                }
            }).option(ChannelOption.TCP_NODELAY, false).option(ChannelOption.SO_KEEPALIVE, true);

    in.bind().sync();

    // --------------------------------------------------------------------
    // The testing pipeline looks as follows:
    // - Test Verification Handler [OUT]
    // - Test Control Handler [IN]
    // - Idle State Handler [IN/OUT] [added by OutboundConnectionQueue]
    // - Outbound queue (SUT) [IN] [added by OutboundConnectionQueue]

    channel = out.connect(bindHost, bindPort).sync().channel();

    queue = new OutboundConnectionQueue(channel, receiver, connectionManager, 0);

    controller = new TestControlHandler(true);
    verifier = new TestVerificationHandler();

    channel.pipeline().addFirst("Test Control Handler", controller);
    channel.pipeline().addFirst("Test Verification Handler", verifier);

    // --------------------------------------------------------------------

    final Random rand = new Random(RANDOM_SEED);

    // Every producer works on their local reference of the queue and only
    // updates it to the new channel when enqueue returns false, which
    // should only happen if the channel has been closed.
    final ConcurrentMap<ChannelID, OutboundConnectionQueue> producerQueues =
            new ConcurrentHashMap<ChannelID, OutboundConnectionQueue>();

    final ChannelID[] ids = new ChannelID[numProducers];

    for (int i = 0; i < numProducers; i++) {
        ids[i] = new ChannelID();
        producerQueues.put(ids[i], queue);
    }

    final CountDownLatch receivedAllEnvelopesLatch = verifier.waitForEnvelopes(numEnvelopesPerProducer - 1,
            ids);

    final List<Channel> closedChannels = new ArrayList<Channel>();

    // --------------------------------------------------------------------

    final Runnable closer = new Runnable() {
        @Override
        public void run() {
            while (receivedAllEnvelopesLatch.getCount() != 0) {
                try {
                    controller.fireIdle();

                    // Test two idle events arriving "closely"
                    // after each other
                    if (rand.nextBoolean()) {
                        controller.fireIdle();
                    }

                    Thread.sleep(minSleepTimeMs / 2);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    final Runnable[] producers = new Runnable[numProducers];

    for (int i = 0; i < numProducers; i++) {
        final int index = i;

        producers[i] = new Runnable() {
            @Override
            public void run() {
                final JobID jid = new JobID();
                final ChannelID cid = ids[index];

                for (int j = 0; j < numEnvelopesPerProducer; j++) {
                    OutboundConnectionQueue localQueue = producerQueues.get(cid);

                    try {
                        // This code path is handled by the NetworkConnectionManager
                        // in production to enqueue the envelope either to the current
                        // channel or a new one if it was closed.
                        while (!localQueue.enqueue(new Envelope(j, jid, cid))) {
                            synchronized (lock) {
                                if (localQueue == queue) {
                                    closedChannels.add(channel);

                                    channel = out.connect(bindHost, bindPort).sync().channel();

                                    queue = new OutboundConnectionQueue(channel, receiver,
                                            connectionManager, 0);

                                    channel.pipeline().addFirst("Test Control Handler", controller);
                                    channel.pipeline().addFirst("Test Verification Handler", verifier);
                                }
                            }

                            producerQueues.put(cid, queue);
                            localQueue = queue;
                        }

                        int sleepTime = rand.nextInt((maxSleepTimeMs - minSleepTimeMs) + 1) + minSleepTimeMs;
                        Thread.sleep(sleepTime);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    }

    for (int i = 0; i < numProducers; i++) {
        new Thread(producers[i], "Producer " + i).start();
    }

    new Thread(closer, "Closer").start();

    // --------------------------------------------------------------------

    while (receivedAllEnvelopesLatch.getCount() != 0) {
        receivedAllEnvelopesLatch.await();
    }

    // Final close, if the last close didn't make it.
    synchronized (lock) {
        if (channel != null) {
            controller.fireIdle();
        }
    }

    verifier.waitForClose();

    // If the producers do not sleep after each envelope, the close
    // should not make it through and no channel should have been
    // added to the list of closed channels
    if (minSleepTimeMs == 0 && maxSleepTimeMs == 0) {
        Assert.assertEquals(0, closedChannels.size());
    }

    for (Channel ch : closedChannels) {
        Assert.assertFalse(ch.isOpen());
    }

    System.out.println(closedChannels.size() + " channels were closed during execution.");

    out.group().shutdownGracefully().sync();
    in.group().shutdownGracefully().sync();
}
From source file: org.apache.geode.redis.GeodeRedisServer.java
License: Apache License

/**
 * Helper method to start the server listening for connections. The server is bound to the port
 * specified by {@link GeodeRedisServer#serverPort}
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void startRedisServer() throws IOException, InterruptedException {
    ThreadFactory selectorThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-SelectorThread-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        }
    };

    ThreadFactory workerThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-WorkerThread-" + counter.incrementAndGet());
            return t;
        }
    };

    bossGroup = null;
    workerGroup = null;
    Class<? extends ServerChannel> socketClass = null;
    if (singleThreadPerConnection) {
        bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE, selectorThreadFactory);
        workerGroup = new OioEventLoopGroup(Integer.MAX_VALUE, workerThreadFactory);
        socketClass = OioServerSocketChannel.class;
    } else {
        bossGroup = new NioEventLoopGroup(this.numSelectorThreads, selectorThreadFactory);
        workerGroup = new NioEventLoopGroup(this.numWorkerThreads, workerThreadFactory);
        socketClass = NioServerSocketChannel.class;
    }

    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
    String pwd = system.getConfig().getRedisPassword();
    final byte[] pwdB = Coder.stringToBytes(pwd);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(socketClass)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    if (logger.fineEnabled())
                        logger.fine("GeodeRedisServer-Connection established with " + ch.remoteAddress());
                    ChannelPipeline p = ch.pipeline();
                    p.addLast(ByteToCommandDecoder.class.getSimpleName(), new ByteToCommandDecoder());
                    p.addLast(ExecutionHandlerContext.class.getSimpleName(),
                            new ExecutionHandlerContext(ch, cache, regionCache, GeodeRedisServer.this, pwdB));
                }
            }).option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_RCVBUF, getBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, GeodeRedisServer.connectTimeoutMillis)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // Bind and start to accept incoming connections.
    ChannelFuture f = b.bind(new InetSocketAddress(getBindAddress(), serverPort)).sync();

    if (this.logger.infoEnabled()) {
        String logMessage = "GeodeRedisServer started {" + getBindAddress() + ":" + serverPort
                + "}, Selector threads: " + this.numSelectorThreads;
        if (this.singleThreadPerConnection)
            logMessage += ", One worker thread per connection";
        else
            logMessage += ", Worker threads: " + this.numWorkerThreads;
        this.logger.info(logMessage);
    }
    this.serverChannel = f.channel();
}
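This example picks its transport at runtime: the blocking OIO transport dedicates a thread per connection (hence the Integer.MAX_VALUE group size), while NIO multiplexes connections over a fixed pool. A reduced sketch of that selection logic (the flag is illustrative; note the OIO classes are deprecated in recent Netty 4.1 releases):

// Sketch of runtime transport selection, mirroring the pattern above.
boolean blockingTransport = false; // illustrative flag, not from Geode
EventLoopGroup bossGroup;
Class<? extends ServerChannel> channelClass;
if (blockingTransport) {
    bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE); // one thread per channel
    channelClass = OioServerSocketChannel.class;
} else {
    bossGroup = new NioEventLoopGroup(1);                 // fixed-size selector pool
    channelClass = NioServerSocketChannel.class;
}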
From source file: org.apache.giraph.comm.netty.NettyClient.java
License: Apache License

/**
 * Only constructor
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS,
                MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);
    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);
    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);
    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);
    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize)
            .option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter",
                                inboundByteCounter, handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter",
                                outboundByteCounter, handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter",
                                inboundByteCounter, handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter",
                                outboundByteCounter, handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}
From source file: org.apache.giraph.comm.netty.NettyServer.java
License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                    new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4, receiveBufferSize,
                            receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup,
                            ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup,
                            ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup,
                        executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler",
                        new AuthorizeServerHandler(), handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestServerHandler",
                        requestServerHandlerFactory.newHandler(workerRequestReservedMap, conf, myTaskInfo,
                                exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup,
                            ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup,
                            ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("requestServerHandler",
                        requestServerHandlerFactory.newHandler(workerRequestReservedMap, conf, myTaskInfo,
                                exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debugability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt "
                        + "is true, port " + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to "
                + maxPoolSize + " threads on bind attempt " + bindAttempts + " with sendBufferSize = "
                + sendBufferSize + " receiveBufferSize = " + receiveBufferSize);
    }
}
From source file: org.apache.hadoop.hbase.ipc.AsyncRpcClient.java
License: Apache License

/**
 * Constructor for tests
 *
 * @param configuration to HBase
 * @param clusterId for the cluster
 * @param localAddress local address to connect to
 * @param channelInitializer for custom channel handlers
 */
@VisibleForTesting
AsyncRpcClient(Configuration configuration, String clusterId, SocketAddress localAddress,
        ChannelInitializer<SocketChannel> channelInitializer) {
    super(configuration, clusterId, localAddress);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Starting async Hbase RPC client");
    }

    Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass;
    this.useGlobalEventLoopGroup = conf.getBoolean(USE_GLOBAL_EVENT_LOOP_GROUP, true);
    if (useGlobalEventLoopGroup) {
        eventLoopGroupAndChannelClass = getGlobalEventLoopGroup(configuration);
    } else {
        eventLoopGroupAndChannelClass = createEventLoopGroup(configuration);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Use " + (useGlobalEventLoopGroup ? "global" : "individual") + " event loop group "
                + eventLoopGroupAndChannelClass.getFirst().getClass().getSimpleName());
    }

    this.connections = new PoolMap<>(getPoolType(configuration), getPoolSize(configuration));
    this.failedServers = new FailedServers(configuration);

    int operationTimeout = configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
            HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);

    // Configure the default bootstrap.
    this.bootstrap = new Bootstrap();
    bootstrap.group(eventLoopGroupAndChannelClass.getFirst())
            .channel(eventLoopGroupAndChannelClass.getSecond())
            .option(ChannelOption.TCP_NODELAY, tcpNoDelay)
            .option(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, operationTimeout);
    if (channelInitializer == null) {
        channelInitializer = DEFAULT_CHANNEL_INITIALIZER;
    }
    bootstrap.handler(channelInitializer);
    if (localAddress != null) {
        bootstrap.localAddress(localAddress);
    }
}
From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java
License: Apache License

private void connect() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to " + remoteId.address);
    }

    this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
            .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
            .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
            .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
            .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    Channel ch = future.channel();
                    if (!future.isSuccess()) {
                        failInit(ch, toIOE(future.cause()));
                        rpcClient.failedServers.addToFailedServers(remoteId.address);
                        return;
                    }
                    ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
                    if (useSasl) {
                        saslNegotiate(ch);
                    } else {
                        // send the connection header to server
                        ch.write(connectionHeaderWithLength.retainedDuplicate());
                        established(ch);
                    }
                }
            }).channel();
}
From source file: org.apache.hadoop.hbase.ipc.NettyRpcServer.java
License: Apache License

public NettyRpcServer(final Server server, final String name,
        final List<BlockingServiceAndInterface> services, final InetSocketAddress bindAddress,
        Configuration conf, RpcScheduler scheduler) throws IOException {
    super(server, name, services, bindAddress, conf, scheduler);
    this.bindAddress = bindAddress;
    boolean useEpoll = useEpoll(conf);
    int workerCount = conf.getInt("hbase.netty.rpc.server.worker.count",
            Runtime.getRuntime().availableProcessors() / 4);
    EventLoopGroup bossGroup = null;
    EventLoopGroup workerGroup = null;
    if (useEpoll) {
        bossGroup = new EpollEventLoopGroup(1);
        workerGroup = new EpollEventLoopGroup(workerCount);
    } else {
        bossGroup = new NioEventLoopGroup(1);
        workerGroup = new NioEventLoopGroup(workerCount);
    }
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup);
    if (useEpoll) {
        bootstrap.channel(EpollServerSocketChannel.class);
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
    }
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.childHandler(new Initializer(maxRequestSize));

    try {
        serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
        LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress()
                + ", hbase.netty.rpc.server.worker.count=" + workerCount + ", useEpoll=" + useEpoll);
        allChannels.add(serverChannel);
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    initReconfigurable(conf);
    this.scheduler.init(new RpcSchedulerContext(this));
}
From source file: org.apache.hadoop.mpich.appmaster.netty.PMIServer.java
License: Apache License

public void start() throws Exception {
    this.bossGroup = new NioEventLoopGroup();
    this.workerGroup = new NioEventLoopGroup();
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childHandler(new ServerChannelInitializer(this.manager))
            .childOption(ChannelOption.SO_KEEPALIVE, true);
    // Bind and start to accept incoming connections.
    this.channel = b.bind(0).sync().channel();
    this.portNum = ((NioServerSocketChannel) channel).localAddress().getPort();
}
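Binding to port 0, as this last example does, lets the OS assign a free ephemeral port, which is then read back from the bound channel's local address. A minimal sketch of the same pattern (the groups and empty initializer are illustrative placeholders):

// Minimal sketch: let the OS assign a port, then read it back.
ServerBootstrap b = new ServerBootstrap();
b.group(new NioEventLoopGroup(1), new NioEventLoopGroup())
        .channel(NioServerSocketChannel.class)
        .childOption(ChannelOption.SO_KEEPALIVE, true)
        .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // application handlers would be added here
            }
        });
Channel serverChannel = b.bind(0).sync().channel();
int assignedPort = ((InetSocketAddress) serverChannel.localAddress()).getPort();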