List of usage examples for io.netty.buffer.PooledByteBufAllocator.DEFAULT
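PooledByteBufAllocator.DEFAULT is Netty's shared, process-wide pooled allocator. Before the project examples below, here is a minimal standalone sketch of the basic allocate/use/release cycle (the class name and buffer size are illustrative, not taken from any example on this page):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.PooledByteBufAllocator;

    public class PooledAllocatorSketch {
        public static void main(String[] args) {
            // Take a direct (off-heap) buffer from the shared pool.
            ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(256);
            try {
                buf.writeBytes("hello".getBytes());
                System.out.println("readable bytes: " + buf.readableBytes());
            } finally {
                // Pooled buffers are reference counted; release() returns
                // the memory to the pool instead of freeing it.
                buf.release();
            }
        }
    }

The examples that follow use the same allocator either directly, or indirectly by setting it as ChannelOption.ALLOCATOR on a Netty bootstrap.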
From source file:org.ratpackframework.bootstrap.internal.NettyRatpackService.java
License:Apache License
@Override
protected void startUp() throws Exception {
    ServerBootstrap bootstrap = new ServerBootstrap();
    group = new NioEventLoopGroup(MultithreadEventLoopGroup.DEFAULT_EVENT_LOOP_THREADS,
            new DefaultThreadFactory("ratpack-group", Thread.MAX_PRIORITY));
    bootstrap.group(group).channel(NioServerSocketChannel.class).childHandler(channelInitializer);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    bootstrap.option(ChannelOption.SO_BACKLOG, 1024);
    channel = bootstrap.bind(requestedAddress).sync().channel();
    boundAddress = (InetSocketAddress) channel.localAddress();
    if (logger.isLoggable(Level.INFO)) {
        logger.info(String.format("Ratpack started for http://%s:%s", getBindHost(), getBindPort()));
    }
}
From source file:org.restcomm.imscf.common.lwcomm.service.impl.LwCommListener.java
License:Open Source License
void start() {
    // TODO: handle AS-resolved pools
    int receiveTransportThreads = config.getReceiveTransportPoolConfig().getMaxThreads();
    int receiveWorkerThreads = config.getReceiveWorkerPoolConfig().getMaxThreads();
    // Netty 4.0 does not handle parallel UDP servers well.
    // See: https://github.com/netty/netty/issues/1706
    // We differentiate two listener modes:
    //
    // a) NIO
    // ------
    // In this case a simple NioEventLoopGroup is used. The NioEventLoopGroup is given
    // "receiveTransportThreads" number of threads. The user listener will be called
    // in a different executor which has receiveWorkerThreads number of threads.
    // This does not work well with Netty 4.0 but is still implemented here
    // in case it is fixed in a future Netty version (the problem is
    // that regardless of setting the nThreads parameter in NioEventLoopGroup only
    // one thread is used for incoming packet processing...).
    //
    // b) EPOLL
    // --------
    // The solution offered in the link above:
    // 1) Use the epoll transport (Linux only)
    // 2) Turn on the SO_REUSEPORT option
    // 3) Create multiple datagram channels bound to the same port
    // According to http://stackoverflow.com/questions/3261965/so-reuseport-on-linux
    // this only works on Linux with kernel 3.9+ or RHEL 6.5+ -- if epoll is not
    // available, the listener falls back to NIO mode.
    LwCommServiceImpl.LOGGER.info(
            "Starting LwCommListener. Receive transport threads: {}, receive worker threads: {}",
            receiveTransportThreads, receiveWorkerThreads);
    Configuration.ListenerMode listenerMode = config.getListenerMode();
    LwCommServiceImpl.LOGGER.info("Listener mode configured is {}", config.getListenerMode());
    if (listenerMode == Configuration.ListenerMode.EPOLL && !Epoll.isAvailable()) {
        LwCommServiceImpl.LOGGER
                .warn("Listener mode EPOLL is configured but is not available. Falling back to NIO mode.");
        listenerMode = Configuration.ListenerMode.NIO;
    }
    Bootstrap b = new Bootstrap();
    b.group(receiveTransportGroup);
    if (receiveTransportGroup instanceof EpollEventLoopGroup) {
        b.channel(EpollDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        b.option(EpollChannelOption.SO_REUSEPORT, true);
    } else {
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }
    channels = new HashSet<Channel>();
    b.handler(new ChannelInitializer<DatagramChannel>() {
        protected void initChannel(DatagramChannel channel) throws Exception {
            LwCommServiceImpl.LOGGER.info("Initializing channel: '{}'", channel);
            channels.add(channel);
            channel.pipeline().addLast(channelHandler);
        }
    });
    // TODO FIXME: hardcoded 256K limit for receive buffer!
    b.option(ChannelOption.SO_RCVBUF, 256 * 1024);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(10240));
    InetAddress host = null;
    int port = config.getLocalNode().getPort();
    try {
        host = InetAddress.getByName(config.getLocalNode().getHost());
        ChannelFuture future;
        if (listenerMode == ListenerMode.NIO) {
            future = b.bind(host, port).sync();
            if (!future.isSuccess()) {
                LwCommServiceImpl.LOGGER.error("Error while binding socket to {}:{}", host, port);
            } else {
                LwCommServiceImpl.LOGGER.info("Binding socket to {}:{} - SUCCESS", host, port);
            }
        } else {
            for (int i = 0; i < receiveTransportThreads; i++) {
                future = b.bind(host, port).sync();
                if (!future.isSuccess()) {
                    LwCommServiceImpl.LOGGER.error("Error while binding socket {} of {} to {}:{}", i + 1,
                            receiveTransportThreads, host, port);
                } else {
                    LwCommServiceImpl.LOGGER.info("Successfully bound socket {} of {} to {}:{} - {}", i + 1,
                            receiveTransportThreads, host, port, future.channel());
                }
            }
        }
    } catch (Exception e) {
        LwCommServiceImpl.LOGGER.error("Error while binding socket or getting local node address.", e);
    }
}
From source file:org.restnext.server.Server.java
License:Apache License
/**
 * Starts the server.
 */
public void start() {
    loadAndPrintBanner();
    try {
        InetSocketAddress bindAddress = serverInitializer.getBindAddress();
        ServerBootstrap serverBootstrap = Epoll.isAvailable()
                ? newEpoolServerBootstrap()
                : newNioServerBootstrap();
        ChannelFuture channelFuture = serverBootstrap
                //.handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(serverInitializer)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .bind(bindAddress)
                .sync();
        LOGGER.info("Application is running at - {}://{}",
                serverInitializer.isSslConfigured() ? "https" : "http", bindAddress);
        channelFuture.channel().closeFuture().sync();
    } catch (Exception e) {
        throw new ServerException("Could not start the server", e);
    } finally {
        stop();
    }
}
From source file:org.reveno.atp.core.channel.NettyBasedBuffer.java
License:Apache License
public NettyBasedBuffer(int length, boolean direct) {
    this.buffer = direct
            ? PooledByteBufAllocator.DEFAULT.directBuffer(length)
            : PooledByteBufAllocator.DEFAULT.buffer(length);
    this.buffer.order(ByteOrder.BIG_ENDIAN);
}
From source file:org.reveno.atp.core.channel.NettyBasedBuffer.java
License:Apache License
public NettyBasedBuffer(int length, int maxLength, boolean direct) {
    this.buffer = direct
            ? PooledByteBufAllocator.DEFAULT.directBuffer(length, maxLength)
            : PooledByteBufAllocator.DEFAULT.buffer(length, maxLength);
    this.buffer.order(ByteOrder.BIG_ENDIAN);
}
From source file:org.springframework.boot.rsocket.netty.NettyRSocketServerFactoryTests.java
License:Apache License
private RSocketRequester.Builder createRSocketRequesterBuilder() {
    RSocketStrategies strategies = RSocketStrategies.builder()
            .decoder(StringDecoder.allMimeTypes())
            .encoder(CharSequenceEncoder.allMimeTypes())
            .dataBufferFactory(new NettyDataBufferFactory(PooledByteBufAllocator.DEFAULT))
            .build();
    return RSocketRequester.builder().rsocketStrategies(strategies);
}
From source file:org.springframework.core.io.buffer.LeakAwareDataBufferFactory.java
License:Apache License
/**
 * Creates a new {@code LeakAwareDataBufferFactory} by wrapping a
 * {@link NettyDataBufferFactory}.
 */
public LeakAwareDataBufferFactory() {
    this(new NettyDataBufferFactory(PooledByteBufAllocator.DEFAULT));
}
From source file:org.springframework.messaging.rsocket.annotation.support.DefaultMetadataExtractorTests.java
License:Apache License
@Before
public void setUp() {
    this.strategies = RSocketStrategies.builder()
            .decoder(StringDecoder.allMimeTypes())
            .encoder(CharSequenceEncoder.allMimeTypes())
            .dataBufferFactory(new LeakAwareNettyDataBufferFactory(PooledByteBufAllocator.DEFAULT))
            .build();
    this.rsocket = BDDMockito.mock(RSocket.class);
    this.captor = ArgumentCaptor.forClass(Payload.class);
    BDDMockito.when(this.rsocket.fireAndForget(captor.capture())).thenReturn(Mono.empty());
    this.extractor = new DefaultMetadataExtractor(this.strategies);
}
From source file:org.springframework.messaging.rsocket.DefaultMetadataExtractorTests.java
License:Apache License
@BeforeEach
public void setUp() {
    DataBufferFactory bufferFactory = new LeakAwareNettyDataBufferFactory(PooledByteBufAllocator.DEFAULT);
    this.strategies = RSocketStrategies.builder().dataBufferFactory(bufferFactory).build();
    this.extractor = new DefaultMetadataExtractor(StringDecoder.allMimeTypes());
}
From source file:org.starnub.starbounddata.packets.Packet.java
License:Open Source License
/**
 * Recommended: For internal use with StarNub Player Sessions
 * <p>
 * Uses: This method will write to a {@link io.netty.buffer.ByteBuf} using this packet's fields
 * <p>
 *
 * @return ByteBuf representing the ByteBuf to write to the socket
 */
public ByteBuf packetToMessageEncoder() {
    ByteBuf msgOut = PooledByteBufAllocator.DEFAULT.directBuffer();
    this.write(msgOut);
    int payloadLengthOut = msgOut.readableBytes();
    byte[] dataOut;
    if (payloadLengthOut > 100) {
        dataOut = Zlib.compress(msgOut.readBytes(payloadLengthOut).array());
        payloadLengthOut = -dataOut.length;
    } else {
        dataOut = msgOut.readBytes(payloadLengthOut).array();
    }
    msgOut.clear();
    msgOut.writeByte(PACKET_ID);
    writeSVLQPacketEncoder(msgOut, payloadLengthOut);
    msgOut.writeBytes(dataOut);
    return msgOut;
}
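Note that packetToMessageEncoder() returns a pooled direct buffer, so ownership of the reference count passes to the caller. A hypothetical caller sketch (the packet and channel variables are assumed, not part of the StarNub example above):

    // Channel.writeAndFlush() takes over the buffer's reference count
    // and releases it back to the pool once it has been written out.
    ByteBuf encoded = packet.packetToMessageEncoder();
    channel.writeAndFlush(encoded);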