Example usage for io.netty.channel ChannelOption SO_REUSEADDR

List of usage examples for io.netty.channel ChannelOption SO_REUSEADDR

Introduction

This page collects example usages of io.netty.channel ChannelOption SO_REUSEADDR from open source projects.

Prototype

public static final ChannelOption<Boolean> SO_REUSEADDR

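For context, here is a minimal, self-contained server sketch that sets this option. It is illustrative only: the port (8080), the single-threaded boss group, and the empty child initializer are assumptions, not taken from any of the examples below.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class ReuseAddrServerSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                    .group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    // Allow rebinding a port whose previous socket still has
                    // connections lingering in the TIME_WAIT state.
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Application handlers would be added here.
                        }
                    });
            bootstrap.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}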


Usage

From source file:io.apigee.trireme.container.netty.NettyFactory.java

License:Open Source License

public ChannelFuture connect(int port, String host, String localHost,
        ChannelInitializer<SocketChannel> pipeline) {
    Bootstrap boot = new Bootstrap();
    boot.group(ioThreads).channel(NioSocketChannel.class).option(ChannelOption.SO_REUSEADDR, true)
            .remoteAddress(host, port).handler(pipeline);
    if (localHost != null) {
        boot.localAddress(localHost, 0);
    }
    return boot.connect();
}
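A standalone sketch of the same client-side pattern, assuming Netty 4.x: SO_REUSEADDR is set on a Bootstrap before connecting. The localhost:8080 endpoint and the empty initializer are placeholders, not part of the Trireme code above.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class ReuseAddrClientSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup ioThreads = new NioEventLoopGroup();
        try {
            Bootstrap boot = new Bootstrap()
                    .group(ioThreads)
                    .channel(NioSocketChannel.class)
                    // Lets the client rebind a recently used local port.
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .remoteAddress("localhost", 8080) // placeholder endpoint
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Application handlers would be added here.
                        }
                    });
            ChannelFuture connect = boot.connect();
            connect.await(); // inspect connect.isSuccess() / connect.cause() as needed
        } finally {
            ioThreads.shutdownGracefully();
        }
    }
}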

From source file:io.apigee.trireme.container.netty.NettyServer.java

License:Open Source License

NettyServer(int port, String host, int backlog, ChannelInitializer<SocketChannel> pipelineFactory) {
    if (host == null) {
        address = new InetSocketAddress(port);
    } else {
        address = new InetSocketAddress(host, port);
    }
    bootstrap = new ServerBootstrap();
    bootstrap.group(NettyFactory.get().getAcceptorThreads(), NettyFactory.get().getIOThreads())
            .channel(NioServerSocketChannel.class).option(ChannelOption.SO_REUSEADDR, true)
            .childHandler(pipelineFactory).localAddress(address);

    serverChannel = bootstrap.bind().syncUninterruptibly().channel();
}

From source file:io.atomix.catalyst.transport.netty.NettyServer.java

License:Apache License

/**
 * Starts listening for the given member.
 */
private void listen(Address address, Consumer<Connection> listener, ThreadContext context) {
    channelGroup = new DefaultChannelGroup("catalyst-acceptor-channels", GlobalEventExecutor.INSTANCE);

    handler = new ServerHandler(connections, listener, context, transport.properties());

    final ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(transport.eventLoopGroup()).channel(NioServerSocketChannel.class)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    ChannelPipeline pipeline = channel.pipeline();
                    if (transport.properties().sslEnabled()) {
                        pipeline.addFirst(
                                new SslHandler(new NettyTls(transport.properties()).initSslEngine(false)));
                    }
                    pipeline.addLast(FIELD_PREPENDER);
                    pipeline.addLast(new LengthFieldBasedFrameDecoder(transport.properties().maxFrameSize(), 0,
                            4, 0, 4));
                    pipeline.addLast(handler);
                }
            }).option(ChannelOption.SO_BACKLOG, transport.properties().acceptBacklog())
            .option(ChannelOption.TCP_NODELAY, transport.properties().tcpNoDelay())
            .option(ChannelOption.SO_REUSEADDR, transport.properties().reuseAddress())
            .childOption(ChannelOption.ALLOCATOR, ALLOCATOR)
            .childOption(ChannelOption.SO_KEEPALIVE, transport.properties().tcpKeepAlive());

    if (transport.properties().sendBufferSize() != -1) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, transport.properties().sendBufferSize());
    }
    if (transport.properties().receiveBufferSize() != -1) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, transport.properties().receiveBufferSize());
    }

    LOGGER.info("Binding to {}", address);

    ChannelFuture bindFuture = bootstrap.bind(address.socketAddress());
    bindFuture.addListener((ChannelFutureListener) channelFuture -> {
        if (channelFuture.isSuccess()) {
            listening = true;
            context.executor().execute(() -> {
                LOGGER.info("Listening at {}", bindFuture.channel().localAddress());
                listenFuture.complete(null);
            });
        } else {
            context.execute(() -> listenFuture.completeExceptionally(channelFuture.cause()));
        }
    });
    channelGroup.add(bindFuture.channel());
}

From source file:io.atomix.catalyst.transport.NettyServer.java

License:Apache License

/**
 * Starts listening for the given member.
 */
private void listen(Address address, Consumer<Connection> listener, ThreadContext context) {
    channelGroup = new DefaultChannelGroup("catalyst-acceptor-channels", GlobalEventExecutor.INSTANCE);

    handler = new ServerHandler(connections, listener, context);

    final ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(eventLoopGroup)
            .channel(eventLoopGroup instanceof EpollEventLoopGroup ? EpollServerSocketChannel.class
                    : NioServerSocketChannel.class)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    ChannelPipeline pipeline = channel.pipeline();
                    pipeline.addLast(FIELD_PREPENDER);
                    pipeline.addLast(new LengthFieldBasedFrameDecoder(1024 * 32, 0, 2, 0, 2));
                    pipeline.addLast(handler);
                }
            }).option(ChannelOption.SO_BACKLOG, 128).option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.ALLOCATOR, ALLOCATOR)
            .childOption(ChannelOption.SO_KEEPALIVE, true);

    LOGGER.info("Binding to {}", address);

    ChannelFuture bindFuture = bootstrap.bind(address.socketAddress());
    bindFuture.addListener((ChannelFutureListener) channelFuture -> {
        if (channelFuture.isSuccess()) {
            listening = true;
            context.executor().execute(() -> {
                LOGGER.info("Listening at {}", bindFuture.channel().localAddress());
                listenFuture.complete(null);
            });
        } else {
            context.execute(() -> listenFuture.completeExceptionally(channelFuture.cause()));
        }
    });
    channelGroup.add(bindFuture.channel());
}

From source file:io.atomix.cluster.messaging.impl.NettyBroadcastService.java

License:Apache License

private CompletableFuture<Void> bootstrapServer() {
    Bootstrap serverBootstrap = new Bootstrap().group(group)
            .channelFactory(() -> new NioDatagramChannel(InternetProtocolFamily.IPv4))
            .handler(new SimpleChannelInboundHandler<Object>() {
                @Override
                public void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
                    // Nothing will be sent.
                }
            }).option(ChannelOption.IP_MULTICAST_IF, iface).option(ChannelOption.SO_REUSEADDR, true);

    CompletableFuture<Void> future = new CompletableFuture<>();
    serverBootstrap.bind(localAddress).addListener((ChannelFutureListener) f -> {
        if (f.isSuccess()) {
            serverChannel = f.channel();
            future.complete(null);
        } else {
            future.completeExceptionally(f.cause());
        }
    });
    return future;
}

From source file:io.atomix.cluster.messaging.impl.NettyBroadcastService.java

License:Apache License

private CompletableFuture<Void> bootstrapClient() {
    Bootstrap clientBootstrap = new Bootstrap().group(group)
            .channelFactory(() -> new NioDatagramChannel(InternetProtocolFamily.IPv4))
            .handler(new SimpleChannelInboundHandler<DatagramPacket>() {
                @Override
                protected void channelRead0(ChannelHandlerContext context, DatagramPacket packet)
                        throws Exception {
                    byte[] payload = new byte[packet.content().readInt()];
                    packet.content().readBytes(payload);
                    Message message = SERIALIZER.decode(payload);
                    Set<Consumer<byte[]>> listeners = NettyBroadcastService.this.listeners
                            .get(message.subject());
                    if (listeners != null) {
                        for (Consumer<byte[]> listener : listeners) {
                            listener.accept(message.payload());
                        }
                    }
                }
            }).option(ChannelOption.IP_MULTICAST_IF, iface).option(ChannelOption.SO_REUSEADDR, true)
            .localAddress(localAddress.getPort());

    CompletableFuture<Void> future = new CompletableFuture<>();
    clientBootstrap.bind().addListener((ChannelFutureListener) f -> {
        if (f.isSuccess()) {
            clientChannel = (DatagramChannel) f.channel();
            log.info("{} joining multicast group {} on port {}", localAddress.getHostName(),
                    groupAddress.getHostName(), groupAddress.getPort());
            clientChannel.joinGroup(groupAddress, iface).addListener(f2 -> {
                if (f2.isSuccess()) {
                    log.info("{} successfully joined multicast group {} on port {}", localAddress.getHostName(),
                            groupAddress.getHostName(), groupAddress.getPort());
                    future.complete(null);
                } else {
                    log.info("{} failed to join group {} on port {}", localAddress.getHostName(),
                            groupAddress.getHostName(), groupAddress.getPort());
                    future.completeExceptionally(f2.cause());
                }
            });
        } else {
            future.completeExceptionally(f.cause());
        }
    });
    return future;
}

From source file:io.atomix.cluster.messaging.impl.NettyMessagingService.java

License:Apache License

private CompletableFuture<Void> startAcceptingConnections() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.SO_REUSEADDR, true);
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(8 * 1024, 32 * 1024));
    b.childOption(ChannelOption.SO_RCVBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_SNDBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);
    b.childOption(ChannelOption.TCP_NODELAY, true);
    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTls) {
        b.childHandler(new SslServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new BasicChannelInitializer());
    }

    // Bind and start to accept incoming connections.
    b.bind(localAddress.port()).addListener((ChannelFutureListener) f -> {
        if (f.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localAddress.address(true),
                    localAddress.port());
            serverChannel = f.channel();
            future.complete(null);
        } else {
            log.warn("{} failed to bind to port {} due to {}", localAddress.address(true), localAddress.port(),
                    f.cause());
            future.completeExceptionally(f.cause());
        }
    });
    return future;
}

From source file:io.crate.netty.CrateChannelBootstrapFactory.java

License:Apache License

public static ServerBootstrap newChannelBootstrap(String id, Settings settings) {
    EventLoopGroup boss = new NioEventLoopGroup(Netty4Transport.NETTY_BOSS_COUNT.get(settings),
            daemonThreadFactory(settings, id + "-netty-boss"));
    EventLoopGroup worker = new NioEventLoopGroup(Netty4Transport.WORKER_COUNT.get(settings),
            daemonThreadFactory(settings, id + "-netty-worker"));
    Boolean reuseAddress = Netty4Transport.TCP_REUSE_ADDRESS.get(settings);
    return new ServerBootstrap().channel(NioServerSocketChannel.class).group(boss, worker)
            .option(ChannelOption.SO_REUSEADDR, reuseAddress)
            .childOption(ChannelOption.SO_REUSEADDR, reuseAddress)
            .childOption(ChannelOption.TCP_NODELAY, Netty4Transport.TCP_NO_DELAY.get(settings))
            .childOption(ChannelOption.SO_KEEPALIVE, Netty4Transport.TCP_KEEP_ALIVE.get(settings));
}
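The factory above returns a configured ServerBootstrap but attaches no child handler and performs no bind; that is left to the caller. A hypothetical caller sketch follows, assuming Elasticsearch's Settings.EMPTY, the id "demo", and port 4300 (all placeholders, not taken from the Crate sources).

import io.crate.netty.CrateChannelBootstrapFactory;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import org.elasticsearch.common.settings.Settings;

public class CrateBootstrapCaller {
    public static void main(String[] args) {
        // Settings.EMPTY and port 4300 are placeholders for illustration.
        ServerBootstrap bootstrap = CrateChannelBootstrapFactory.newChannelBootstrap("demo", Settings.EMPTY);
        bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // Protocol handlers would be added here.
            }
        });
        Channel serverChannel = bootstrap.bind(4300).syncUninterruptibly().channel();
        serverChannel.closeFuture().syncUninterruptibly();
        // Shut down the event loop groups created inside the factory.
        bootstrap.config().group().shutdownGracefully();
        bootstrap.config().childGroup().shutdownGracefully();
    }
}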

From source file:io.hekate.cluster.seed.multicast.MulticastSeedNodeProvider.java

License:Apache License

@Override
public void startDiscovery(String cluster, InetSocketAddress address) throws HekateException {
    log.info("Starting seed nodes discovery [cluster={}, {}]", cluster, ToString.formatProperties(this));

    SeedNode thisNode = new SeedNode(address, cluster);

    try {
        NetworkInterface nif = selectMulticastInterface(address);

        try {
            synchronized (mux) {
                if (isRegistered()) {
                    throw new IllegalStateException(
                            "Multicast seed node provider is already registered with another address "
                                    + "[existing=" + localNode + ']');
                }

                ByteBuf discoveryMsg = prepareDiscovery(thisNode);

                ByteBuf seedNodeInfoBytes = prepareSeedNodeInfo(thisNode);

                localNode = thisNode;

                seedNodes = new HashSet<>();

                eventLoop = new NioEventLoopGroup(1, new HekateThreadFactory("SeedNodeMulticast"));

                // Prepare common bootstrap options.
                Bootstrap bootstrap = new Bootstrap();

                bootstrap.option(ChannelOption.SO_REUSEADDR, true);
                bootstrap.option(ChannelOption.IP_MULTICAST_TTL, ttl);
                bootstrap.option(ChannelOption.IP_MULTICAST_IF, nif);

                if (loopBackDisabled) {
                    bootstrap.option(ChannelOption.IP_MULTICAST_LOOP_DISABLED, true);

                    if (DEBUG) {
                        log.debug("Setting {} option to true", ChannelOption.IP_MULTICAST_LOOP_DISABLED);
                    }
                }

                bootstrap.group(eventLoop);
                bootstrap.channelFactory(() -> new NioDatagramChannel(ipVer));

                // Create a sender channel (not joined to a multicast group).
                bootstrap.localAddress(0);
                bootstrap.handler(createSenderHandler(thisNode));

                ChannelFuture senderBind = bootstrap.bind();

                DatagramChannel localSender = (DatagramChannel) senderBind.channel();

                sender = localSender;

                senderBind.get();

                // Create a listener channel and join to a multicast group.
                bootstrap.localAddress(group.getPort());

                bootstrap.handler(createListenerHandler(thisNode, seedNodeInfoBytes));

                ChannelFuture listenerBind = bootstrap.bind();

                listener = (DatagramChannel) listenerBind.channel();

                listenerBind.get();

                log.info("Joining to a multicast group " + "[address={}, port={}, interface={}, ttl={}]",
                        AddressUtils.host(group), group.getPort(), nif.getName(), ttl);

                listener.joinGroup(group, nif).get();

                // Create a periodic task for discovery messages sending.
                discoveryFuture = eventLoop.scheduleWithFixedDelay(() -> {
                    if (DEBUG) {
                        log.debug("Sending discovery message [from={}]", thisNode);
                    }

                    DatagramPacket discovery = new DatagramPacket(discoveryMsg.copy(), group);

                    localSender.writeAndFlush(discovery);
                }, 0, interval, TimeUnit.MILLISECONDS);
            }
        } catch (ExecutionException e) {
            cleanup();

            throw new HekateException(
                    "Failed to start a multicast seed nodes discovery [node=" + thisNode + ']', e.getCause());
        }

        log.info("Will wait for seed nodes [timeout={}(ms)]", waitTime);

        Thread.sleep(waitTime);
    } catch (InterruptedException e) {
        cleanup();

        Thread.currentThread().interrupt();

        throw new HekateException(
                "Thread was interrupted while awaiting for multicast discovery [node=" + thisNode + ']', e);
    }

    log.info("Done waiting for seed nodes.");
}

From source file:io.hekate.network.netty.NettyServer.java

License:Apache License

private void setOpts(ServerBootstrap boot) {
    setUserOpt(boot, ChannelOption.SO_BACKLOG, soBacklog);
    setUserOpt(boot, ChannelOption.SO_RCVBUF, soReceiveBufferSize);
    setUserOpt(boot, ChannelOption.SO_REUSEADDR, soReuseAddress);

    if (!autoAccept) {
        setUserOpt(boot, ChannelOption.AUTO_READ, false);
    }
}
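setUserOpt is a private helper of Hekate's NettyServer that is not shown on this page. As a rough, hypothetical stand-in, assuming it simply skips options the user did not configure (null values), it might look like the following; see the Hekate sources for the real implementation.

// Hypothetical stand-in for setUserOpt: apply an option only when the user
// configured a non-null value, otherwise keep Netty's default.
private static <O> void setUserOpt(ServerBootstrap boot, ChannelOption<O> opt, O value) {
    if (value != null) {
        boot.option(opt, value);
    }
}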