Example usage for io.netty.channel ChannelOption SO_KEEPALIVE

List of usage examples for io.netty.channel ChannelOption SO_KEEPALIVE

Introduction

This page collects example usages of io.netty.channel.ChannelOption.SO_KEEPALIVE from open-source projects.

Prototype

ChannelOption<Boolean> SO_KEEPALIVE

SO_KEEPALIVE is a ChannelOption<Boolean> that maps to the standard TCP SO_KEEPALIVE socket option: when enabled on a channel, the operating system periodically sends keep-alive probes on an otherwise idle connection so that dead peers can be detected.
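Before the individual examples, here is a minimal, self-contained sketch (not taken from any project on this page) of the two patterns that recur below: option(ChannelOption.SO_KEEPALIVE, true) on a client Bootstrap, and childOption(ChannelOption.SO_KEEPALIVE, true) on a ServerBootstrap. The class name KeepAliveSketch and the empty initializers are placeholders.

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class KeepAliveSketch {

    public static void main(String[] args) {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            // Client side: SO_KEEPALIVE is set with option() and applies to the
            // outbound connection created by connect().
            Bootstrap client = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .option(ChannelOption.SO_KEEPALIVE, true)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // application handlers would be added here
                        }
                    });

            // Server side: SO_KEEPALIVE is set with childOption() so it applies to
            // each accepted child channel rather than the listening channel.
            ServerBootstrap server = new ServerBootstrap()
                    .group(group)
                    .channel(NioServerSocketChannel.class)
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // application handlers would be added here
                        }
                    });

            // A real application would now call client.connect(host, port) or
            // server.bind(port), as the examples below do.
        } finally {
            group.shutdownGracefully();
        }
    }
}

In the examples that follow, the client bootstraps call connect(host, port) and the server bootstraps call bind(port) after this configuration step.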

Usage

From source file:jgnash.engine.attachment.AttachmentTransferClient.java

License:Open Source License

/**
 * Starts the connection with the lock server
 *
 * @return {@code true} if successful
 */
public boolean connectToServer(final String host, final int port, final char[] password) {
    boolean result = false;

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    final Bootstrap bootstrap = new Bootstrap();

    eventLoopGroup = new NioEventLoopGroup();

    transferHandler = new NettyTransferHandler(tempDirectory, encryptionManager);

    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(new Initializer())
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, ConnectionFactory.getConnectionTimeout() * 1000)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        // Start the connection attempt.
        channel = bootstrap.connect(host, port).sync().channel();

        result = true;
        logger.info("Connection made with File Transfer Server");
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, "Failed to connect to the File Transfer Server", e);
        disconnectFromServer();
    }

    return result;
}

From source file:jgnash.engine.attachment.AttachmentTransferServer.java

License:Open Source License

public boolean startServer(final char[] password) {
    boolean result = false;

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(eventLoopGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    public void initChannel(final SocketChannel ch) throws Exception {

                        ch.pipeline().addLast(
                                new DelimiterBasedFrameDecoder(((TRANSFER_BUFFER_SIZE + 2) / 3) * 4 + PATH_MAX,
                                        true, Delimiters.lineDelimiter()),

                                new StringEncoder(CharsetUtil.UTF_8), new StringDecoder(CharsetUtil.UTF_8),

                                new Base64Encoder(), new Base64Decoder(),

                                new ServerTransferHandler());
                    }
                });

        // Start the server.
        final ChannelFuture future = b.bind(port).sync();

        if (future.isDone() && future.isSuccess()) {
            result = true;
            logger.info("File Transfer Server started successfully");
        } else {
            logger.info("Failed to start the File Transfer Server");
        }
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
        stopServer();
    }

    return result;
}

From source file:jgnash.engine.concurrent.DistributedLockManager.java

License:Open Source License

/**
 * Starts the connection with the lock server
 *
 * @param password connection password
 * @return {@code true} if successful
 */
public boolean connectToServer(final char[] password) {
    boolean result = false;

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    final Bootstrap bootstrap = new Bootstrap();

    eventLoopGroup = new NioEventLoopGroup();

    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(new Initializer())
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, ConnectionFactory.getConnectionTimeout() * 1000)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        // Start the connection attempt.
        channel = bootstrap.connect(host, port).sync().channel();

        channel.writeAndFlush(encrypt(UUID_PREFIX + uuid) + EOL_DELIMITER).sync(); // send this channel's uuid

        result = true;
        logger.info("Connection made with Distributed Lock Server");
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, "Failed to connect to Distributed Lock Server", e);
        disconnectFromServer();
    }

    return result;
}

From source file:jgnash.engine.concurrent.DistributedLockServer.java

License:Open Source License

public boolean startServer(final char[] password) {
    boolean result = false;

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    eventLoopGroup = new NioEventLoopGroup();

    final ServerBootstrap bootstrap = new ServerBootstrap();

    try {
        bootstrap.group(eventLoopGroup).channel(NioServerSocketChannel.class).childHandler(new Initializer())
                .childOption(ChannelOption.SO_KEEPALIVE, true);

        final ChannelFuture future = bootstrap.bind(port);
        future.sync();

        if (future.isDone() && future.isSuccess()) {
            logger.info("Distributed Lock Server started successfully");
            result = true;
        } else {
            logger.info("Failed to start the Distributed Lock Server");
        }
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
        stopServer();
    }

    return result;
}

From source file:jgnash.engine.message.MessageBusClient.java

License:Open Source License

public boolean connectToServer(final char[] password) {
    boolean result = false;

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    eventLoopGroup = new NioEventLoopGroup();

    final Bootstrap bootstrap = new Bootstrap();

    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(new MessageBusClientInitializer())
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getConnectionTimeout() * 1000)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        // Start the connection attempt.
        channel = bootstrap.connect(host, port).sync().channel();

        result = true;
        logger.info("Connected to remote message server");
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, "Failed to connect to remote message bus", e);
        disconnectFromServer();
    }

    return result;
}

From source file:jgnash.engine.message.MessageBusServer.java

License:Open Source License

public boolean startServer(final DataStoreType dataStoreType, final String dataBasePath,
        final char[] password) {
    boolean result = false;

    logger.info("Starting message bus server");

    this.dataBasePath = dataBasePath;
    this.dataStoreType = dataStoreType.name();

    // If a password has been specified, create an EncryptionManager
    if (password != null && password.length > 0) {
        encryptionManager = new EncryptionManager(password);
    }

    eventLoopGroup = new NioEventLoopGroup();

    final ServerBootstrap bootstrap = new ServerBootstrap();

    try {
        bootstrap.group(eventLoopGroup).channel(NioServerSocketChannel.class)
                .childHandler(new MessageBusRemoteInitializer()).childOption(ChannelOption.SO_KEEPALIVE, true);

        final ChannelFuture future = bootstrap.bind(port);
        future.sync();

        if (future.isDone() && future.isSuccess()) {
            logger.info("Message Bus Server started successfully");
            result = true;
        } else {
            logger.info("Failed to start the Message Bus Server");
        }
    } catch (final InterruptedException e) {
        logger.log(Level.SEVERE, e.getLocalizedMessage(), e);
        stopServer();
    }

    return result;
}

From source file:jj.http.server.HttpServer.java

License:Apache License

private void makeServerBootstrap(int bindingCount) {
    serverBootstrap = new ServerBootstrap()
            .group(new NioEventLoopGroup(bindingCount, threadFactory), ioEventLoopGroup)
            .channel(NioServerSocketChannel.class).childHandler(initializer)
            .option(ChannelOption.SO_KEEPALIVE, configuration.keepAlive())
            .option(ChannelOption.SO_REUSEADDR, configuration.reuseAddress())
            .option(ChannelOption.TCP_NODELAY, configuration.tcpNoDelay())
            .option(ChannelOption.SO_TIMEOUT, configuration.timeout())
            .option(ChannelOption.SO_BACKLOG, configuration.backlog())
            .option(ChannelOption.SO_RCVBUF, configuration.receiveBufferSize())
            .option(ChannelOption.SO_SNDBUF, configuration.sendBufferSize());
}

From source file:jmeter.plugins.http2.sampler.NettyHttp2Client.java

License:Apache License

public SampleResult request() {
    SampleResult sampleResult = new SampleResult();

    final SslContext sslCtx = getSslContext();
    if (sslCtx == null) {
        sampleResult.setSuccessful(false);
        return sampleResult;
    }

    // Configure the client.
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    Http2ClientInitializer initializer = new Http2ClientInitializer(sslCtx, Integer.MAX_VALUE);
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioSocketChannel.class);
    b.option(ChannelOption.SO_KEEPALIVE, true);
    b.remoteAddress(host, port);
    b.handler(initializer);

    // Start sampling
    sampleResult.sampleStart();

    // Start the client.
    Channel channel = b.connect().syncUninterruptibly().channel();

    // Wait for the HTTP/2 upgrade to occur.
    Http2SettingsHandler http2SettingsHandler = initializer.settingsHandler();
    try {
        http2SettingsHandler.awaitSettings(5, TimeUnit.SECONDS);
    } catch (Exception exception) {
        sampleResult.setSuccessful(false);
        return sampleResult;
    }

    HttpResponseHandler responseHandler = initializer.responseHandler();
    final int streamId = 3;
    final URI hostName = URI.create("https://" + host + ':' + port);

    // Set attributes to SampleResult
    try {
        sampleResult.setURL(new URL(hostName.toString()));
    } catch (MalformedURLException exception) {
        sampleResult.setSuccessful(false);
        return sampleResult;
    }

    FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, GET, path);
    request.headers().addObject(HttpHeaderNames.HOST, hostName);

    // Add request headers set by HeaderManager
    if (headerManager != null) {
        CollectionProperty headers = headerManager.getHeaders();
        if (headers != null) {
            PropertyIterator i = headers.iterator();
            while (i.hasNext()) {
                org.apache.jmeter.protocol.http.control.Header header = (org.apache.jmeter.protocol.http.control.Header) i
                        .next().getObjectValue();
                request.headers().add(header.getName(), header.getValue());
            }
        }
    }

    channel.writeAndFlush(request);
    responseHandler.put(streamId, channel.newPromise());

    final SortedMap<Integer, FullHttpResponse> responseMap;
    try {
        responseMap = responseHandler.awaitResponses(5, TimeUnit.SECONDS);

        // Currently pick up only one response of a stream
        final FullHttpResponse response = responseMap.get(streamId);
        final AsciiString responseCode = response.status().codeAsText();
        final AsciiString reasonPhrase = response.status().reasonPhrase();
        sampleResult.setResponseCode(new StringBuilder(responseCode.length()).append(responseCode).toString());
        sampleResult
                .setResponseMessage(new StringBuilder(reasonPhrase.length()).append(reasonPhrase).toString());
        sampleResult.setResponseHeaders(getResponseHeaders(response));
    } catch (Exception exception) {
        sampleResult.setSuccessful(false);
        return sampleResult;
    }

    // Wait until the connection is closed.
    channel.close().syncUninterruptibly();

    // End sampling
    sampleResult.sampleEnd();
    sampleResult.setSuccessful(true);

    return sampleResult;
}

From source file:jun.flume.netty.http.HTTPSource.java

License:Apache License

@Override
public void start() {
    Preconditions.checkState(srv == null, "Running HTTP Server found in source: " + getName()
            + " before I started one." + "Will not attempt to start.");
    bossGroup = new NioEventLoopGroup();
    workerGroup = new NioEventLoopGroup();
    LOG.info("HTTP Server in source:" + getName() + " starting...");
    srv = new ServerBootstrap();

    try {
        srv.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new HttpRequestDecoder());
                        ch.pipeline().addLast(new HttpObjectAggregator(nettyHttpObjectAggregatorMaxLength));
                        ch.pipeline().addLast(new HttpResponseEncoder());
                        ch.pipeline().addLast(new ResponseHandler());
                    }
                }).option(ChannelOption.SO_BACKLOG, 128).childOption(ChannelOption.SO_KEEPALIVE, true);

        t = new Thread() {
            @Override
            public void run() {
                try {
                    f = srv.bind(host, port).sync();
                    f.channel().closeFuture().sync();
                } catch (InterruptedException ex) {
                    LOG.warn("Thread interrupted... ");
                } catch (Exception ex) {
                    LOG.error("Error while starting HTTPSource. Exception follows.", ex);
                    Throwables.propagate(ex);
                } finally {
                    workerGroup.shutdownGracefully();
                    bossGroup.shutdownGracefully();
                }
            }
        };

        t.start();
        LOG.info("HTTP Server in source:" + getName() + " started...");
    } catch (Exception ex) {
        LOG.error("Error while starting HTTPSource. Exception follows.", ex);
        Throwables.propagate(ex);
    }
    sourceCounter.start();
    super.start();
}

From source file:lunarion.cluster.coordinator.server.CoordinatorServer.java

License:Open Source License

public void bind(int port) throws InterruptedException {

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                    new AdaptiveRecvByteBufAllocator(64, 1024, 65536 * 512))
            //.childHandler(new ChildChannelHandler())
            .childHandler(new CoordinatorServerChannelInitializer(co, logger))
            .option(ChannelOption.SO_BACKLOG, 1024).childOption(ChannelOption.SO_KEEPALIVE, true);

    Channel ch = bootstrap.bind(port).sync().channel();
    System.err
            .println(Timer.currentTime() + "[INFO]: coordinator server channel bound at port: " + port + '.');
    logger.info(Timer.currentTime() + " [COORDINATOR INFO]:  coordinator server channel bound at port: " + port
            + '.');
    ch.closeFuture().sync();
    /*
     *  Bind and start to accept incoming connections.
     */
    // ChannelFuture future = bootstrap.bind(port).sync();
    /*
     *  Wait until the server socket is closed.
     *  In this example, this does not happen, but you can do that to gracefully 
     *  shut down your server.
     */

    // future.channel().closeFuture().sync();

}