Example usage for io.netty.channel.nio NioEventLoopGroup NioEventLoopGroup

List of usage examples for io.netty.channel.nio NioEventLoopGroup NioEventLoopGroup

Introduction

On this page you can find example usage of io.netty.channel.nio NioEventLoopGroup NioEventLoopGroup.

Prototype

public NioEventLoopGroup(ThreadFactory threadFactory) 

Source Link

Document

Create a new instance using the default number of threads, the given ThreadFactory and the SelectorProvider which is returned by SelectorProvider#provider() .

Usage

From source file:com.cdg.study.netty.discard.DiscardServer.java

License:Open Source License

/**
 * Entry point: boots a discard server on port 8010.
 *
 * <p>Uses a single-threaded boss group to accept connections and a
 * default-sized worker group (2 * cores) for I/O; both are shut down
 * gracefully on exit.
 *
 * @param args unused command-line arguments
 * @throws Exception if binding or waiting on the channel is interrupted
 */
public static void main(String[] args) throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // single acceptor thread
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // default: 2 * available cores
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new DiscardServerHandler());

        ChannelFuture bindFuture = bootstrap.bind(8010).sync();

        System.err.println("Ready for 0.0.0.0:8010");

        // Block until the server socket is closed.
        bindFuture.channel().closeFuture().sync();
    } finally {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}

From source file:com.cdg.study.netty.echo.EchoServer.java

License:Open Source License

/**
 * Entry point: boots an echo server on port 8011 and blocks until the
 * server channel is closed.
 *
 * @param args unused command-line arguments
 * @throws Exception if binding or waiting on the channel is interrupted
 */
public static void main(String[] args) throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // acceptor
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // I/O workers (default: 2 * cores)
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new EchoServerHandler());

        ChannelFuture bindFuture = bootstrap.bind(8011).sync();

        // Block until the server socket is closed.
        bindFuture.channel().closeFuture().sync();
    } finally {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}

From source file:com.cdg.study.netty.util.NettyStartupUtil.java

License:Open Source License

/**
 * Boots a Netty NIO server on the given port, lets {@code block} apply
 * extra bootstrap customization, and blocks until the server channel closes.
 *
 * @param port         TCP port to bind on all interfaces
 * @param childHandler handler installed on each accepted child channel
 * @param block        callback that may further configure the bootstrap
 *                     before binding
 * @throws Exception if binding or waiting on the channel is interrupted
 */
public static void runServer(int port, ChannelHandler childHandler, Consumer<ServerBootstrap> block)
        throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // acceptor
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // I/O workers (default: 2 * cores)
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(childHandler);
        // Give the caller a chance to tweak options before binding.
        block.accept(bootstrap);

        Channel serverChannel = bootstrap.bind(port).sync().channel();
        System.err.println("Ready for 0.0.0.0:" + port);
        serverChannel.closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.chen.opensourceframework.netty.discard.DiscardServer.java

License:Apache License

/**
 * Entry point: starts a discard server on {@code PORT}, optionally over TLS
 * (throwaway self-signed certificate) when {@code SSL} is set.
 *
 * @param args unused command-line arguments
 * @throws Exception on certificate creation failure or interruption while
 *         binding / waiting for shutdown
 */
public static void main(String[] args) throws Exception {
    // TLS setup: a self-signed certificate is fine for a demo, never for production.
    final SslContext sslCtx;
    if (SSL) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    } else {
        sslCtx = null;
    }

    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // acceptor
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // I/O workers (default: 2 * cores)
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) {
                        ChannelPipeline pipeline = ch.pipeline();
                        // TLS handler must sit first in the pipeline when enabled.
                        if (sslCtx != null) {
                            pipeline.addLast(sslCtx.newHandler(ch.alloc()));
                        }
                        pipeline.addLast(new DiscardServerHandler());
                    }
                });

        // Bind and start to accept incoming connections.
        ChannelFuture bindFuture = bootstrap.bind(PORT).sync();

        // Wait until the server socket is closed. In this example that never
        // happens, but this is how a graceful shutdown would be awaited.
        bindFuture.channel().closeFuture().sync();
    } finally {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}

From source file:com.chenyang.proxy.EchoServer.java

License:Apache License

/**
 * Starts the echo server on {@code PORT}, optionally over TLS (throwaway
 * self-signed certificate) when {@code SSL} is set, and blocks until the
 * server channel is closed.
 *
 * @throws Exception on certificate creation failure or interruption while
 *         binding / waiting for shutdown
 */
public static void start() throws Exception {
    // TLS setup: a self-signed certificate is fine for a demo, never for production.
    final SslContext sslCtx;
    if (SSL) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
    } else {
        sslCtx = null;
    }

    // Configure the server with a dedicated acceptor group and worker group.
    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // acceptor
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // I/O workers (default: 2 * cores)
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ChannelPipeline pipeline = ch.pipeline();
                        // TLS handler must sit first in the pipeline when enabled.
                        if (sslCtx != null) {
                            pipeline.addLast(sslCtx.newHandler(ch.alloc()));
                        }
                        pipeline.addLast(new EchoServerHandler());
                    }
                });

        // Start the server.
        System.out.println(" server start in port " + PORT);
        ChannelFuture bindFuture = bootstrap.bind(PORT).sync();

        // Wait until the server socket is closed.
        bindFuture.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.chenyang.proxy.http.HttpServer.java

License:Apache License

/**
 * Starts the HTTP proxy server on {@code Constants.Http.PORT} and blocks
 * until the server channel is closed.
 *
 * <p>Uses a single-threaded boss group for accepting connections and a
 * default-sized worker group for I/O; both groups are assigned to fields
 * and shut down gracefully on exit.
 */
public void start() {
    int port = Constants.Http.PORT;

    logger.info("ApnProxy Server Listen on: " + port);

    ServerBootstrap serverBootStrap = new ServerBootstrap();

    bossGroup = new NioEventLoopGroup(1); // single acceptor thread
    workerGroup = new NioEventLoopGroup(); // default: 2 * available cores

    try {
        serverBootStrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).localAddress(port)
                .childHandler(new HttpServerChannelInitializer());
        // Bind, then block until the server channel is closed.
        serverBootStrap.bind().sync().channel().closeFuture().sync();
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe it.
        Thread.currentThread().interrupt();
        logger.error(e.getMessage(), e);
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    } finally {
        // Fix: was logger.error("showdown the server") — a normal shutdown
        // notice with a typo, logged at the wrong severity.
        logger.info("shutting down the server");
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.chuck.netty4.websocket.WebSocketServer.java

License:Apache License

/**
 * Entry point: serves a WebSocket endpoint on {@code PORT}, optionally over
 * TLS (throwaway self-signed certificate) when {@code SSL} is set.
 *
 * @param args unused command-line arguments
 * @throws Exception on certificate creation failure or interruption while
 *         binding / waiting for shutdown
 */
public static void main(String[] args) throws Exception {
    // TLS setup: a self-signed certificate is fine for a demo, never for production.
    final SslContext sslCtx;
    if (SSL) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    } else {
        sslCtx = null;
    }

    EventLoopGroup bossGroup = new NioEventLoopGroup(1); // acceptor
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // I/O workers (default: 2 * cores)
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new WebSocketServerInitializer(sslCtx));

        Channel serverChannel = bootstrap.bind(PORT).sync().channel();

        System.out.println("Open your web browser and navigate to " + (SSL ? "https" : "http") + "://127.0.0.1:"
                + PORT + '/');

        // Block until the server socket is closed.
        serverChannel.closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.cloudhopper.smpp.demo.ClientMain.java

License:Apache License

/**
 * Interactive demo: binds one SMPP client session, sends a synchronous and
 * an asynchronous enquire_link, submits a single SMS, then unbinds, dumps
 * the session counters, and tears everything down.
 *
 * <p>The demo pauses on {@code System.in.read()} between each step so the
 * operator can watch the exchange on the SMSC side.
 *
 * @param args unused command-line arguments
 * @throws Exception if reading from stdin fails; session-level errors are
 *         caught and logged inside the method
 */
static public void main(String[] args) throws Exception {
    //
    // setup 3 things required for any session we plan on creating
    //

    // create and assign the NioEventLoopGroup instances to handle event processing,
    // such as accepting new connections, receiving data, writing data, and so on.
    NioEventLoopGroup group = new NioEventLoopGroup(1);

    // to enable automatic expiration of requests, a second scheduled executor
    // is required which is what a monitor task will be executed with - this
    // is probably a thread pool that can be shared between all client bootstraps
    ScheduledThreadPoolExecutor monitorExecutor = (ScheduledThreadPoolExecutor) Executors
            .newScheduledThreadPool(1, new ThreadFactory() {
                private AtomicInteger sequence = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    // Name monitor threads so they are identifiable in thread dumps.
                    Thread t = new Thread(r);
                    t.setName("SmppClientSessionWindowMonitorPool-" + sequence.getAndIncrement());
                    return t;
                }
            });

    // a single instance of a client bootstrap can technically be shared
    // between any sessions that are created (a session can go to any different
    // number of SMSCs) - each session created under
    // a client bootstrap will use the executor and monitorExecutor set
    // in its constructor - just be *very* careful with the "expectedSessions"
    // value to make sure it matches the actual number of total concurrent
    // open sessions you plan on handling - the underlying netty library
    // used for NIO sockets essentially uses this value as the max number of
    // threads it will ever use, despite the "max pool size", etc. set on
    // the executor passed in here
    DefaultSmppClient clientBootstrap = new DefaultSmppClient(group, monitorExecutor);

    //
    // setup configuration for a client session
    //
    DefaultSmppSessionHandler sessionHandler = new ClientSmppSessionHandler();

    SmppSessionConfiguration config0 = new SmppSessionConfiguration();
    config0.setWindowSize(1);
    config0.setName("Tester.Session.0");
    config0.setType(SmppBindType.TRANSCEIVER);
    config0.setHost("127.0.0.1");
    config0.setPort(2776);
    config0.setConnectTimeout(10000);
    config0.setSystemId("1234567890");
    config0.setPassword("password");
    config0.getLoggingOptions().setLogBytes(true);
    // to enable monitoring (request expiration)
    config0.setRequestExpiryTimeout(30000);
    config0.setWindowMonitorInterval(15000);
    config0.setCountersEnabled(true);

    //
    // create session, enquire link, submit an sms, close session
    //
    SmppSession session0 = null;

    try {
        // create a session by having the bootstrap connect a
        // socket, send the bind request, and wait for a bind response
        session0 = clientBootstrap.bind(config0, sessionHandler);

        System.out.println("Press any key to send enquireLink #1");
        System.in.read();

        // demo of a "synchronous" enquireLink call - send it and wait for a response
        EnquireLinkResp enquireLinkResp1 = session0.enquireLink(new EnquireLink(), 10000);
        logger.info("enquire_link_resp #1: commandStatus [" + enquireLinkResp1.getCommandStatus() + "="
                + enquireLinkResp1.getResultMessage() + "]");

        System.out.println("Press any key to send enquireLink #2");
        System.in.read();

        // demo of an "asynchronous" enquireLink call - send it, get a future,
        // and then optionally choose to pick when we wait for it
        WindowFuture<Integer, PduRequest, PduResponse> future0 = session0.sendRequestPdu(new EnquireLink(),
                10000, true);
        if (!future0.await()) {
            logger.error("Failed to receive enquire_link_resp within specified time");
        } else if (future0.isSuccess()) {
            EnquireLinkResp enquireLinkResp2 = (EnquireLinkResp) future0.getResponse();
            logger.info("enquire_link_resp #2: commandStatus [" + enquireLinkResp2.getCommandStatus() + "="
                    + enquireLinkResp2.getResultMessage() + "]");
        } else {
            logger.error("Failed to properly receive enquire_link_resp: " + future0.getCause());
        }

        System.out.println("Press any key to send submit #1");
        System.in.read();

        // 160-character sample message (note the leading Euro sign) encoded
        // with the GSM default alphabet.
        String text160 = "\u20AC Lorem [ipsum] dolor sit amet, consectetur adipiscing elit. Proin feugiat, leo id commodo tincidunt, nibh diam ornare est, vitae accumsan risus lacus sed sem metus.";
        byte[] textBytes = CharsetUtil.encode(text160, CharsetUtil.CHARSET_GSM);

        SubmitSm submit0 = new SubmitSm();

        // add delivery receipt
        //submit0.setRegisteredDelivery(SmppConstants.REGISTERED_DELIVERY_SMSC_RECEIPT_REQUESTED);

        submit0.setSourceAddress(new Address((byte) 0x03, (byte) 0x00, "40404"));
        submit0.setDestAddress(new Address((byte) 0x01, (byte) 0x01, "44555519205"));
        submit0.setShortMessage(textBytes);

        SubmitSmResp submitResp = session0.submit(submit0, 10000);

        logger.info("sendWindow.size: {}", session0.getSendWindow().getSize());

        System.out.println("Press any key to unbind and close sessions");
        System.in.read();

        session0.unbind(5000);
    } catch (Exception e) {
        logger.error("", e);
    }

    // Dump final counters before destroying the session (bind may have failed,
    // hence the null check).
    if (session0 != null) {
        logger.info("Cleaning up session... (final counters)");
        if (session0.hasCounters()) {
            logger.info("tx-enquireLink: {}", session0.getCounters().getTxEnquireLink());
            logger.info("tx-submitSM: {}", session0.getCounters().getTxSubmitSM());
            logger.info("tx-deliverSM: {}", session0.getCounters().getTxDeliverSM());
            logger.info("tx-dataSM: {}", session0.getCounters().getTxDataSM());
            logger.info("rx-enquireLink: {}", session0.getCounters().getRxEnquireLink());
            logger.info("rx-submitSM: {}", session0.getCounters().getRxSubmitSM());
            logger.info("rx-deliverSM: {}", session0.getCounters().getRxDeliverSM());
            logger.info("rx-dataSM: {}", session0.getCounters().getRxDataSM());
        }

        session0.destroy();
        // alternatively, could call close(), get outstanding requests from
        // the sendWindow (if we wanted to retry them later), then call shutdown()
    }

    // Required so non-daemon threads do not keep the JVM alive; this should
    // also close any Channels that are still open.
    logger.info("Shutting down client bootstrap and executors...");
    clientBootstrap.destroy();
    monitorExecutor.shutdownNow();

    logger.info("Done. Exiting");
}

From source file:com.cloudhopper.smpp.demo.PerformanceClientMain.java

License:Apache License

/**
 * Performance test driver: binds {@code SESSION_COUNT} SMPP client sessions
 * concurrently, releases them all at once via a start latch, waits for every
 * {@code ClientSessionTask} to finish, and reports aggregate throughput
 * (submits per second) plus per-session counters.
 *
 * @param args unused command-line arguments
 * @throws Exception if any session fails to bind within 7 seconds, or if
 *         waiting on latches/executors is interrupted
 */
static public void main(String[] args) throws Exception {
    //
    // setup 3 things required for any session we plan on creating
    //

    // create and assign the NioEventLoopGroup instances to handle event processing,
    // such as accepting new connections, receiving data, writing data, and so on.
    NioEventLoopGroup group = new NioEventLoopGroup(1);

    // to enable automatic expiration of requests, a second scheduled executor
    // is required which is what a monitor task will be executed with - this
    // is probably a thread pool that can be shared between all client bootstraps
    ScheduledThreadPoolExecutor monitorExecutor = (ScheduledThreadPoolExecutor) Executors
            .newScheduledThreadPool(1, new ThreadFactory() {
                private AtomicInteger sequence = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    // Name monitor threads so they are identifiable in thread dumps.
                    Thread t = new Thread(r);
                    t.setName("SmppClientSessionWindowMonitorPool-" + sequence.getAndIncrement());
                    return t;
                }
            });

    // a single instance of a client bootstrap can technically be shared
    // between any sessions that are created (a session can go to any different
    // number of SMSCs) - each session created under
    // a client bootstrap will use the executor and monitorExecutor set
    // in its constructor - just be *very* careful with the "expectedSessions"
    // value to make sure it matches the actual number of total concurrent
    // open sessions you plan on handling - the underlying netty library
    // used for NIO sockets essentially uses this value as the max number of
    // threads it will ever use, despite the "max pool size", etc. set on
    // the executor passed in here
    DefaultSmppClient clientBootstrap = new DefaultSmppClient(group, monitorExecutor);

    // same configuration for each client runner
    SmppSessionConfiguration config = new SmppSessionConfiguration();
    config.setWindowSize(WINDOW_SIZE);
    config.setName("Tester.Session.0");
    config.setType(SmppBindType.TRANSCEIVER);
    config.setHost("127.0.0.1");
    config.setPort(2776);
    config.setConnectTimeout(10000);
    config.setSystemId("1234567890");
    config.setPassword("password");
    config.getLoggingOptions().setLogBytes(false);
    // to enable monitoring (request expiration)
    config.setRequestExpiryTimeout(30000);
    config.setWindowMonitorInterval(15000);
    config.setCountersEnabled(true);

    // various latches used to signal when things are ready
    CountDownLatch allSessionsBoundSignal = new CountDownLatch(SESSION_COUNT);
    CountDownLatch startSendingSignal = new CountDownLatch(1);

    // create all session runners and executors to run them
    ThreadPoolExecutor taskExecutor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    ClientSessionTask[] tasks = new ClientSessionTask[SESSION_COUNT];
    for (int i = 0; i < SESSION_COUNT; i++) {
        tasks[i] = new ClientSessionTask(allSessionsBoundSignal, startSendingSignal, clientBootstrap, config);
        taskExecutor.submit(tasks[i]);
    }

    // wait for all sessions to bind
    logger.info("Waiting up to 7 seconds for all sessions to bind...");
    if (!allSessionsBoundSignal.await(7000, TimeUnit.MILLISECONDS)) {
        throw new Exception("One or more sessions were unable to bind, cancelling test");
    }

    // Release every task at once so the timed window starts together.
    logger.info("Sending signal to start test...");
    long startTimeMillis = System.currentTimeMillis();
    startSendingSignal.countDown();

    // wait for all tasks to finish
    taskExecutor.shutdown();
    taskExecutor.awaitTermination(3, TimeUnit.DAYS);
    long stopTimeMillis = System.currentTimeMillis();

    // did everything succeed? tally successful submits and failed sessions
    int actualSubmitSent = 0;
    int sessionFailures = 0;
    for (int i = 0; i < SESSION_COUNT; i++) {
        if (tasks[i].getCause() != null) {
            sessionFailures++;
            logger.error("Task #" + i + " failed with exception: " + tasks[i].getCause());
        } else {
            actualSubmitSent += tasks[i].getSubmitRequestSent();
        }
    }

    logger.info("Performance client finished:");
    logger.info("       Sessions: " + SESSION_COUNT);
    logger.info("    Window Size: " + WINDOW_SIZE);
    logger.info("Sessions Failed: " + sessionFailures);
    logger.info("           Time: " + (stopTimeMillis - startTimeMillis) + " ms");
    logger.info("  Target Submit: " + SUBMIT_TO_SEND);
    logger.info("  Actual Submit: " + actualSubmitSent);
    // Throughput = successful submits / elapsed wall-clock seconds.
    double throughput = (double) actualSubmitSent
            / ((double) (stopTimeMillis - startTimeMillis) / (double) 1000);
    logger.info("     Throughput: " + DecimalUtil.toString(throughput, 3) + " per sec");

    for (int i = 0; i < SESSION_COUNT; i++) {
        if (tasks[i].session != null && tasks[i].session.hasCounters()) {
            logger.info(" Session " + i + ": submitSM {}", tasks[i].session.getCounters().getTxSubmitSM());
        }
    }

    // Required so non-daemon threads do not keep the JVM alive; this should
    // also close any Channels that are still open.
    logger.info("Shutting down client bootstrap and executors...");
    clientBootstrap.destroy();
    monitorExecutor.shutdownNow();

    logger.info("Done. Exiting");
}

From source file:com.cloudhopper.smpp.demo.QueryCancelMain.java

License:Apache License

/**
 * Interactive demo: binds one SMPP client session, sends enquire_link
 * requests, submits an SMS, then issues a query_sm and a cancel_sm for the
 * returned message id before unbinding and cleaning up.
 *
 * <p>The demo pauses on {@code System.in.read()} between each step so the
 * operator can watch the exchange on the SMSC side.
 *
 * <p>Fix over the original: the query_sm / cancel_sm responses were awaited
 * with {@code while (!future.isDone()) {}} busy-wait loops that burn a CPU
 * core; they now block on {@code WindowFuture.await()}, matching the
 * enquire_link pattern used earlier in this method.
 *
 * @param args unused command-line arguments
 * @throws Exception if reading from stdin fails; session-level errors are
 *         caught and logged inside the method
 */
static public void main(String[] args) throws Exception {
    //
    // setup 3 things required for any session we plan on creating
    //

    // create and assign the NioEventLoopGroup instances to handle event processing,
    // such as accepting new connections, receiving data, writing data, and so on.
    NioEventLoopGroup group = new NioEventLoopGroup(1);

    // to enable automatic expiration of requests, a second scheduled executor
    // is required which is what a monitor task will be executed with - this
    // is probably a thread pool that can be shared between all client bootstraps
    ScheduledThreadPoolExecutor monitorExecutor = (ScheduledThreadPoolExecutor) Executors
            .newScheduledThreadPool(1, new ThreadFactory() {
                private AtomicInteger sequence = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    // Name monitor threads so they are identifiable in thread dumps.
                    Thread t = new Thread(r);
                    t.setName("SmppClientSessionWindowMonitorPool-" + sequence.getAndIncrement());
                    return t;
                }
            });

    // a single instance of a client bootstrap can technically be shared
    // between any sessions that are created (a session can go to any different
    // number of SMSCs) - each session created under
    // a client bootstrap will use the executor and monitorExecutor set
    // in its constructor - just be *very* careful with the "expectedSessions"
    // value to make sure it matches the actual number of total concurrent
    // open sessions you plan on handling - the underlying netty library
    // used for NIO sockets essentially uses this value as the max number of
    // threads it will ever use, despite the "max pool size", etc. set on
    // the executor passed in here
    DefaultSmppClient clientBootstrap = new DefaultSmppClient(group, monitorExecutor);

    //
    // setup configuration for a client session
    //
    DefaultSmppSessionHandler sessionHandler = new ClientSmppSessionHandler();

    SmppSessionConfiguration config0 = new SmppSessionConfiguration();
    config0.setWindowSize(1);
    config0.setName("Tester.Session.0");
    config0.setType(SmppBindType.TRANSCEIVER);
    config0.setHost("127.0.0.1");
    config0.setPort(2776);
    config0.setConnectTimeout(10000);
    config0.setSystemId("smppclient1");
    config0.setPassword("password");
    config0.getLoggingOptions().setLogBytes(true);
    // to enable monitoring (request expiration)
    config0.setRequestExpiryTimeout(30000);
    config0.setWindowMonitorInterval(15000);
    config0.setCountersEnabled(true);

    //
    // create session, enquire link, submit an sms, query/cancel it, close session
    //
    SmppSession session0 = null;

    try {
        // create a session by having the bootstrap connect a
        // socket, send the bind request, and wait for a bind response
        session0 = clientBootstrap.bind(config0, sessionHandler);

        System.out.println("Press any key to send enquireLink #1");
        System.in.read();

        // demo of a "synchronous" enquireLink call - send it and wait for a response
        EnquireLinkResp enquireLinkResp1 = session0.enquireLink(new EnquireLink(), 10000);
        logger.info("enquire_link_resp #1: commandStatus [" + enquireLinkResp1.getCommandStatus() + "="
                + enquireLinkResp1.getResultMessage() + "]");

        System.out.println("Press any key to send enquireLink #2");
        System.in.read();

        // demo of an "asynchronous" enquireLink call - send it, get a future,
        // and then optionally choose to pick when we wait for it
        WindowFuture<Integer, PduRequest, PduResponse> future0 = session0.sendRequestPdu(new EnquireLink(),
                10000, true);
        if (!future0.await()) {
            logger.error("Failed to receive enquire_link_resp within specified time");
        } else if (future0.isSuccess()) {
            EnquireLinkResp enquireLinkResp2 = (EnquireLinkResp) future0.getResponse();
            logger.info("enquire_link_resp #2: commandStatus [" + enquireLinkResp2.getCommandStatus() + "="
                    + enquireLinkResp2.getResultMessage() + "]");
        } else {
            logger.error("Failed to properly receive enquire_link_resp: " + future0.getCause());
        }

        System.out.println("Press any key to send submit #1");
        System.in.read();

        // 160-character sample message (note the leading Euro sign) encoded
        // with the GSM default alphabet.
        String text160 = "\u20AC Lorem [ipsum] dolor sit amet, consectetur adipiscing elit. Proin feugiat, leo id commodo tincidunt, nibh diam ornare est, vitae accumsan risus lacus sed sem metus.";
        byte[] textBytes = CharsetUtil.encode(text160, CharsetUtil.CHARSET_GSM);

        SubmitSm submit0 = new SubmitSm();

        // add delivery receipt
        //submit0.setRegisteredDelivery(SmppConstants.REGISTERED_DELIVERY_SMSC_RECEIPT_REQUESTED);

        submit0.setSourceAddress(new Address((byte) 0x03, (byte) 0x00, "40404"));
        submit0.setDestAddress(new Address((byte) 0x01, (byte) 0x01, "44555519205"));
        submit0.setShortMessage(textBytes);

        SubmitSmResp submitResp = session0.submit(submit0, 10000);

        logger.info("Got messageId: {}", submitResp.getMessageId());

        System.out.println("Press any key to send query #1");
        System.in.read();

        // query_sm: ask the SMSC for the delivery state of the submitted message
        QuerySm query0 = new QuerySm();
        query0.setMessageId(submitResp.getMessageId());
        query0.setSourceAddress(new Address((byte) 0x03, (byte) 0x00, "40404"));

        WindowFuture<Integer, PduRequest, PduResponse> future1 = session0.sendRequestPdu(query0, 10000, true);
        // Block until the response arrives or the request expires (window
        // monitoring is enabled above) instead of busy-waiting on isDone().
        future1.await();
        QuerySmResp queryResp = (QuerySmResp) future1.getResponse();
        logger.info("query_sm_resp: {}", queryResp);

        System.out.println("Press any key to send cancel #1");
        System.in.read();

        // cancel_sm: ask the SMSC to cancel the (still pending) message
        CancelSm cancel0 = new CancelSm();
        cancel0.setMessageId(submitResp.getMessageId());
        cancel0.setSourceAddress(new Address((byte) 0x03, (byte) 0x00, "40404"));
        cancel0.setDestAddress(new Address((byte) 0x01, (byte) 0x01, "44555519205"));
        WindowFuture<Integer, PduRequest, PduResponse> future2 = session0.sendRequestPdu(cancel0, 10000, true);
        future2.await(); // same fix: wait, don't spin
        CancelSmResp cancelResp = (CancelSmResp) future2.getResponse();
        logger.info("cancel_sm_resp: {}", cancelResp);

        logger.info("sendWindow.size: {}", session0.getSendWindow().getSize());

        System.out.println("Press any key to unbind and close sessions");
        System.in.read();

        session0.unbind(5000);
    } catch (Exception e) {
        logger.error("", e);
    }

    // Dump final counters before destroying the session (bind may have failed,
    // hence the null check).
    if (session0 != null) {
        logger.info("Cleaning up session... (final counters)");
        if (session0.hasCounters()) {
            logger.info("tx-enquireLink: {}", session0.getCounters().getTxEnquireLink());
            logger.info("tx-submitSM: {}", session0.getCounters().getTxSubmitSM());
            logger.info("tx-deliverSM: {}", session0.getCounters().getTxDeliverSM());
            logger.info("tx-dataSM: {}", session0.getCounters().getTxDataSM());
            logger.info("rx-enquireLink: {}", session0.getCounters().getRxEnquireLink());
            logger.info("rx-submitSM: {}", session0.getCounters().getRxSubmitSM());
            logger.info("rx-deliverSM: {}", session0.getCounters().getRxDeliverSM());
            logger.info("rx-dataSM: {}", session0.getCounters().getRxDataSM());
        }

        session0.destroy();
        // alternatively, could call close(), get outstanding requests from
        // the sendWindow (if we wanted to retry them later), then call shutdown()
    }

    // Required so non-daemon threads do not keep the JVM alive; this should
    // also close any Channels that are still open.
    logger.info("Shutting down client bootstrap and executors...");
    clientBootstrap.destroy();
    monitorExecutor.shutdownNow();

    logger.info("Done. Exiting");
}