Example usage for io.netty.buffer ByteBuf readBytes

List of usage examples for io.netty.buffer ByteBuf readBytes

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf readBytes.

Prototype

public abstract ByteBuf readBytes(ByteBuffer dst);

Document

Transfers this buffer's data to the specified destination starting at the current readerIndex until the destination's position reaches its limit, and increases the readerIndex by the number of the transferred bytes.
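
The description above covers the ByteBuffer overload, while every example on this page uses the byte[] overload. The following minimal, self-contained sketch illustrates both; the class name ReadBytesExample and the sample strings are illustrative only and are not taken from the sources below.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ReadBytesExample {
    public static void main(String[] args) {
        // Source buffer with 11 readable bytes.
        ByteBuf buf = Unpooled.copiedBuffer("hello netty", StandardCharsets.UTF_8);

        // readBytes(ByteBuffer dst): fills the destination until its limit is reached
        // and advances readerIndex by the number of transferred bytes.
        ByteBuffer dst = ByteBuffer.allocate(5);
        buf.readBytes(dst);                      // transfers "hello"
        System.out.println(buf.readerIndex());   // 5

        // readBytes(byte[] dst): the overload used in the examples below; it reads exactly
        // dst.length bytes, so the array is sized with readableBytes() to drain the buffer.
        byte[] rest = new byte[buf.readableBytes()];
        buf.readBytes(rest);
        System.out.println(new String(rest, StandardCharsets.UTF_8)); // " netty"

        buf.release();                           // release the reference-counted buffer
    }
}

As in the examples below, sizing the destination array with readableBytes() before calling readBytes(byte[]) copies the entire remaining payload in a single call.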

Usage

From source file:com.linkedin.pinot.broker.requesthandler.BrokerRequestHandler.java

License:Apache License

/**
 * Deserialize the server responses, put the de-serialized data table into the data table map passed in, append
 * processing exceptions to the processing exception list passed in.
 * <p>For hybrid use case, multiple responses might be from the same instance. Use response sequence to distinguish
 * them.
 *
 * @param responseMap map from server to response.
 * @param isOfflineTable whether the responses are from an OFFLINE table.
 * @param dataTableMap map from server to data table.
 * @param tableName table name.
 * @param processingExceptions list of processing exceptions.
 */
private void deserializeServerResponses(@Nonnull Map<ServerInstance, ByteBuf> responseMap,
        boolean isOfflineTable, @Nonnull Map<ServerInstance, DataTable> dataTableMap, @Nonnull String tableName,
        @Nonnull List<ProcessingException> processingExceptions) {
    for (Entry<ServerInstance, ByteBuf> entry : responseMap.entrySet()) {
        ServerInstance serverInstance = entry.getKey();
        if (!isOfflineTable) {
            serverInstance = new ServerInstance(serverInstance.getHostname(), serverInstance.getPort(), 1);
        }
        ByteBuf byteBuf = entry.getValue();
        try {
            byte[] byteArray = new byte[byteBuf.readableBytes()];
            byteBuf.readBytes(byteArray);
            dataTableMap.put(serverInstance, DataTableFactory.getDataTable(byteArray));
        } catch (Exception e) {
            LOGGER.error("Caught exceptions while deserializing response for table: {} from server: {}",
                    tableName, serverInstance, e);
            _brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.DATA_TABLE_DESERIALIZATION_EXCEPTIONS,
                    1);
            processingExceptions
                    .add(QueryException.getException(QueryException.DATA_TABLE_DESERIALIZATION_ERROR, e));
        }
    }
}

From source file:com.linkedin.pinot.requestHandler.BrokerRequestHandler.java

License:Apache License

private Object getDataTableFromBrokerRequest(final BrokerRequest request,
        BucketingSelection overriddenSelection) throws InterruptedException {
    // Step 1
    final long routingStartTime = System.nanoTime();
    RoutingTableLookupRequest rtRequest = new RoutingTableLookupRequest(
            request.getQuerySource().getTableName());
    Map<ServerInstance, SegmentIdSet> segmentServices = _routingTable.findServers(rtRequest);
    if (segmentServices == null || segmentServices.isEmpty()) {
        LOGGER.warn("Not found ServerInstances to Segments Mapping:");
        return BrokerResponse.getEmptyBrokerResponse();
    }

    final long queryRoutingTime = System.nanoTime() - routingStartTime;
    _brokerMetrics.addPhaseTiming(request, BrokerQueryPhase.QUERY_ROUTING, queryRoutingTime);

    // Step 2-4
    final long scatterGatherStartTime = System.nanoTime();
    ScatterGatherRequestImpl scatterRequest = new ScatterGatherRequestImpl(request, segmentServices,
            _replicaSelection, ReplicaSelectionGranularity.SEGMENT_ID_SET, request.getBucketHashKey(), 0, //TODO: Speculative Requests not yet supported
            overriddenSelection, _requestIdGen.incrementAndGet(), _brokerTimeOut);
    CompositeFuture<ServerInstance, ByteBuf> response = _scatterGatherer.scatterGather(scatterRequest);

    //Step 5 - Deserialize Responses and build instance response map
    final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    {
        Map<ServerInstance, ByteBuf> responses = null;
        try {
            responses = response.get();
        } catch (ExecutionException e) {
            LOGGER.warn("Caught exception while fetching response", e);
            _brokerMetrics.addMeteredValue(request, BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
        }

        final long scatterGatherTime = System.nanoTime() - scatterGatherStartTime;
        _brokerMetrics.addPhaseTiming(request, BrokerQueryPhase.SCATTER_GATHER, scatterGatherTime);

        final long deserializationStartTime = System.nanoTime();

        Map<ServerInstance, Throwable> errors = response.getError();

        if (null != responses) {
            for (Entry<ServerInstance, ByteBuf> e : responses.entrySet()) {
                try {
                    ByteBuf b = e.getValue();
                    byte[] b2 = new byte[b.readableBytes()];
                    if (b2 == null || b2.length == 0) {
                        continue;
                    }
                    b.readBytes(b2);
                    DataTable r2 = new DataTable(b2);
                    if (errors != null && errors.containsKey(e.getKey())) {
                        Throwable throwable = errors.get(e.getKey());
                        r2.getMetadata().put("exception", new RequestProcessingException(throwable).toString());
                        _brokerMetrics.addMeteredValue(request, BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
                    }
                    instanceResponseMap.put(e.getKey(), r2);
                } catch (Exception ex) {
                    LOGGER.error("Got exceptions in collect query result for instance " + e.getKey()
                            + ", error: " + ex.getMessage(), ex);
                    _brokerMetrics.addMeteredValue(request, BrokerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
                }
            }
        }
        final long deserializationTime = System.nanoTime() - deserializationStartTime;
        _brokerMetrics.addPhaseTiming(request, BrokerQueryPhase.DESERIALIZATION, deserializationTime);
    }

    // Step 6 : Do the reduce and return
    try {
        return _brokerMetrics.timePhase(request, BrokerQueryPhase.REDUCE, new Callable<BrokerResponse>() {
            @Override
            public BrokerResponse call() {
                BrokerResponse returnValue = _reduceService.reduceOnDataTable(request, instanceResponseMap);
                _brokerMetrics.addMeteredValue(request, BrokerMeter.DOCUMENTS_SCANNED,
                        returnValue.getNumDocsScanned());
                return returnValue;
            }
        });
    } catch (Exception e) {
        // Shouldn't happen, this is only here because timePhase() can throw a checked exception, even though the nested callable can't.
        LOGGER.error("Caught exception while processing return", e);
        Utils.rethrowException(e);
        throw new AssertionError("Should not reach this");
    }
}

From source file:com.linkedin.pinot.requestHandler.BrokerRequestHandler.java

License:Apache License

private Object getDataTableFromBrokerRequestList(final BrokerRequest federatedBrokerRequest,
        final List<BrokerRequest> requests, BucketingSelection overriddenSelection)
        throws InterruptedException {
    // Step 1
    long scatterGatherStartTime = System.nanoTime();
    long queryRoutingTime = 0;
    Map<BrokerRequest, CompositeFuture<ServerInstance, ByteBuf>> responseFuturesList = new HashMap<BrokerRequest, CompositeFuture<ServerInstance, ByteBuf>>();
    for (BrokerRequest request : requests) {
        final long routingStartTime = System.nanoTime();
        RoutingTableLookupRequest rtRequest = new RoutingTableLookupRequest(
                request.getQuerySource().getTableName());
        Map<ServerInstance, SegmentIdSet> segmentServices = _routingTable.findServers(rtRequest);
        if (segmentServices == null || segmentServices.isEmpty()) {
            LOGGER.info(
                    "Not found ServerInstances to Segments Mapping for Table - " + rtRequest.getTableName());
            continue;
        }
        LOGGER.debug("Find ServerInstances to Segments Mapping for table - " + rtRequest.getTableName());
        for (ServerInstance serverInstance : segmentServices.keySet()) {
            LOGGER.debug(serverInstance + " : " + segmentServices.get(serverInstance));
        }
        queryRoutingTime += System.nanoTime() - routingStartTime;

        // Step 2-4
        scatterGatherStartTime = System.nanoTime();
        ScatterGatherRequestImpl scatterRequest = new ScatterGatherRequestImpl(request, segmentServices,
                _replicaSelection, ReplicaSelectionGranularity.SEGMENT_ID_SET, request.getBucketHashKey(), 0, //TODO: Speculative Requests not yet supported
                overriddenSelection, _requestIdGen.incrementAndGet(), _brokerTimeOut);
        responseFuturesList.put(request, _scatterGatherer.scatterGather(scatterRequest));
    }
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.QUERY_ROUTING, queryRoutingTime);

    long scatterGatherTime = 0;
    long deserializationTime = 0;
    //Step 5 - Deserialize Responses and build instance response map
    final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    final AtomicInteger responseSeq = new AtomicInteger(-1);
    {
        for (BrokerRequest request : responseFuturesList.keySet()) {
            CompositeFuture<ServerInstance, ByteBuf> response = responseFuturesList.get(request);

            Map<ServerInstance, ByteBuf> responses = null;
            try {
                responses = response.get();
            } catch (ExecutionException e) {
                LOGGER.warn("Caught exception while fetching response", e);
                _brokerMetrics.addMeteredValue(federatedBrokerRequest, BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
            }

            scatterGatherTime += System.nanoTime() - scatterGatherStartTime;

            final long deserializationStartTime = System.nanoTime();

            Map<ServerInstance, Throwable> errors = response.getError();

            if (null != responses) {
                for (Entry<ServerInstance, ByteBuf> e : responses.entrySet()) {
                    try {
                        ByteBuf b = e.getValue();
                        byte[] b2 = new byte[b.readableBytes()];
                        if (b2 == null || b2.length == 0) {
                            continue;
                        }
                        b.readBytes(b2);
                        DataTable r2 = new DataTable(b2);
                        // Hybrid requests may get response from same instance, so we need to distinguish them.
                        ServerInstance decoratedServerInstance = new ServerInstance(e.getKey().getHostname(),
                                e.getKey().getPort(), responseSeq.incrementAndGet());
                        if (errors != null && errors.containsKey(e.getKey())) {
                            Throwable throwable = errors.get(e.getKey());
                            if (throwable != null) {
                                r2.getMetadata().put("exception",
                                        new RequestProcessingException(throwable).toString());
                                _brokerMetrics.addMeteredValue(federatedBrokerRequest,
                                        BrokerMeter.REQUEST_FETCH_EXCEPTIONS, 1);
                            }
                        }
                        instanceResponseMap.put(decoratedServerInstance, r2);
                    } catch (Exception ex) {
                        LOGGER.error("Got exceptions in collect query result for instance " + e.getKey()
                                + ", error: " + ex.getMessage(), ex);
                        _brokerMetrics.addMeteredValue(federatedBrokerRequest,
                                BrokerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
                    }
                }
            }
            deserializationTime += System.nanoTime() - deserializationStartTime;
        }
    }
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.SCATTER_GATHER, scatterGatherTime);
    _brokerMetrics.addPhaseTiming(federatedBrokerRequest, BrokerQueryPhase.DESERIALIZATION,
            deserializationTime);

    // Step 6 : Do the reduce and return
    try {
        return _brokerMetrics.timePhase(federatedBrokerRequest, BrokerQueryPhase.REDUCE,
                new Callable<BrokerResponse>() {
                    @Override
                    public BrokerResponse call() {
                        BrokerResponse returnValue = _reduceService.reduceOnDataTable(federatedBrokerRequest,
                                instanceResponseMap);
                        _brokerMetrics.addMeteredValue(federatedBrokerRequest, BrokerMeter.DOCUMENTS_SCANNED,
                                returnValue.getNumDocsScanned());
                        return returnValue;
                    }
                });
    } catch (Exception e) {
        // Shouldn't happen, this is only here because timePhase() can throw a checked exception, even though the nested callable can't.
        LOGGER.error("Caught exception while processing query", e);
        Utils.rethrowException(e);
        throw new AssertionError("Should not reach this");
    }
}

From source file:com.linkedin.pinot.server.request.ScheduledRequestHandler.java

License:Apache License

@Override
public ListenableFuture<byte[]> processRequest(ChannelHandlerContext channelHandlerContext, ByteBuf request) {
    final long queryStartTimeNs = System.nanoTime();
    serverMetrics.addMeteredGlobalValue(ServerMeter.QUERIES, 1);

    LOGGER.debug("Processing request : {}", request);

    byte[] byteArray = new byte[request.readableBytes()];
    request.readBytes(byteArray);
    SerDe serDe = new SerDe(new TCompactProtocol.Factory());
    final InstanceRequest instanceRequest = new InstanceRequest();

    if (!serDe.deserialize(instanceRequest, byteArray)) {
        LOGGER.error("Failed to deserialize query request from broker ip: {}",
                ((InetSocketAddress) channelHandlerContext.channel().remoteAddress()).getAddress()
                        .getHostAddress());
        DataTable result = new DataTableImplV2();
        result.addException(QueryException.INTERNAL_ERROR);
        serverMetrics.addMeteredGlobalValue(ServerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
        QueryRequest queryRequest = new QueryRequest(null, serverMetrics);
        queryRequest.getTimerContext().setQueryArrivalTimeNs(queryStartTimeNs);
        return Futures.immediateFuture(serializeDataTable(queryRequest, result));
    }
    final QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
    final TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.setQueryArrivalTimeNs(queryStartTimeNs);
    TimerContext.Timer deserializationTimer = timerContext
            .startNewPhaseTimerAtNs(ServerQueryPhase.REQUEST_DESERIALIZATION, queryStartTimeNs);
    deserializationTimer.stopAndRecord();

    LOGGER.debug("Processing requestId:{},request={}", instanceRequest.getRequestId(), instanceRequest);

    ListenableFuture<DataTable> queryTask = queryScheduler.submit(queryRequest);

    // following future will provide default response in case of uncaught
    // exceptions from query processing
    ListenableFuture<DataTable> queryResponse = Futures.catching(queryTask, Throwable.class,
            new Function<Throwable, DataTable>() {
                @Nullable
                @Override
                public DataTable apply(@Nullable Throwable input) {
                    // this is called iff queryTask fails with unhandled exception
                    serverMetrics.addMeteredGlobalValue(ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
                    DataTable result = new DataTableImplV2();
                    result.addException(QueryException.INTERNAL_ERROR);
                    return result;
                }
            });

    // transform the DataTable to serialized byte[] to send back to broker
    ListenableFuture<byte[]> serializedQueryResponse = Futures.transform(queryResponse,
            new Function<DataTable, byte[]>() {
                @Nullable
                @Override
                public byte[] apply(@Nullable DataTable instanceResponse) {
                    byte[] responseData = serializeDataTable(queryRequest, instanceResponse);
                    LOGGER.info(
                            "Processed requestId {},reqSegments={},prunedToSegmentCount={},deserTimeMs={},planTimeMs={},planExecTimeMs={},totalExecMs={},serTimeMs={}TotalTimeMs={},broker={}",
                            queryRequest.getInstanceRequest().getRequestId(),
                            queryRequest.getInstanceRequest().getSearchSegments().size(),
                            queryRequest.getSegmentCountAfterPruning(),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.REQUEST_DESERIALIZATION),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.BUILD_QUERY_PLAN),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PLAN_EXECUTION),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PROCESSING),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.RESPONSE_SERIALIZATION),
                            timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME),
                            queryRequest.getBrokerId());
                    return responseData;
                }
            });

    return serializedQueryResponse;
}

From source file:com.linkedin.pinot.server.request.SimpleRequestHandler.java

License:Apache License

@Override
public byte[] processRequest(ByteBuf request) {

    long queryStartTime = System.nanoTime();
    _serverMetrics.addMeteredValue(null, ServerMeter.QUERIES, 1);

    LOGGER.debug("processing request : {}", request);

    DataTable instanceResponse = null;

    byte[] byteArray = new byte[request.readableBytes()];
    request.readBytes(byteArray);
    SerDe serDe = new SerDe(new TCompactProtocol.Factory());
    BrokerRequest brokerRequest = null;
    try {
        final InstanceRequest queryRequest = new InstanceRequest();
        serDe.deserialize(queryRequest, byteArray);
        long deserRequestTime = System.nanoTime();
        _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.TOTAL_QUERY_TIME,
                deserRequestTime - queryStartTime);
        LOGGER.info("instance request : {}", queryRequest);
        brokerRequest = queryRequest.getQuery();

        long startTime = System.nanoTime();
        instanceResponse = _queryExecutor.processQuery(queryRequest);
        long totalNanos = System.nanoTime() - startTime;
        _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.QUERY_PROCESSING, totalNanos);
    } catch (Exception e) {
        LOGGER.error("Got exception while processing request. Returning error response", e);
        _serverMetrics.addMeteredValue(null, ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
        DataTableBuilder dataTableBuilder = new DataTableBuilder(null);
        List<ProcessingException> exceptions = new ArrayList<ProcessingException>();
        ProcessingException exception = QueryException.INTERNAL_ERROR.deepCopy();
        exception.setMessage(e.getMessage());
        exceptions.add(exception);
        instanceResponse = dataTableBuilder.buildExceptions();
    }

    byte[] responseByte;
    long serializationStartTime = System.nanoTime();
    try {
        if (instanceResponse == null) {
            LOGGER.warn("Instance response is null.");
            responseByte = new byte[0];
        } else {
            responseByte = instanceResponse.toBytes();
        }
    } catch (Exception e) {
        _serverMetrics.addMeteredValue(null, ServerMeter.RESPONSE_SERIALIZATION_EXCEPTIONS, 1);
        LOGGER.error("Got exception while serializing response.", e);
        responseByte = null;
    }
    long serializationEndTime = System.nanoTime();
    _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.RESPONSE_SERIALIZATION,
            serializationEndTime - serializationStartTime);
    _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.TOTAL_QUERY_TIME,
            serializationEndTime - queryStartTime);
    return responseByte;
}

From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java

License:Apache License

@Test
/**
 * Test Single small request response
 * @throws Exception
 */
public void testSingleSmallRequestResponse() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    Timer timer = new HashedWheelTimer();

    String response = "dummy response";
    int port = 9089;
    MyRequestHandler handler = new MyRequestHandler(response, null);
    MyRequestHandlerFactory handlerFactory = new MyRequestHandlerFactory(handler);
    NettyTCPServer serverConn = new NettyTCPServer(port, handlerFactory, null);
    Thread serverThread = new Thread(serverConn, "ServerMain");
    serverThread.start();
    Thread.sleep(1000);
    ServerInstance server = new ServerInstance("localhost", port);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server, eventLoopGroup, timer, metric);
    try {
        LOGGER.info("About to connect the client !!");
        boolean connected = clientConn.connect();
        LOGGER.info("Client connected !!");
        Assert.assertTrue(connected, "connected");
        Thread.sleep(1000);
        String request = "dummy request";
        LOGGER.info("Sending the request !!");
        ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L,
                5000L);
        LOGGER.info("Request  sent !!");
        ByteBuf serverResp = serverRespFuture.getOne();
        byte[] b2 = new byte[serverResp.readableBytes()];
        serverResp.readBytes(b2);
        String gotResponse = new String(b2);
        Assert.assertEquals(gotResponse, response, "Response Check at client");
        Assert.assertEquals(handler.getRequest(), request, "Request Check at server");
        System.out.println(metric);
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }

        if (null != serverConn) {
            serverConn.shutdownGracefully();
        }
    }
}

From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java

License:Apache License

@Test
public void testConcurrentRequestDispatchError() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    String response = "dummy response";
    int port = 9089;
    CountDownLatch latch = new CountDownLatch(1);
    MyRequestHandler handler = new MyRequestHandler(response, latch);
    MyRequestHandlerFactory handlerFactory = new MyRequestHandlerFactory(handler);
    NettyTCPServer serverConn = new NettyTCPServer(port, handlerFactory, null);
    Thread serverThread = new Thread(serverConn, "ServerMain");
    serverThread.start();
    Thread.sleep(1000);
    ServerInstance server = new ServerInstance("localhost", port);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server, eventLoopGroup,
            new HashedWheelTimer(), metric);
    LOGGER.info("About to connect the client !!");
    boolean connected = clientConn.connect();
    LOGGER.info("Client connected !!");
    Assert.assertTrue(connected, "connected");
    Thread.sleep(1000);
    String request = "dummy request";
    LOGGER.info("Sending the request !!");
    ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L,
            5000L);
    boolean gotException = false;
    try {
        clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
    } catch (IllegalStateException ex) {
        gotException = true;
        // Second request should have failed.
        LOGGER.info("got exception ", ex);
    }
    latch.countDown();
    ByteBuf serverResp = serverRespFuture.getOne();
    byte[] b2 = new byte[serverResp.readableBytes()];
    serverResp.readBytes(b2);
    String gotResponse = new String(b2);
    Assert.assertEquals(gotResponse, response, "Response Check at client");
    Assert.assertEquals(handler.getRequest(), request, "Request Check at server");
    clientConn.close();
    serverConn.shutdownGracefully();
    Assert.assertTrue(gotException, "GotException ");
}

From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java

License:Apache License

@Test
/**
 * Test Single Large  ( 2 MB) request response
 * @throws Exception
 */
public void testSingleLargeRequestResponse() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    String response_prefix = "response_";
    String response = generatePayload(response_prefix, 1024 * 1024 * 2);
    int port = 9089;
    MyRequestHandler handler = new MyRequestHandler(response, null);
    MyRequestHandlerFactory handlerFactory = new MyRequestHandlerFactory(handler);
    NettyTCPServer serverConn = new NettyTCPServer(port, handlerFactory, null);
    Thread serverThread = new Thread(serverConn, "ServerMain");
    serverThread.start();
    Thread.sleep(1000);
    ServerInstance server = new ServerInstance("localhost", port);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server, eventLoopGroup,
            new HashedWheelTimer(), metric);
    try {
        LOGGER.info("About to connect the client !!");
        boolean connected = clientConn.connect();
        LOGGER.info("Client connected !!");
        Assert.assertTrue(connected, "connected");
        Thread.sleep(1000);
        String request_prefix = "request_";
        String request = generatePayload(request_prefix, 1024 * 1024 * 2);
        LOGGER.info("Sending the request !!");
        ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L,
                5000L);
        LOGGER.info("Request  sent !!");
        ByteBuf serverResp = serverRespFuture.getOne();
        byte[] b2 = new byte[serverResp.readableBytes()];
        serverResp.readBytes(b2);
        String gotResponse = new String(b2);
        Assert.assertEquals(gotResponse, response, "Response Check at client");
        Assert.assertEquals(handler.getRequest(), request, "Request Check at server");
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }
        if (null != serverConn) {
            serverConn.shutdownGracefully();
        }
    }
}

From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java

License:Apache License

@Test
/**
 * Send 10K small sized request in sequence. Verify each request and response.
 * @throws Exception
 */
public void test10KSmallRequestResponses() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    int port = 9089;
    MyRequestHandler handler = new MyRequestHandler(null, null);
    MyRequestHandlerFactory handlerFactory = new MyRequestHandlerFactory(handler);
    NettyTCPServer serverConn = new NettyTCPServer(port, handlerFactory, null);
    Thread serverThread = new Thread(serverConn, "ServerMain");
    serverThread.start();
    Thread.sleep(1000);
    ServerInstance server = new ServerInstance("localhost", port);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server, eventLoopGroup,
            new HashedWheelTimer(), metric);
    try {
        LOGGER.info("About to connect the client !!");
        boolean connected = clientConn.connect();
        LOGGER.info("Client connected !!");
        Assert.assertTrue(connected, "connected");
        Thread.sleep(1000);
        for (int i = 0; i < 10000; i++) {
            String request = "dummy request :" + i;
            String response = "dummy response :" + i;
            handler.setResponse(response);
            LOGGER.info("Sending the request (" + request + ")");
            ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()),
                    1L, 5000L);
            LOGGER.info("Request  sent !!");
            ByteBuf serverResp = serverRespFuture.getOne();
            if (null == serverResp) {
                LOGGER.error("Got unexpected error while trying to get response.", serverRespFuture.getError());
            }

            byte[] b2 = new byte[serverResp.readableBytes()];
            serverResp.readBytes(b2);
            String gotResponse = new String(b2);
            Assert.assertEquals(gotResponse, response, "Response Check at client");
            Assert.assertEquals(handler.getRequest(), request, "Request Check at server");
        }
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }

        if (null != serverConn) {
            serverConn.shutdownGracefully();
        }
    }
}

From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java

License:Apache License

/**
 * Send 100 large ( 2MB) sized request in sequence. Verify each request and response.
 * @throws Exception
 */
//@Test
public void test100LargeRequestResponses() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    int port = 9089;
    MyRequestHandler handler = new MyRequestHandler(null, null);
    MyRequestHandlerFactory handlerFactory = new MyRequestHandlerFactory(handler);
    NettyTCPServer serverConn = new NettyTCPServer(port, handlerFactory, null);
    Thread serverThread = new Thread(serverConn, "ServerMain");
    serverThread.start();
    Thread.sleep(1000);
    ServerInstance server = new ServerInstance("localhost", port);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server, eventLoopGroup,
            new HashedWheelTimer(), metric);
    LOGGER.info("About to connect the client !!");
    boolean connected = clientConn.connect();
    LOGGER.info("Client connected !!");
    Assert.assertTrue(connected, "connected");
    Thread.sleep(1000);
    try {
        for (int i = 0; i < 100; i++) {
            String request_prefix = "request_";
            String request = generatePayload(request_prefix, 1024 * 1024 * 20);
            String response_prefix = "response_";
            String response = generatePayload(response_prefix, 1024 * 1024 * 20);
            handler.setResponse(response);
            //LOG.info("Sending the request (" + request + ")");
            ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()),
                    1L, 5000L);
            //LOG.info("Request  sent !!");
            ByteBuf serverResp = serverRespFuture.getOne();
            byte[] b2 = new byte[serverResp.readableBytes()];
            serverResp.readBytes(b2);
            String gotResponse = new String(b2);
            Assert.assertEquals(gotResponse, response, "Response Check at client");
            Assert.assertEquals(handler.getRequest(), request, "Request Check at server");
        }
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }

        if (null != serverConn) {
            serverConn.shutdownGracefully();
        }
    }
}