List of usage examples for io.netty.channel.ChannelFuture.cause()
Throwable cause();
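cause() returns the reason a failed I/O operation failed, or null while the operation is still pending or after it has succeeded. Before the project examples below, here is a minimal, self-contained sketch of the most common pattern: register a ChannelFutureListener, test isSuccess(), and read cause() only on failure. The channel, message, and logger names are illustrative placeholders, not taken from any of the projects listed below.

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CauseExample {

    private static final Logger log = LoggerFactory.getLogger(CauseExample.class);

    // Writes a message and logs the failure cause if the write does not succeed.
    static void writeWithErrorHandling(Channel channel, Object message) {
        channel.writeAndFlush(message).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (future.isSuccess()) {
                    log.debug("Write completed");
                } else {
                    // cause() is non-null only for a failed operation.
                    log.error("Write failed, closing channel", future.cause());
                    future.channel().close();
                }
            }
        });
    }
}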
From source file:com.github.sparkfy.network.server.TransportRequestHandler.java
License:Apache License
/**
 * Responds to a single message with some Encodable object. If a failure occurs while sending,
 * it will be logged and the channel closed.
 */
private void respond(final Encodable result) {
    final String remoteAddress = channel.remoteAddress().toString();
    channel.writeAndFlush(result).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                logger.trace(String.format("Sent result %s to client %s", result, remoteAddress));
            } else {
                logger.error(String.format("Error sending result %s to %s; closing connection",
                        result, remoteAddress), future.cause());
                channel.close();
            }
        }
    });
}
From source file:com.googlecode.protobuf.pro.duplex.client.DuplexTcpClientPipelineFactory.java
License:Apache License
/**
 * Creates a new client with the provided channel attributes to the remoteAddress.
 *
 * @param remoteAddress
 * @param bootstrap
 * @param attributes
 * @return
 * @throws IOException
 */
public RpcClient peerWith(InetSocketAddress remoteAddress, Bootstrap bootstrap, Map<String, Object> attributes)
        throws IOException {
    if (remoteAddress == null) {
        throw new NullPointerException("remoteAddress");
    }
    InetSocketAddress localAddress = null;
    if (clientInfo.getHostName() != null) {
        localAddress = new InetSocketAddress(clientInfo.getHostName(), clientInfo.getPort());
    }
    ChannelFuture connectFuture = bootstrap.connect(remoteAddress, localAddress).awaitUninterruptibly();
    if (!connectFuture.isSuccess()) {
        throw new IOException("Failed to connect to " + remoteAddress, connectFuture.cause());
    }

    Channel channel = connectFuture.channel();
    InetSocketAddress connectedAddress = (InetSocketAddress) channel.localAddress();

    PeerInfo effectiveClientInfo = new PeerInfo(
            clientInfo.getHostName() == null ? connectedAddress.getHostName() : clientInfo.getHostName(),
            connectedAddress.getPort(), clientInfo.getPid());

    ConnectRequest connectRequest = ConnectRequest.newBuilder()
            .setClientHostName(effectiveClientInfo.getHostName())
            .setClientPort(effectiveClientInfo.getPort())
            .setClientPID(effectiveClientInfo.getPid())
            .setCorrelationId(correlationId.incrementAndGet())
            .setCompress(isCompression())
            .build();

    WirePayload payload = WirePayload.newBuilder().setConnectRequest(connectRequest).build();
    if (log.isDebugEnabled()) {
        log.debug("Sending [" + connectRequest.getCorrelationId() + "]ConnectRequest.");
    }
    channel.writeAndFlush(payload);

    ClientConnectResponseHandler connectResponseHandler =
            (ClientConnectResponseHandler) channel.pipeline().get(Handler.CLIENT_CONNECT);
    if (connectResponseHandler == null) {
        throw new IllegalStateException("No connectResponse handler in channel pipeline.");
    }

    ConnectResponse connectResponse = connectResponseHandler.getConnectResponse(connectResponseTimeoutMillis);
    if (connectResponse == null) {
        connectFuture.channel().close().awaitUninterruptibly();
        throw new IOException(
                "No Channel response received before " + connectResponseTimeoutMillis + " millis timeout.");
    }
    if (connectResponse.hasErrorCode()) {
        connectFuture.channel().close().awaitUninterruptibly();
        throw new IOException(
                "DuplexTcpServer CONNECT_RESPONSE indicated error " + connectResponse.getErrorCode());
    }
    if (!connectResponse.hasCorrelationId()) {
        connectFuture.channel().close().awaitUninterruptibly();
        throw new IOException("DuplexTcpServer CONNECT_RESPONSE missing correlationId.");
    }
    if (connectResponse.getCorrelationId() != connectRequest.getCorrelationId()) {
        connectFuture.channel().close().awaitUninterruptibly();
        throw new IOException("DuplexTcpServer CONNECT_RESPONSE correlationId mismatch. TcpClient sent "
                + connectRequest.getCorrelationId() + " received " + connectResponse.getCorrelationId()
                + " from TcpServer.");
    }

    PeerInfo serverInfo = null;
    if (connectResponse.hasServerPID()) {
        serverInfo = new PeerInfo(remoteAddress.getHostName(), remoteAddress.getPort(),
                connectResponse.getServerPID());
    } else {
        serverInfo = new PeerInfo(remoteAddress.getHostName(), remoteAddress.getPort());
    }

    RpcClient rpcClient = new RpcClient(channel, effectiveClientInfo, serverInfo, connectResponse.getCompress(),
            getRpcLogger(), getExtensionRegistry());

    if (attributes != null) {
        // transfer the input attributes to the channel before we state it's opened.
        for (Entry<String, Object> attr : attributes.entrySet()) {
            rpcClient.setAttribute(attr.getKey(), attr.getValue());
        }
    }

    RpcClientHandler rpcClientHandler = completePipeline(rpcClient);
    rpcClientHandler.notifyOpened();

    // register the rpcClient in the RpcClientRegistry
    if (!getRpcClientRegistry().registerRpcClient(rpcClient)) {
        log.warn("Client RpcClient already registered. Bug??");
    }
    // channels remove themselves when closed.
    return rpcClient;
}
From source file:com.googlecode.protobuf.pro.duplex.example.nonrpc.StatusClient.java
License:Apache License
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        clientFactory.setConnectResponseTimeoutMillis(10000);
        clientFactory.setRpcServerCallExecutor(new ThreadPoolCallExecutor(3, 10));

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        final RpcCallback<PingPong.Status> serverStatusCallback = new RpcCallback<PingPong.Status>() {

            @Override
            public void run(PingPong.Status parameter) {
                log.info("Received " + parameter);
            }

        };

        // Set up the event pipeline factory.
        // setup a RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
                channel.setOobMessageCallback(PingPong.Status.getDefaultInstance(), serverStatusCallback);
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
                channel.setOobMessageCallback(PingPong.Status.getDefaultInstance(), serverStatusCallback);
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
                channel.setOobMessageCallback(PingPong.Status.getDefaultInstance(), serverStatusCallback);
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));
        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);

        clientFactory.peerWith(server, bootstrap);

        while (true && channel != null) {
            PingPong.Status clientStatus = PingPong.Status.newBuilder()
                    .setMessage("Client " + channel + " OK@" + System.currentTimeMillis()).build();

            ChannelFuture oobSend = channel.sendOobMessage(clientStatus);
            if (!oobSend.isDone()) {
                log.info("Waiting for completion.");
                oobSend.syncUninterruptibly();
            }
            if (!oobSend.isSuccess()) {
                log.warn("OobMessage send failed.", oobSend.cause());
            }
            Thread.sleep(1000);
        }
    } finally {
        System.exit(0);
    }
}
From source file:com.googlecode.protobuf.pro.duplex.example.nonrpc.StatusServer.java
License:Apache License
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);

    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    final RpcCallback<PingPong.Status> clientStatusCallback = new RpcCallback<PingPong.Status>() {

        @Override
        public void run(PingPong.Status parameter) {
            log.info("Received " + parameter);
        }

    };

    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(16,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {
        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        for (RpcClientChannel client : clients) {

            PingPong.Status serverStatus = PingPong.Status.newBuilder()
                    .setMessage("Server " + serverFactory.getServerInfo() + " OK@" + System.currentTimeMillis())
                    .build();

            ChannelFuture oobSend = client.sendOobMessage(serverStatus);
            if (!oobSend.isDone()) {
                log.info("Waiting for completion.");
                oobSend.syncUninterruptibly();
            }
            if (!oobSend.isSuccess()) {
                log.warn("OobMessage send failed.", oobSend.cause());
            }
        }
        log.info("Sleeping 5s before sending serverStatus to all clients.");
        Thread.sleep(5000);
    }
}
From source file:com.graylog.splunk.output.senders.TCPSender.java
License:Open Source License
protected void createBootstrap(final EventLoopGroup workerGroup) {
    final Bootstrap bootstrap = new Bootstrap();
    final SplunkSenderThread senderThread = new SplunkSenderThread(queue);

    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
            .remoteAddress(new InetSocketAddress(hostname, port))
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new StringEncoder());
                    ch.pipeline().addLast(new SimpleChannelInboundHandler<ByteBuf>() {
                        @Override
                        protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
                            // we only send data, never read on the socket
                        }

                        @Override
                        public void channelActive(ChannelHandlerContext ctx) throws Exception {
                            senderThread.start(ctx.channel());
                        }

                        @Override
                        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                            LOG.info("Channel disconnected.");
                            senderThread.stop();
                            scheduleReconnect(ctx.channel().eventLoop());
                        }

                        @Override
                        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                            LOG.error("Exception caught", cause);
                        }
                    });
                }
            });

    bootstrap.connect().addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                LOG.info("Connected.");
            } else {
                LOG.error("Connection failed: {}", future.cause().getMessage());
                scheduleReconnect(future.channel().eventLoop());
            }
        }
    });
}
From source file:com.ibasco.agql.core.transport.NettyTransport.java
License:Open Source License
/**
 * <p>A method to send data over the transport. Since the current netty version does not yet support
 * {@link CompletableFuture}, we need to convert the returned {@link ChannelFuture} to its
 * {@link CompletableFuture} version.</p>
 *
 * @param channel
 *         The underlying {@link Channel} to be used for data transport.
 * @param data
 *         An instance of {@link AbstractMessage} that will be sent through the transport
 * @param flushImmediately
 *         True if transport should immediately flush the message after send.
 *
 * @return A {@link CompletableFuture} with return type of {@link Channel} (The channel used for the transport)
 */
private CompletableFuture<Void> writeToChannel(Channel channel, Msg data, boolean flushImmediately) {
    final CompletableFuture<Void> writeResultFuture = new CompletableFuture<>();
    log.debug("Writing data '{}' to channel : {}", data, channel);
    final ChannelFuture writeFuture = (flushImmediately) ? channel.writeAndFlush(data) : channel.write(data);
    writeFuture.addListener((ChannelFuture future) -> {
        try {
            if (future.isSuccess())
                writeResultFuture.complete(null);
            else
                writeResultFuture.completeExceptionally(future.cause());
        } finally {
            cleanupChannel(future.channel());
        }
    });
    return writeResultFuture;
}
From source file:com.ibasco.agql.core.transport.tcp.NettyBasicTcpTransport.java
License:Open Source License
@Override
public CompletableFuture<Channel> getChannel(M address) {
    final CompletableFuture<Channel> channelFuture = new CompletableFuture<>();
    ChannelFuture f = getBootstrap().connect(address.recipient());

    //Acquire from pool and listen for completion
    f.addListener((ChannelFuture future) -> {
        if (future.isSuccess()) {
            channelFuture.complete(future.channel());
        } else {
            channelFuture.completeExceptionally(future.cause());
        }
    });
    return channelFuture;
}
From source file:com.ibasco.agql.core.transport.udp.NettyBasicUdpTransport.java
License:Open Source License
@Override
public CompletableFuture<Channel> getChannel(M message) {
    final CompletableFuture<Channel> cf = new CompletableFuture<>();
    //lazy initialization
    if (channel == null || !channel.isOpen()) {
        bind(0).addListener((ChannelFuture future) -> {
            if (future.isSuccess()) {
                channel = (NioDatagramChannel) future.channel();
                channel.closeFuture()
                        .addListener((ChannelFuture f) -> log.debug(
                                "CHANNEL CLOSED: {}, Is Open: {}, For Address: {}, Cause: {}",
                                f.channel().id(), f.channel().isOpen(), message.recipient(), f.cause()));
                cf.complete(channel);
            } else {
                channel = null;
                cf.completeExceptionally(future.cause());
            }
        });
    } else {
        cf.complete(channel);
    }
    return cf;
}
From source file:com.ict.dtube.broker.processor.PullMessageProcessor.java
License:Apache License
private RemotingCommand processRequest(final Channel channel, RemotingCommand request, boolean brokerAllowSuspend)
        throws RemotingCommandException {
    RemotingCommand response = RemotingCommand.createResponseCommand(PullMessageResponseHeader.class);
    final PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) response.getCustomHeader();
    final PullMessageRequestHeader requestHeader =
            (PullMessageRequestHeader) request.decodeCommandCustomHeader(PullMessageRequestHeader.class);

    // the opaque must be set here because the response is transferred via sendfile
    response.setOpaque(request.getOpaque());

    if (log.isDebugEnabled()) {
        log.debug("receive PullMessage request command, " + request);
    }

    // check the broker's read permission
    if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark("the broker[" + this.brokerController.getBrokerConfig().getBrokerIP1()
                + "] pulling message is forbidden");
        return response;
    }

    // make sure the subscription group exists
    SubscriptionGroupConfig subscriptionGroupConfig = this.brokerController.getSubscriptionGroupManager()
            .findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
    if (null == subscriptionGroupConfig) {
        response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
        response.setRemark("subscription group not exist, " + requestHeader.getConsumerGroup() + " "
                + FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST));
        return response;
    }

    // check whether this subscription group is allowed to consume
    if (!subscriptionGroupConfig.isConsumeEnable()) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
        return response;
    }

    final boolean hasSuspendFlag = PullSysFlag.hasSuspendFlag(requestHeader.getSysFlag());
    final boolean hasCommitOffsetFlag = PullSysFlag.hasCommitOffsetFlag(requestHeader.getSysFlag());
    final boolean hasSubscriptionFlag = PullSysFlag.hasSubscriptionFlag(requestHeader.getSysFlag());

    final long suspendTimeoutMillisLong = hasSuspendFlag ? requestHeader.getSuspendTimeoutMillis() : 0;

    // make sure the topic exists
    TopicConfig topicConfig = this.brokerController.getTopicConfigManager()
            .selectTopicConfig(requestHeader.getTopic());
    if (null == topicConfig) {
        log.error("the topic " + requestHeader.getTopic() + " not exist, consumer: "
                + RemotingHelper.parseChannelRemoteAddr(channel));
        response.setCode(ResponseCode.TOPIC_NOT_EXIST);
        response.setRemark("topic[" + requestHeader.getTopic() + "] not exist, apply first please!"
                + FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL));
        return response;
    }

    // check the topic's read permission
    if (!PermName.isReadable(topicConfig.getPerm())) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark("the topic[" + requestHeader.getTopic() + "] pulling message is forbidden");
        return response;
    }

    // validate the queue id
    if (requestHeader.getQueueId() < 0 || requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
        String errorInfo = "queueId[" + requestHeader.getQueueId() + "] is illegal, Topic :"
                + requestHeader.getTopic() + " topicConfig.readQueueNums: " + topicConfig.getReadQueueNums()
                + " consumer: " + channel.remoteAddress();
        log.warn(errorInfo);
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark(errorInfo);
        return response;
    }

    // resolve the subscription data
    SubscriptionData subscriptionData = null;
    if (hasSubscriptionFlag) {
        try {
            subscriptionData = FilterAPI.buildSubscriptionData(requestHeader.getConsumerGroup(),
                    requestHeader.getTopic(), requestHeader.getSubscription());
        } catch (Exception e) {
            log.warn("parse the consumer's subscription[{}] failed, group: {}", requestHeader.getSubscription(), //
                    requestHeader.getConsumerGroup());
            response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
            response.setRemark("parse the consumer's subscription failed");
            return response;
        }
    } else {
        ConsumerGroupInfo consumerGroupInfo = this.brokerController.getConsumerManager()
                .getConsumerGroupInfo(requestHeader.getConsumerGroup());
        if (null == consumerGroupInfo) {
            log.warn("the consumer's group info not exist, group: {}", requestHeader.getConsumerGroup());
            response.setCode(ResponseCode.SUBSCRIPTION_NOT_EXIST);
            response.setRemark("the consumer's group info not exist"
                    + FAQUrl.suggestTodo(FAQUrl.SAME_GROUP_DIFFERENT_TOPIC));
            return response;
        }

        if (!subscriptionGroupConfig.isConsumeBroadcastEnable() //
                && consumerGroupInfo.getMessageModel() == MessageModel.BROADCASTING) {
            response.setCode(ResponseCode.NO_PERMISSION);
            response.setRemark("the consumer group[" + requestHeader.getConsumerGroup()
                    + "] can not consume by broadcast way");
            return response;
        }

        subscriptionData = consumerGroupInfo.findSubscriptionData(requestHeader.getTopic());
        if (null == subscriptionData) {
            log.warn("the consumer's subscription not exist, group: {}", requestHeader.getConsumerGroup());
            response.setCode(ResponseCode.SUBSCRIPTION_NOT_EXIST);
            response.setRemark("the consumer's subscription not exist"
                    + FAQUrl.suggestTodo(FAQUrl.SAME_GROUP_DIFFERENT_TOPIC));
            return response;
        }

        // make sure the broker's cached subscription is not older than the consumer's
        if (subscriptionData.getSubVersion() < requestHeader.getSubVersion()) {
            log.warn("the broker's subscription is not latest, group: {} {}", requestHeader.getConsumerGroup(),
                    subscriptionData.getSubString());
            response.setCode(ResponseCode.SUBSCRIPTION_NOT_LATEST);
            response.setRemark("the consumer's subscription not latest");
            return response;
        }
    }

    final GetMessageResult getMessageResult = this.brokerController.getMessageStore().getMessage(
            requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getQueueOffset(),
            requestHeader.getMaxMsgNums(), subscriptionData);
    if (getMessageResult != null) {
        response.setRemark(getMessageResult.getStatus().name());
        responseHeader.setNextBeginOffset(getMessageResult.getNextBeginOffset());
        responseHeader.setMinOffset(getMessageResult.getMinOffset());
        responseHeader.setMaxOffset(getMessageResult.getMaxOffset());

        // consumption is slow, suggest pulling from another broker (typically a slave)
        if (getMessageResult.isSuggestPullingFromSlave()) {
            responseHeader.setSuggestWhichBrokerId(subscriptionGroupConfig.getWhichBrokerWhenConsumeSlowly());
        }
        // consumption is normal, keep pulling from the configured broker
        else {
            responseHeader.setSuggestWhichBrokerId(subscriptionGroupConfig.getBrokerId());
        }

        switch (getMessageResult.getStatus()) {
        case FOUND:
            response.setCode(ResponseCode.SUCCESS);
            break;
        case MESSAGE_WAS_REMOVING:
            response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
            break;
        // the queue has no data on this broker; a non-zero request offset must be corrected
        case NO_MATCHED_LOGIC_QUEUE:
        case NO_MESSAGE_IN_QUEUE:
            if (0 != requestHeader.getQueueOffset()) {
                response.setCode(ResponseCode.PULL_OFFSET_MOVED);

                // XXX: warn and notify me
                log.info(
                        "the broker store no queue data, fix the request offset {} to {}, Topic: {} QueueId: {} Consumer Group: {}", //
                        requestHeader.getQueueOffset(), //
                        getMessageResult.getNextBeginOffset(), //
                        requestHeader.getTopic(), //
                        requestHeader.getQueueId(), //
                        requestHeader.getConsumerGroup()//
                );
            } else {
                response.setCode(ResponseCode.PULL_NOT_FOUND);
            }
            break;
        case NO_MATCHED_MESSAGE:
            response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
            break;
        case OFFSET_FOUND_NULL:
            response.setCode(ResponseCode.PULL_NOT_FOUND);
            break;
        case OFFSET_OVERFLOW_BADLY:
            response.setCode(ResponseCode.PULL_OFFSET_MOVED);
            // XXX: warn and notify me
            log.info("the request offset: " + requestHeader.getQueueOffset()
                    + " over flow badly, broker max offset: " + getMessageResult.getMaxOffset()
                    + ", consumer: " + channel.remoteAddress());
            break;
        case OFFSET_OVERFLOW_ONE:
            response.setCode(ResponseCode.PULL_NOT_FOUND);
            break;
        case OFFSET_TOO_SMALL:
            response.setCode(ResponseCode.PULL_OFFSET_MOVED);
            // XXX: warn and notify me
            log.info("the request offset: " + requestHeader.getQueueOffset()
                    + " too small, broker min offset: " + getMessageResult.getMinOffset()
                    + ", consumer: " + channel.remoteAddress());
            break;
        default:
            assert false;
            break;
        }

        switch (response.getCode()) {
        case ResponseCode.SUCCESS:
            // statistics
            this.brokerController.getBrokerStatsManager().incGroupGetNums(requestHeader.getConsumerGroup(),
                    requestHeader.getTopic(), getMessageResult.getMessageCount());
            this.brokerController.getBrokerStatsManager().incGroupGetSize(requestHeader.getConsumerGroup(),
                    requestHeader.getTopic(), getMessageResult.getBufferTotalSize());
            this.brokerController.getBrokerStatsManager().incBrokerGetNums(getMessageResult.getMessageCount());

            try {
                FileRegion fileRegion = new ManyMessageTransfer(
                        response.encodeHeader(getMessageResult.getBufferTotalSize()), getMessageResult);
                channel.writeAndFlush(fileRegion).addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        getMessageResult.release();
                        if (!future.isSuccess()) {
                            log.error("transfer many message by pagecache failed, " + channel.remoteAddress(),
                                    future.cause());
                        }
                    }
                });
            } catch (Throwable e) {
                log.error("", e);
                getMessageResult.release();
            }

            response = null;
            break;
        case ResponseCode.PULL_NOT_FOUND:
            // long polling: suspend the pull request if the broker and the request allow it
            if (brokerAllowSuspend && hasSuspendFlag) {
                PullRequest pullRequest = new PullRequest(request, channel, suspendTimeoutMillisLong,
                        this.brokerController.getMessageStore().now(), requestHeader.getQueueOffset());
                this.brokerController.getPullRequestHoldService().suspendPullRequest(requestHeader.getTopic(),
                        requestHeader.getQueueId(), pullRequest);
                response = null;
                break;
            }

        // return the response to the Consumer so it retries immediately
        case ResponseCode.PULL_RETRY_IMMEDIATELY:
            break;
        case ResponseCode.PULL_OFFSET_MOVED:
            MessageQueue mq = new MessageQueue();
            mq.setTopic(requestHeader.getTopic());
            mq.setQueueId(requestHeader.getQueueId());
            mq.setBrokerName(this.brokerController.getBrokerConfig().getBrokerName());

            OffsetMovedEvent event = new OffsetMovedEvent();
            event.setConsumerGroup(requestHeader.getConsumerGroup());
            event.setMessageQueue(mq);
            event.setOffsetRequest(requestHeader.getQueueOffset());
            event.setOffsetNew(getMessageResult.getNextBeginOffset());
            this.generateOffsetMovedEvent(event);
            break;
        default:
            assert false;
        }
    } else {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark("store getMessage return null");
    }

    // persist the Consumer's consume offset
    boolean storeOffsetEnable = brokerAllowSuspend; // only on the initial call, not a long-polling wakeup
    storeOffsetEnable = storeOffsetEnable && hasCommitOffsetFlag; // the Consumer set the commit-offset flag
    storeOffsetEnable = storeOffsetEnable // only the Master stores offsets
            && this.brokerController.getMessageStoreConfig().getBrokerRole() != BrokerRole.SLAVE;
    if (storeOffsetEnable) {
        this.brokerController.getConsumerOffsetManager().commitOffset(requestHeader.getConsumerGroup(),
                requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getCommitOffset());
    }
    return response;
}
From source file:com.ict.dtube.broker.processor.QueryMessageProcessor.java
License:Apache License
public RemotingCommand queryMessage(ChannelHandlerContext ctx, RemotingCommand request)
        throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(QueryMessageResponseHeader.class);
    final QueryMessageResponseHeader responseHeader = (QueryMessageResponseHeader) response.getCustomHeader();
    final QueryMessageRequestHeader requestHeader =
            (QueryMessageRequestHeader) request.decodeCommandCustomHeader(QueryMessageRequestHeader.class);

    // the opaque must be set here because the response is transferred via sendfile
    response.setOpaque(request.getOpaque());

    final QueryMessageResult queryMessageResult = this.brokerController.getMessageStore().queryMessage(
            requestHeader.getTopic(), requestHeader.getKey(), requestHeader.getMaxNum(),
            requestHeader.getBeginTimestamp(), requestHeader.getEndTimestamp());
    assert queryMessageResult != null;

    responseHeader.setIndexLastUpdatePhyoffset(queryMessageResult.getIndexLastUpdatePhyoffset());
    responseHeader.setIndexLastUpdateTimestamp(queryMessageResult.getIndexLastUpdateTimestamp());

    // messages were found; transfer them via zero-copy
    if (queryMessageResult.getBufferTotalSize() > 0) {
        response.setCode(ResponseCode.SUCCESS);
        response.setRemark(null);

        try {
            FileRegion fileRegion = new QueryMessageTransfer(
                    response.encodeHeader(queryMessageResult.getBufferTotalSize()), queryMessageResult);
            ctx.channel().writeAndFlush(fileRegion).addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    queryMessageResult.release();
                    if (!future.isSuccess()) {
                        log.error("transfer query message by pagecache failed, ", future.cause());
                    }
                }
            });
        } catch (Throwable e) {
            log.error("", e);
            queryMessageResult.release();
        }

        return null;
    }

    response.setCode(ResponseCode.QUERY_NOT_FOUND);
    response.setRemark("can not find message, maybe time range not correct");
    return response;
}