List of usage examples for the io.netty.channel.ChannelHandlerContext#flush() method
@Override ChannelHandlerContext flush();
From source file:org.apache.giraph.comm.netty.handler.SaslServerHandler.java
License:Apache License
@Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (LOG.isDebugEnabled()) { LOG.debug("messageReceived: Got " + msg.getClass()); }/* w ww . jav a 2 s .c om*/ WritableRequest writableRequest = (WritableRequest) msg; // Simulate a closed connection on the first request (if desired) // TODO: Move out into a separate, dedicated handler. if (closeFirstRequest && !ALREADY_CLOSED_FIRST_REQUEST) { LOG.info("messageReceived: Simulating closing channel on first " + "request " + writableRequest.getRequestId() + " from " + writableRequest.getClientId()); setAlreadyClosedFirstRequest(); ctx.close(); return; } if (writableRequest.getType() == RequestType.SASL_TOKEN_MESSAGE_REQUEST) { // initialize server-side SASL functionality, if we haven't yet // (in which case we are looking at the first SASL message from the // client). SaslNettyServer saslNettyServer = ctx.attr(NettyServer.CHANNEL_SASL_NETTY_SERVERS).get(); if (saslNettyServer == null) { if (LOG.isDebugEnabled()) { LOG.debug("No saslNettyServer for " + ctx.channel() + " yet; creating now, with secret manager: " + secretManager); } try { saslNettyServer = new SaslNettyServer(secretManager, AuthMethod.SIMPLE); } catch (IOException ioe) { //TODO: throw new RuntimeException(ioe); } ctx.attr(NettyServer.CHANNEL_SASL_NETTY_SERVERS).set(saslNettyServer); } else { if (LOG.isDebugEnabled()) { LOG.debug("Found existing saslNettyServer on server:" + ctx.channel().localAddress() + " for client " + ctx.channel().remoteAddress()); } } ((SaslTokenMessageRequest) writableRequest).processToken(saslNettyServer); // Send response to client. ctx.write(writableRequest); if (saslNettyServer.isComplete()) { // If authentication of client is complete, we will also send a // SASL-Complete message to the client. 
if (LOG.isDebugEnabled()) { LOG.debug("SASL authentication is complete for client with " + "username: " + saslNettyServer.getUserName()); } SaslCompleteRequest saslComplete = new SaslCompleteRequest(); ctx.write(saslComplete); if (LOG.isDebugEnabled()) { LOG.debug( "Removing SaslServerHandler from pipeline since SASL " + "authentication is complete."); } ctx.pipeline().remove(this); } ctx.flush(); // do not send upstream to other handlers: no further action needs to be // done for SASL_TOKEN_MESSAGE_REQUEST requests. return; } else { // Client should not be sending other-than-SASL messages before // SaslServerHandler has removed itself from the pipeline. Such non-SASL // requests will be denied by the Authorize channel handler (the next // handler upstream in the server pipeline) if SASL authentication has // not completed. LOG.warn("Sending upstream an unexpected non-SASL message : " + writableRequest); ctx.fireChannelRead(msg); } }
From source file:org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.java
License:Apache License
@Override public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { if (evt instanceof BufferCallEvent) { BufferCallEvent bcEvt = (BufferCallBeforeInitHandler.BufferCallEvent) evt; switch (bcEvt.action) { case FLUSH: for (Call call : id2Call.values()) { ctx.write(call);/* www .jav a 2 s . c om*/ } break; case FAIL: for (Call call : id2Call.values()) { call.setException(bcEvt.error); } break; } ctx.flush(); ctx.pipeline().remove(this); } else if (evt instanceof CallEvent) { // just remove the call for now until we add other call event other than timeout and cancel. id2Call.remove(((CallEvent) evt).call.id); } else { ctx.fireUserEventTriggered(evt); } }
From source file:org.apache.hadoop.hbase.security.CryptoAESWrapHandler.java
License:Apache License
/**
 * Drains the pending-write queue, encrypts the accumulated bytes with
 * crypto-AES, and writes a length-prefixed frame downstream.
 *
 * <p>The promise returned by the queue drain completes only once both the
 * length and ciphertext writes finish (combined via {@code PromiseCombiner}).
 */
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
    if (queue.isEmpty()) {
        return;
    }
    ByteBuf pending = null;
    try {
        ChannelPromise aggregatePromise = ctx.newPromise();
        int readableBytes = queue.readableBytes();
        pending = queue.remove(readableBytes, aggregatePromise);
        byte[] plaintext = new byte[readableBytes];
        pending.readBytes(plaintext);
        byte[] ciphertext = cryptoAES.wrap(plaintext, 0, plaintext.length);
        // Frame layout: 4-byte big-endian length, then the wrapped payload.
        ChannelPromise lengthPromise = ctx.newPromise();
        ctx.write(ctx.alloc().buffer(4).writeInt(ciphertext.length), lengthPromise);
        ChannelPromise payloadPromise = ctx.newPromise();
        ctx.write(Unpooled.wrappedBuffer(ciphertext), payloadPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(lengthPromise, payloadPromise);
        combiner.finish(aggregatePromise);
        ctx.flush();
    } finally {
        if (pending != null) {
            ReferenceCountUtil.safeRelease(pending);
        }
    }
}
From source file:org.apache.hadoop.hbase.security.SaslWrapHandler.java
License:Apache License
/**
 * SASL-wraps everything buffered in the pending-write queue and emits it as a
 * single length-prefixed frame.
 *
 * <p>Mirrors the crypto-AES variant: one promise tracks the drained data, and
 * it is completed only when both downstream writes (length + wrapped bytes)
 * succeed or fail.
 */
@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
    if (queue.isEmpty()) {
        return;
    }
    ByteBuf drained = null;
    try {
        ChannelPromise combinedPromise = ctx.newPromise();
        int available = queue.readableBytes();
        drained = queue.remove(available, combinedPromise);
        byte[] raw = new byte[available];
        drained.readBytes(raw);
        byte[] wrapped = saslClient.wrap(raw, 0, raw.length);
        // Write the 4-byte length header followed by the wrapped payload.
        ChannelPromise headerPromise = ctx.newPromise();
        ctx.write(ctx.alloc().buffer(4).writeInt(wrapped.length), headerPromise);
        ChannelPromise bodyPromise = ctx.newPromise();
        ctx.write(Unpooled.wrappedBuffer(wrapped), bodyPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(headerPromise, bodyPromise);
        combiner.finish(combinedPromise);
        ctx.flush();
    } finally {
        if (drained != null) {
            ReferenceCountUtil.safeRelease(drained);
        }
    }
}
From source file:org.apache.hyracks.http.server.CLFLogger.java
License:Apache License
@Override public void flush(ChannelHandlerContext ctx) throws Exception { if (lastChunk) { printAndPrepare();//from w w w.j a va2s. c o m lastChunk = false; } ctx.flush(); }
From source file:org.apache.pulsar.broker.service.Consumer.java
License:Apache License
/** * Dispatch a list of entries to the consumer. <br/> * <b>It is also responsible to release entries data and recycle entries object.</b> * * @return a promise that can be use to track when all the data has been written into the socket *///w ww.ja v a2s.c o m public Pair<ChannelPromise, Integer> sendMessages(final List<Entry> entries) { final ChannelHandlerContext ctx = cnx.ctx(); final MutablePair<ChannelPromise, Integer> sentMessages = new MutablePair<ChannelPromise, Integer>(); final ChannelPromise writePromise = ctx.newPromise(); sentMessages.setLeft(writePromise); if (entries.isEmpty()) { if (log.isDebugEnabled()) { log.debug("[{}] List of messages is empty, triggering write future immediately for consumerId {}", subscription, consumerId); } writePromise.setSuccess(); sentMessages.setRight(0); return sentMessages; } try { sentMessages.setRight(updatePermitsAndPendingAcks(entries)); } catch (PulsarServerException pe) { log.warn("[{}] [{}] consumer doesn't support batch-message {}", subscription, consumerId, cnx.getRemoteEndpointProtocolVersion()); subscription.markTopicWithBatchMessagePublished(); sentMessages.setRight(0); // disconnect consumer: it will update dispatcher's availablePermits and resend pendingAck-messages of this // consumer to other consumer disconnect(); return sentMessages; } ctx.channel().eventLoop().execute(() -> { for (int i = 0; i < entries.size(); i++) { Entry entry = entries.get(i); PositionImpl pos = (PositionImpl) entry.getPosition(); MessageIdData.Builder messageIdBuilder = MessageIdData.newBuilder(); MessageIdData messageId = messageIdBuilder.setLedgerId(pos.getLedgerId()) .setEntryId(pos.getEntryId()).build(); ByteBuf metadataAndPayload = entry.getDataBuffer(); // increment ref-count of data and release at the end of process: so, we can get chance to call entry.release metadataAndPayload.retain(); // skip checksum by incrementing reader-index if consumer-client doesn't support checksum verification if 
(cnx.getRemoteEndpointProtocolVersion() < ProtocolVersion.v6.getNumber()) { readChecksum(metadataAndPayload); } if (log.isDebugEnabled()) { log.debug("[{}] Sending message to consumerId {}, entry id {}", subscription, consumerId, pos.getEntryId()); } // We only want to pass the "real" promise on the last entry written ChannelPromise promise = ctx.voidPromise(); if (i == (entries.size() - 1)) { promise = writePromise; } ctx.write(Commands.newMessage(consumerId, messageId, metadataAndPayload), promise); messageId.recycle(); messageIdBuilder.recycle(); entry.release(); } ctx.flush(); }); return sentMessages; }
From source file:org.apache.rocketmq.namesrv.telnet.TelnetHandler.java
License:Apache License
@Override public void channelActive(ChannelHandlerContext ctx) throws Exception { // Send greeting for a new connection. // ctx.write("Welcome to " + InetAddress.getLocalHost().getHostName() + "!\r\n"); // ctx.write("It is " + new Date() + " now.\r\n"); ctx.write(builder.toString());/*from www. j av a2 s .co m*/ ctx.flush(); }
From source file:org.apache.spark.network.netty.FileServerHandler.java
License:Apache License
@Override public void channelRead0(ChannelHandlerContext ctx, String blockIdString) { BlockId blockId = BlockId.apply(blockIdString); FileSegment fileSegment = pResolver.getBlockLocation(blockId); // if getBlockLocation returns null, close the channel if (fileSegment == null) { //ctx.close(); return;/* ww w . j av a2 s . c o m*/ } File file = fileSegment.file(); if (file.exists()) { if (!file.isFile()) { ctx.write(new FileHeader(0, blockId).buffer()); ctx.flush(); return; } long length = fileSegment.length(); if (length > Integer.MAX_VALUE || length <= 0) { ctx.write(new FileHeader(0, blockId).buffer()); ctx.flush(); return; } int len = (int) length; ctx.write((new FileHeader(len, blockId)).buffer()); try { ctx.write(new DefaultFileRegion(new FileInputStream(file).getChannel(), fileSegment.offset(), fileSegment.length())); } catch (Exception e) { LOG.error("Exception: ", e); } } else { ctx.write(new FileHeader(0, blockId).buffer()); } ctx.flush(); }
From source file:org.apache.tajo.pullserver.HttpDataServerHandler.java
License:Apache License
@Override public void channelRead0(ChannelHandlerContext ctx, FullHttpRequest request) throws Exception { if (request.getMethod() != HttpMethod.GET) { sendError(ctx, HttpResponseStatus.METHOD_NOT_ALLOWED); return;//from ww w . j a va 2s.co m } String base = ContainerLocalizer.USERCACHE + "/" + userName + "/" + ContainerLocalizer.APPCACHE + "/" + appId + "/output" + "/"; final Map<String, List<String>> params = new QueryStringDecoder(request.getUri()).parameters(); List<FileChunk> chunks = Lists.newArrayList(); List<String> taskIds = splitMaps(params.get("ta")); int sid = Integer.valueOf(params.get("sid").get(0)); int partitionId = Integer.valueOf(params.get("p").get(0)); for (String ta : taskIds) { File file = new File(base + "/" + sid + "/" + ta + "/output/" + partitionId); FileChunk chunk = new FileChunk(file, 0, file.length()); chunks.add(chunk); } FileChunk[] file = chunks.toArray(new FileChunk[chunks.size()]); // Write the content. if (file == null) { HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NO_CONTENT); if (!HttpHeaders.isKeepAlive(request)) { ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); } else { response.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE); ctx.writeAndFlush(response); } } else { HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); ChannelFuture writeFuture = null; long totalSize = 0; for (FileChunk chunk : file) { totalSize += chunk.length(); } HttpHeaders.setContentLength(response, totalSize); if (HttpHeaders.isKeepAlive(request)) { response.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE); } // Write the initial line and the header. 
writeFuture = ctx.write(response); for (FileChunk chunk : file) { writeFuture = sendFile(ctx, chunk); if (writeFuture == null) { sendError(ctx, HttpResponseStatus.NOT_FOUND); return; } } if (ctx.pipeline().get(SslHandler.class) == null) { writeFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); } else { ctx.flush(); } // Decide whether to close the connection or not. if (!HttpHeaders.isKeepAlive(request)) { // Close the connection when the whole content is written out. writeFuture.addListener(ChannelFutureListener.CLOSE); } } }
From source file:org.bridje.http.impl.HttpServerChannelHandler.java
License:Apache License
/**
 * Finalizes the buffered HTTP response and writes it to the client.
 *
 * <p>Closes the response buffer, stamps the standard headers (server name,
 * content type/length, keep-alive), copies any user-set headers (supporting
 * multi-valued {@code Iterable} values), attaches cookies, flushes the frame,
 * and releases per-request resources.
 *
 * @param ctx context of the channel the response is written to
 * @throws IOException if closing the response buffer fails
 */
private void sendResponse(ChannelHandlerContext ctx) throws IOException {
    resp.close();
    int contentLength = resp.getBuffer().readableBytes();
    DefaultHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1,
            HttpResponseStatus.valueOf(resp.getStatusCode()), resp.getBuffer());
    resp.setHeader(SERVER, server.getServerName());
    resp.setHeader(CONTENT_TYPE, resp.getContentType());
    resp.setHeader(CONTENT_LENGTH, contentLength);
    resp.setHeader(CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    for (Map.Entry<String, Object> header : resp.getHeadersMap().entrySet()) {
        Object value = header.getValue();
        if (value instanceof Iterable) {
            // Multi-valued header.
            response.headers().set(header.getKey(), (Iterable<?>) value);
        } else {
            response.headers().set(header.getKey(), value);
        }
    }
    writeCookies(response);
    ctx.write(response);
    ctx.flush();
    closeAll();
}