Example usage for io.netty.channel ChannelFutureListener CLOSE

Introduction

This page lists example usages of io.netty.channel ChannelFutureListener CLOSE.

Prototype

ChannelFutureListener CLOSE

To view the source code for io.netty.channel ChannelFutureListener CLOSE, click the Source link.

Document

A ChannelFutureListener that closes the Channel which is associated with the specified ChannelFuture.
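
Internally, CLOSE behaves like a listener whose operationComplete simply calls future.channel().close(), on success and failure alike. Below is a minimal sketch of the typical write-then-close pattern, offered as an illustration rather than taken from the examples that follow (the EchoCloseHandler name is hypothetical):

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.CharsetUtil;
import io.netty.util.ReferenceCountUtil;

// Hypothetical handler: sends one farewell message, then closes the
// connection as soon as the write has completed.
public class EchoCloseHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        ReferenceCountUtil.release(msg); // the inbound message is ignored
        ChannelFuture f = ctx.writeAndFlush(
                Unpooled.copiedBuffer("bye\n", CharsetUtil.UTF_8));
        // Equivalent to: f.addListener(future -> future.channel().close());
        f.addListener(ChannelFutureListener.CLOSE);
    }
}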

Usage

From source file: org.apache.hadoop.hdfs.server.datanode.web.SimpleHttpProxyHandler.java

License: Apache License

private static void closeOnFlush(Channel ch) {
    if (ch.isActive()) {
        ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
}
}
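
Writing Unpooled.EMPTY_BUFFER before attaching CLOSE is a common flush-then-close idiom: the empty write forces any data still queued on the channel out first, and the channel is closed only once that flush has completed, so in-flight proxied data is not truncated.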

From source file: org.apache.hadoop.hdfs.server.datanode.web.webhdfs.HdfsWriter.java

License: Apache License

@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpContent chunk) throws IOException {
    chunk.content().readBytes(out, chunk.content().readableBytes());
    if (chunk instanceof LastHttpContent) {
        response.headers().set(CONNECTION, CLOSE);
        ctx.write(response).addListener(ChannelFutureListener.CLOSE);
        releaseDfsResources();
    }
}
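
Note the two distinct mechanisms at work here: the Connection: close header tells the HTTP peer not to reuse the connection, while ChannelFutureListener.CLOSE actually closes the server-side channel once the response write completes. The examples below pair them the same way.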

From source file: org.apache.hadoop.hdfs.server.datanode.web.webhdfs.HdfsWriter.java

License: Apache License

@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    releaseDfsResources();
    DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
    resp.headers().set(CONNECTION, CLOSE);
    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}

From source file: org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.java

License: Apache License

@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    LOG.debug("Error ", cause);
    DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
    resp.headers().set(CONNECTION, CLOSE);
    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}

From source file: org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.java

License: Apache License

private void onOpen(ChannelHandlerContext ctx) throws IOException {
    final String nnId = params.namenodeId();
    final int bufferSize = params.bufferSize();
    final long offset = params.offset();
    final long length = params.length();

    DefaultHttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    HttpHeaders headers = response.headers();
    // Allow the UI to access the file
    headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
    headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
    headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
    headers.set(CONNECTION, CLOSE);

    final DFSClient dfsclient = newDfsClient(nnId, conf);
    HdfsDataInputStream in = dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
    in.seek(offset);

    long contentLength = in.getVisibleLength() - offset;
    if (length >= 0) {
        contentLength = Math.min(contentLength, length);
    }
    final InputStream data;
    if (contentLength >= 0) {
        headers.set(CONTENT_LENGTH, contentLength);
        data = new LimitInputStream(in, contentLength);
    } else {
        data = in;
    }

    ctx.write(response);
    ctx.writeAndFlush(new ChunkedStream(data) {
        @Override
        public void close() throws Exception {
            super.close();
            dfsclient.close();
        }
    }).addListener(ChannelFutureListener.CLOSE);
}
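
Because the body is streamed as a ChunkedStream (assuming a ChunkedWriteHandler is installed upstream in the pipeline), the write future completes only after the last chunk has been transferred. CLOSE therefore tears the channel down only once the whole requested file range has been sent and the overridden close() has released the DFSClient.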

From source file: org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.java

License: Apache License

private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
    MD5MD5CRC32FileChecksum checksum = null;
    final String nnId = params.namenodeId();
    DFSClient dfsclient = newDfsClient(nnId, conf);
    try {
        checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
        dfsclient.close();
        dfsclient = null;
    } finally {
        IOUtils.cleanup(LOG, dfsclient);
    }
    final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
    resp.headers().set(CONTENT_LENGTH, js.length);
    resp.headers().set(CONNECTION, CLOSE);
    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}

From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.FSImageHandler.java

License: Apache License

@Override
public void channelRead0(ChannelHandlerContext ctx, HttpRequest request) throws Exception {
    if (request.method() != HttpMethod.GET) {
        DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, METHOD_NOT_ALLOWED);
        resp.headers().set(CONNECTION, CLOSE);
        ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
        return;
    }

    QueryStringDecoder decoder = new QueryStringDecoder(request.uri());
    final String op = getOp(decoder);

    final String content;
    String path = getPath(decoder);
    switch (op) {
    case "GETFILESTATUS":
        content = image.getFileStatus(path);
        break;
    case "LISTSTATUS":
        content = image.listStatus(path);
        break;
    case "GETACLSTATUS":
        content = image.getAclStatus(path);
        break;
    case "GETXATTRS":
        List<String> names = getXattrNames(decoder);
        String encoder = getEncoder(decoder);
        content = image.getXAttrs(path, names, encoder);
        break;
    case "LISTXATTRS":
        content = image.listXAttrs(path);
        break;
    default:
        throw new IllegalArgumentException("Invalid value for webhdfs parameter" + " \"op\"");
    }

    LOG.info("op=" + op + " target=" + path);

    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.OK,
            Unpooled.wrappedBuffer(content.getBytes(Charsets.UTF_8)));
    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
    resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
    resp.headers().set(CONNECTION, CLOSE);
    ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
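
Both responses above are enqueued with ctx.write(...) rather than ctx.writeAndFlush(...). The CLOSE listener only runs once the write actually completes, which in turn requires a flush elsewhere, for example a channelReadComplete override that calls ctx.flush().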

From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.FSImageHandler.java

License: Apache License

@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    Exception e = cause instanceof Exception ? (Exception) cause : new Exception(cause);
    final String output = JsonUtil.toJsonString(e);
    ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
    final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, INTERNAL_SERVER_ERROR, content);

    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
    if (e instanceof IllegalArgumentException) {
        resp.setStatus(BAD_REQUEST);
    } else if (e instanceof FileNotFoundException) {
        resp.setStatus(NOT_FOUND);
    } else if (e instanceof IOException) {
        resp.setStatus(FORBIDDEN);
    }
    resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
    resp.headers().set(CONNECTION, CLOSE);
    ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}

From source file: org.apache.hyracks.http.server.ChunkedResponse.java

License: Apache License

@Override
public void close() throws IOException {
    writer.close();
    if (error == null && response.status() == HttpResponseStatus.OK) {
        if (!done) {
            future = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
        }
    } else {
        // There was an error
        if (headerSent) {
            LOGGER.log(Level.WARNING, "Error after header write of chunked response");
            if (error != null) {
                error.release();
            }
            future = ctx.channel().close();
        } else {
            if (keepAlive && response.status() != HttpResponseStatus.UNAUTHORIZED) {
                response.headers().remove(HttpHeaderNames.CONNECTION);
            }
            // we didn't send anything to the user, we need to send an unchunked error response
            fullResponse(response.protocolVersion(), response.status(),
                    error == null ? ctx.alloc().buffer(0, 0) : error, response.headers());
        }
        if (response.status() != HttpResponseStatus.UNAUTHORIZED) {
            // since the request failed, we need to close the channel on complete
            future.addListener(ChannelFutureListener.CLOSE);
        }
    }
    done = true;
}
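
Here CLOSE is attached only on failure paths: a clean OK response leaves the connection to the keep-alive logic, an error after the header has already gone out closes the channel directly, and any other non-UNAUTHORIZED error closes it once the unchunked error response has been written.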

From source file: org.apache.hyracks.http.server.FullResponse.java

License: Apache License

@Override
public void close() throws IOException {
    writer.close();
    FullHttpResponse fullResponse = response.replace(Unpooled.copiedBuffer(baos.toByteArray()));
    if (keepAlive) {
        if (response.status() == HttpResponseStatus.OK
                || response.status() == HttpResponseStatus.UNAUTHORIZED) {
            fullResponse.headers().setInt(HttpHeaderNames.CONTENT_LENGTH,
                    fullResponse.content().readableBytes());
        } else {
            fullResponse.headers().remove(HttpHeaderNames.CONNECTION);
        }
    }
    future = ctx.writeAndFlush(fullResponse);
    if (response.status() != HttpResponseStatus.OK && response.status() != HttpResponseStatus.UNAUTHORIZED) {
        future.addListener(ChannelFutureListener.CLOSE);
    }
}