List of usage examples for io.netty.buffer ByteBuf array
public abstract byte[] array();
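array() returns the backing byte[] of a heap buffer; for a direct or composite buffer without an accessible array it throws UnsupportedOperationException, so callers normally guard the call with hasArray() and fall back to getBytes(), as several of the examples below do. A minimal sketch of that pattern (class and method names are illustrative, not taken from any of the listed projects):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ByteBufArrayExample {

    /** Copies the readable bytes of any ByteBuf into a fresh byte[], using array() when available. */
    static byte[] readableBytes(ByteBuf buf) {
        int length = buf.readableBytes();
        if (buf.hasArray()) {
            // Heap buffer: the backing array may be larger than the readable region,
            // so honour arrayOffset() and readerIndex() when copying.
            byte[] backing = buf.array();
            int offset = buf.arrayOffset() + buf.readerIndex();
            byte[] copy = new byte[length];
            System.arraycopy(backing, offset, copy, 0, length);
            return copy;
        }
        // Direct or composite buffer: array() would throw, so copy via getBytes().
        byte[] copy = new byte[length];
        buf.getBytes(buf.readerIndex(), copy);
        return copy;
    }

    public static void main(String[] args) {
        ByteBuf heap = Unpooled.wrappedBuffer("hello".getBytes());
        System.out.println(new String(readableBytes(heap)));
        heap.release();
    }
}

Copying keeps the caller independent of the buffer's reference count; code that returns buf.array() directly, as some examples below do, also exposes any unused capacity in the backing array.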
From source file:com.datastax.driver.core.SnappyCompressor.java
License:Apache License
private ByteBuf compressHeap(ByteBuf input) throws IOException {
    int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes());
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase the reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so
    // can eliminate the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(maxCompressedLength);
    try {
        // Calculate the correct offset.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = Snappy.compress(in, inOffset, len, out, offset);

        // Increase the writerIndex by the number of written bytes.
        output.writerIndex(output.writerIndex() + written);
    } catch (IOException e) {
        // Release the output buffer so we don't leak, and rethrow the exception.
        output.release();
        throw e;
    }
    return output;
}
From source file:com.datastax.driver.core.SnappyCompressor.java
License:Apache License
private ByteBuf decompressHeap(ByteBuf input) throws IOException {
    // Not a direct buffer so use byte arrays...
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase the reader index.
    input.readerIndex(input.writerIndex());

    if (!Snappy.isValidCompressedBuffer(in, inOffset, len))
        throw new DriverInternalError("Provided frame does not appear to be Snappy compressed");

    // Allocate a heap buffer from the ByteBufAllocator as we may use a PooledByteBufAllocator and so
    // can eliminate the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len));
    try {
        // Calculate the correct offset.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = Snappy.uncompress(in, inOffset, len, out, offset);

        // Increase the writerIndex by the number of written bytes.
        output.writerIndex(output.writerIndex() + written);
    } catch (IOException e) {
        // Release the output buffer so we don't leak, and rethrow the exception.
        output.release();
        throw e;
    }
    return output;
}
From source file:com.dianping.cat.consumer.dump.TreeHelper.java
License:Open Source License
public static void init(MessageCodec codec) {
    MessageId id = MessageId.parse("domain0-0a010200-405746-0");
    DefaultMessageTree tree = new DefaultMessageTree();

    tree.setDomain(id.getDomain());
    tree.setHostName("mock-host");
    tree.setIpAddress(id.getIpAddress());
    tree.setThreadGroupName("test");
    tree.setThreadId("test");
    tree.setThreadName("test");
    tree.setMessageId(id.toString());
    // test for rpc index
    tree.setSessionToken(id.toString());

    if (codec != null) {
        ByteBuf buf = codec.encode(tree);

        tree.setBuffer(buf);
        m_data = buf.array();
    }
}
From source file:com.difference.historybook.proxy.littleproxy.LittleProxyResponse.java
License:Apache License
@Override
public byte[] getContent() {
    ByteBuf buf = response.content();
    byte[] bytes;
    int length = buf.readableBytes();

    if (buf.hasArray()) {
        bytes = buf.array();
    } else {
        bytes = new byte[length];
        buf.getBytes(buf.readerIndex(), bytes);
    }
    buf.release();
    return bytes;
}
From source file:com.github.milenkovicm.kafka.protocol.Convert.java
License:Apache License
public static String decodeString(ByteBuf buf) {
    int readable = decodeShort(buf); // N => int16
    ByteBuf bytes = buf.readBytes(readable); // content

    if (bytes.hasArray()) {
        return new String(bytes.array(), DEFAULT_CHARSET);
    } else {
        byte[] array = new byte[readable];
        bytes.readBytes(array);
        return new String(array, DEFAULT_CHARSET);
    }
}
From source file:com.github.mrstampy.kitchensync.stream.header.AbstractChunkProcessor.java
License:Open Source License
@Override
public final byte[] process(Streamer<?> streamer, byte[] message) {
    ByteBuf buf = processImpl(streamer, message);

    return buf.array();
}
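Returning buf.array() directly, as above, assumes processImpl produces an unpooled heap buffer whose backing array holds exactly the processed bytes. When that is not guaranteed, the readable region can be copied out regardless of buffer type; a sketch using Netty 4.1's ByteBufUtil (the ChunkBytes class is hypothetical):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;

final class ChunkBytes {

    /** Copies the readable region of any ByteBuf, heap or direct, into a fresh byte[]. */
    static byte[] toByteArray(ByteBuf buf) {
        // ByteBufUtil.getBytes copies readableBytes() bytes starting at readerIndex().
        return ByteBufUtil.getBytes(buf);
    }
}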
From source file:com.github.mrstampy.pprspray.core.receiver.AbstractChunkReceiver.java
License:Open Source License
/**
 * Rehydrate and transform.
 *
 * @param set
 *          the set of chunks to rehydrate
 * @return the byte[]
 * @see #setTransformer(MediaTransformer)
 */
protected byte[] rehydrateAndTransform(Set<AMC> set) {
    int size = calcSize(set);
    ByteBuf buf = Unpooled.buffer(size);

    for (AMC chunk : set) {
        buf.writeBytes(chunk.getData());
    }

    return transform(buf.array());
}
From source file:com.github.mrstampy.pprspray.core.streamer.AbstractMediaStreamer.java
License:Open Source License
/**
 * Sends a
 * {@link NegotiationMessageUtils#getNegotiationMessage(int, MediaStreamType)}
 * to the destinations and awaits acknowledgement. If affirmative,
 * {@link #start()} is invoked to commence streaming.
 *
 * @see NegotiationEventBus
 * @see NegotiationChunk
 * @see NegotiationAckChunk
 * @see NegotiationAckReceiver
 * @see AbstractNegotiationSubscriber
 * @see AcceptingNegotationSubscriber
 */
protected void negotiate() {
    log.debug("Negotiating with {} for media hash {}", getDestination(), getMediaHash());

    notifying.set(true);
    notifyNegotiating();

    ByteBuf buf = NegotiationMessageUtils.getNegotiationMessage(getMediaHash(), getType());

    ChunkEventBus.register(new AckReceiver(getMediaHash()));

    getChannel().send(buf.array(), getDestination());
}
From source file:com.github.mrstampy.pprspray.core.streamer.audio.DefaultAudioTransformer.java
License:Open Source License
@Override
public byte[] transform(ByteBuf buf) {
    return buf.array();
}
From source file:com.github.mrstampy.pprspray.core.streamer.footer.MediaFooter.java
License:Open Source License
private byte[] buildFooter() {
    ByteBuf buf = Unpooled.buffer(MediaStreamerUtils.FOOTER_LENGTH);

    buf.writeBytes(getType().eomBytes());
    buf.writeInt(getMessageHash());
    buf.writeInt(getMediaHash());

    return buf.array();
}