Example usage for java.nio ByteBuffer limit

Introduction

This page lists example usages of java.nio.ByteBuffer.limit(), collected from open-source projects.

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
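
A minimal, self-contained sketch (the class name LimitDemo is our own) of what limit() reports before and after flip():

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // capacity 16, position 0, limit 16
        buf.put(new byte[] { 1, 2, 3, 4 });       // position advances to 4; limit unchanged
        System.out.println(buf.limit());          // 16: for a fresh buffer, limit == capacity

        buf.flip();                               // limit = old position (4), position = 0
        System.out.println(buf.limit());          // 4: limit now marks the end of readable data

        byte[] dst = new byte[buf.limit()];       // size a copy by the readable extent
        buf.get(dst);                             // reads exactly limit() bytes
    }
}

Many of the examples below rely on this convention: once a producing call has filled and flipped a buffer, limit() is the usable payload size.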

Usage

From source file:org.apache.hadoop.hive.serde2.compression.TestSnappyCompDe.java

@Test
public void testDoubleCol() {
    ColumnBuffer[] inputCols = new ColumnBuffer[] { columnDouble };

    ByteBuffer compressed = compDe.compress(inputCols);
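    // the returned buffer's limit() is passed to decompress() as the compressed payload size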
    ColumnBuffer[] outputCols = compDe.decompress(compressed, compressed.limit());

    assertArrayEquals(inputCols[0].toTColumn().getDoubleVal().getValues().toArray(),
            outputCols[0].toTColumn().getDoubleVal().getValues().toArray());
}

From source file:org.apache.hadoop.hive.serde2.compression.TestSnappyCompDe.java

@Test
public void testStringCol() {
    ColumnBuffer[] inputCols = new ColumnBuffer[] { columnStr };

    ByteBuffer compressed = compDe.compress(inputCols);
    ColumnBuffer[] outputCols = compDe.decompress(compressed, compressed.limit());

    assertArrayEquals(inputCols[0].toTColumn().getStringVal().getValues().toArray(),
            outputCols[0].toTColumn().getStringVal().getValues().toArray());
}

From source file:org.apache.hadoop.hive.serde2.compression.TestSnappyCompDe.java

@Test
public void testMulti() {
    ColumnBuffer[] inputCols = new ColumnBuffer[] { columnInt, columnStr };

    ByteBuffer compressed = compDe.compress(inputCols);
    ColumnBuffer[] outputCols = compDe.decompress(compressed, compressed.limit());

    assertArrayEquals(inputCols[0].toTColumn().getI32Val().getValues().toArray(),
            outputCols[0].toTColumn().getI32Val().getValues().toArray());
    assertArrayEquals(inputCols[1].toTColumn().getStringVal().getValues().toArray(),
            outputCols[1].toTColumn().getStringVal().getValues().toArray());
}

From source file:com.cloudera.ByteBufferRecordReader.java

@Override
public synchronized boolean nextKeyValue() throws IOException {
    if (key == null) {
        key = new LongWritable();
    }
    if (value == null) {
        value = new ByteBufferWritable();
    }
    if (pos >= end) {
        return false;
    }

    int numBytesRead = 0;
    // Use zero-copy ByteBuffer reads if available
    if (inputStream instanceof FSDataInputStream) {
        FSDataInputStream fsIn = (FSDataInputStream) inputStream;
        ByteBuffer buf = fsIn.read(bufferPool, (int) (end - start), readOption);
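        // limit() of the returned buffer marks the end of the data just read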
        numBytesRead += buf.limit();
        pos += buf.limit();
        // Update stats
        InputStream wrappedStream = fsIn.getWrappedStream();
        if (wrappedStream instanceof DFSInputStream) {
            DFSInputStream dfsIn = (DFSInputStream) wrappedStream;
            updateStats(dfsIn.getReadStatistics());
        }
        // Switch out the buffers
        if (value.getBuffer() != null) {
            fsIn.releaseBuffer(value.getBuffer());
        }
        value.setByteBuffer(buf);
    }
    // Fallback to normal byte[] based reads with a copy to the ByteBuffer
    else {
        byte[] b = new byte[(int) (end - start)];
        IOUtils.readFully(inputStream, b);
        numBytesRead += b.length;
        pos += b.length;
        value.setByteBuffer(ByteBuffer.wrap(b));
    }

    return numBytesRead > 0;
}

From source file:org.apache.hadoop.hive.serde2.compression.TestSnappyCompDe.java

@Test
public void testNulls() {
    ColumnBuffer[] inputCols;
    ArrayList<String> someStrings = new ArrayList<String>();
    someStrings.add("test1");
    someStrings.add("test2");
    ColumnBuffer columnStr1 = new ColumnBuffer(
            TColumn.stringVal(new TStringColumn(someStrings, ByteBuffer.wrap(firstNullMask))));
    ColumnBuffer columnStr2 = new ColumnBuffer(
            TColumn.stringVal(new TStringColumn(someStrings, ByteBuffer.wrap(secondNullMask))));
    ColumnBuffer columnStr3 = new ColumnBuffer(
            TColumn.stringVal(new TStringColumn(someStrings, ByteBuffer.wrap(thirdNullMask))));

    inputCols = new ColumnBuffer[] { columnStr1, columnStr2, columnStr3 };

    ByteBuffer compressed = compDe.compress(inputCols);
    ColumnBuffer[] outputCols = compDe.decompress(compressed, compressed.limit());

    assertArrayEquals(inputCols, outputCols);
}

From source file:org.apache.hadoop.hive.ql.io.orc.OrcSplit.java

@Override
public void write(DataOutput out) throws IOException {
    //serialize path, offset, length using FileSplit
    super.write(out);

    int flags = (hasBase ? BASE_FLAG : 0) | (isOriginal ? ORIGINAL_FLAG : 0) | (hasFooter ? FOOTER_FLAG : 0)
            | (fileId != null ? HAS_FILEID_FLAG : 0);
    out.writeByte(flags);
    out.writeInt(deltas.size());
    for (AcidInputFormat.DeltaMetaData delta : deltas) {
        delta.write(out);
    }
    if (hasFooter) {
        // serialize FileMetaInfo fields
        Text.writeString(out, fileMetaInfo.compressionType);
        WritableUtils.writeVInt(out, fileMetaInfo.bufferSize);
        WritableUtils.writeVInt(out, fileMetaInfo.metadataSize);

        // serialize FileMetaInfo field footer
        ByteBuffer footerBuff = fileMetaInfo.footerBuffer;
        footerBuff.reset(); // move position back to the previously set mark

        // write length of buffer: limit() - position() is the number of bytes remaining
        WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position());
        out.write(footerBuff.array(), footerBuff.position(), footerBuff.limit() - footerBuff.position());
        WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId());
    }
    if (fileId != null) {
        out.writeLong(fileId.longValue());
    }
}

From source file:net.phoenix.thrift.hello.ThreadedSelectorTest.java

@Test
public void testByteBuffer() throws TException, IOException, InterruptedException {
    LOG.info("Client starting....");
    String name = "World";
    // TTransport transport = new TNonblockingSocket("localhost", 9804);
    TTransport transport = new TFramedTransport(new TSocket("localhost", 9804));
    transport.open();
    // TTransport transport = new TTransport(socket);
    TProtocol protocol = new TBinaryProtocol(transport);
    HelloService.Client client = new HelloService.Client(protocol);
    try {
        Hello.HelloRequest.Builder request = Hello.HelloRequest.newBuilder();
        request.setName(name);
        Hello.User.Builder user = Hello.User.newBuilder();
        user.setName("hello");
        user.setPassword("hello");
        request.setUser(user.build());
        ByteBuffer requestBuffer = ByteBuffer.wrap(request.build().toByteArray());
        ByteBuffer buffer = client.hello(requestBuffer);
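        // parse only the readable region of the response, [position(), limit())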
        Hello.HelloResponse response = Hello.HelloResponse
                .parseFrom(ArrayUtils.subarray(buffer.array(), buffer.position(), buffer.limit()));
        String message = response.getMessage();
        assertEquals(message, "Hello " + name);
        // System.out.println(message);
    } finally {
        transport.close();
    }
}

From source file:org.cloudata.core.commitlog.pipe.Bulk.java

private boolean readHeader(ByteBuffer buf) throws IOException {
    ByteBuffer headerBuf = buf.duplicate();
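    // duplicate() shares the content but carries its own position and limit, so scanning the header leaves buf untouched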

    if (seq < 0 && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(0);
        seq = headerBuf.getInt();

        if (seq == Constants.PIPE_DISCONNECT) {
            LOG.debug("receive PIPE_DISCONNECT");
            throw new PipeClosing();
        }

        readBytesLength += 4;
    }

    if (dirNameBytes == null && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - 4);
        int len = headerBuf.getInt();
        if (len > 1000000) {
            throw new IOException("dirName byte length is too long [" + len + "]");
        }

        dirNameBytes = new byte[len];
        readBytesLength += len;
    }

    if (dirNameBytes != null && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - dirNameBytes.length);
        headerBuf.get(dirNameBytes);
        readBytesLength += 4;
    }

    if (headerAndPayloadSize == 0 && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - 4);
        headerAndPayloadSize = headerBuf.getInt();

        return true;
    }

    return false;
}

From source file:client.MultiplexingClient.java

private String bufferToString(ByteBuffer bb) {
    StringBuffer sb = new StringBuffer();
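    // absolute get(i) does not advance the position; limit() bounds the readable bytes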
    for (int i = 0; i < bb.limit(); i++) {
        sb.append((char) bb.get(i));
    }
    return sb.toString();
}

From source file:com.serenegiant.media.TLMediaEncoder.java

/**
 * Converts the contents of a ByteBuffer into a comma-separated string of byte values.
 * @param buffer the buffer to convert; may be null
 * @return the byte values separated by commas; empty if the buffer is null or empty
 */
private static final String asString(final ByteBuffer buffer) {
    final byte[] temp = new byte[16];
    final StringBuilder sb = new StringBuilder();
    int n = (buffer != null ? buffer.limit() : 0);
    if (n > 0) {
        buffer.rewind();
        int sz = (n > 16 ? 16 : n);
        n -= sz;
        for (; sz > 0; sz = (n > 16 ? 16 : n), n -= sz) {
            buffer.get(temp, 0, sz);
            for (int i = 0; i < sz; i++) {
                sb.append(temp[i]).append(',');
            }
        }
    }
    return sb.toString();
}