Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you can find example usages of java.nio.ByteBuffer.rewind().

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer: the position is set to zero and the mark is discarded.
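
Before the real-world usages below, a minimal self-contained sketch (not taken from any of the projects listed) shows the basic behaviour: writes advance the position, and rewind() resets it to zero so the same bytes can be read back.

import java.nio.ByteBuffer;

public class RewindExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        buffer.putInt(42);                    // position advances from 0 to 4
        buffer.rewind();                      // position back to 0; limit unchanged, mark discarded
        System.out.println(buffer.getInt());  // prints 42
    }
}

Unlike flip(), rewind() does not change the limit, so it is the right call when the limit already marks the end of the data, for example to re-read a buffer that was just filled.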

Usage

From source file:org.cosmo.common.record.Defn.java

public Object readAll(int maxCounts) throws IOException {
    int count = Math.min(readCount(), maxCounts);
    byte[] buf = readFullRawBytes(maxCounts).array();
    ByteBuffer readDataIO = ByteBuffer.allocate(size());
    Object elements = null;

    if (this instanceof DefnRecord) {
        elements = Array.newInstance(long.class, count);
        for (int i = 0, offset = 0, size = size(), c = count; i < c; i++, offset += size) {
            readDataIO.put(buf, offset, size);
            readDataIO.rewind();
            readDataIO.get(); // skip header byte
            Array.set(elements, i, readDataIO.getLong());
            readDataIO.rewind();
        }
    } else {
        elements = Array.newInstance(field().getType(), count);
        for (int i = 0, offset = 0, size = size(), c = count; i < c; i++, offset += size) {
            readDataIO.put(buf, offset, size);
            readDataIO.rewind();
            readDataIO.get(); // skip header byte
            Array.set(elements, i, readImpl(readDataIO, false));
            readDataIO.rewind();
        }
    }

    return elements;
}

From source file:org.carbondata.query.aggregator.impl.CustomAggregatorHelper.java

/**
 * Reads the given level file and updates the surrogate-key member cache.
 *
 * @param memberFile
 * @param fileName
 * @throws IOException
 */
private void readLevelFileAndUpdateCache(File memberFile, String fileName) throws IOException {
    FileInputStream fos = null;
    FileChannel fileChannel = null;
    try {
        // open a FileInputStream on the member file
        fos = new FileInputStream(memberFile);

        fileChannel = fos.getChannel();
        Map<Integer, String> memberMap = surrogateKeyMap.get(fileName);

        if (null == memberMap) {
            memberMap = new HashMap<Integer, String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
            surrogateKeyMap.put(fileName, memberMap);
        }

        long size = fileChannel.size();
        int maxKey = 0;
        ByteBuffer rowlengthToRead = null;
        int len = 0;
        ByteBuffer row = null;
        int toread = 0;
        byte[] bb = null;
        String value = null;
        int surrogateValue = 0;

        boolean enableEncoding = Boolean.valueOf(
                CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                        CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));

        while (fileChannel.position() < size) {
            rowlengthToRead = ByteBuffer.allocate(4);
            fileChannel.read(rowlengthToRead);
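            // rewind so the 4-byte length can be read back from position 0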
            rowlengthToRead.rewind();
            len = rowlengthToRead.getInt();
            if (len == 0) {
                continue;
            }

            row = ByteBuffer.allocate(len);
            fileChannel.read(row);
            row.rewind();
            toread = row.getInt();
            bb = new byte[toread];
            row.get(bb);

            if (enableEncoding) {
                value = new String(Base64.decodeBase64(bb), Charset.defaultCharset());
            } else {
                value = new String(bb, Charset.defaultCharset());
            }

            surrogateValue = row.getInt();
            memberMap.put(surrogateValue, value);

            // check if max key is less than Surrogate key then update the max key
            if (maxKey < surrogateValue) {
                maxKey = surrogateValue;
            }
        }

    } finally {
        CarbonUtil.closeStreams(fileChannel, fos);
    }
}

From source file:jext2.DataInode.java

/**
 * Write the data in the buffer to disk. This works best when whole blocks,
 * i.e. multiples of the blocksize, are written. A partial block is written
 * by first reading the block, copying the new data into that buffer, and
 * then writing the merged buffer back to disk.
 * @throws NoSpaceLeftOnDevice
 * @throws FileTooLarge
 */
public int writeData(ByteBuffer buf, long offset) throws JExt2Exception, NoSpaceLeftOnDevice, FileTooLarge {
    /*
     * Note on sparse file support:
     * getBlocksAllocate does not care if there are holes. Just write as many
     * blocks as the buffer requires at the desired location and set inode.size
     * accordingly.
     */

    int blocksize = superblock.getBlocksize();
    long start = offset / blocksize;
    long end = (buf.capacity() + blocksize) / blocksize + start;
    int startOff = (int) (offset % blocksize);

    if (startOff > 0)
        end += 1;

    buf.rewind();

    while (start < end) {
        LinkedList<Long> blockNrs = accessData().getBlocksAllocate(start, 1);
        int bytesLeft = buf.capacity() - buf.position();

        if (bytesLeft < blocksize || startOff > 0) { /* write partial block */
            ByteBuffer onDisk = blockAccess.read(blockNrs.getFirst());

            onDisk.position(startOff);

            assert onDisk.limit() == blocksize;

            buf.limit(buf.position() + Math.min(bytesLeft, onDisk.remaining()));

            onDisk.put(buf);

            onDisk.position(startOff);
            blockAccess.writeFromBufferUnsynchronized((blockNrs.getFirst() & 0xffffffff) * blocksize, onDisk);
        } else { /* write whole block */
            buf.limit(buf.position() + blocksize);

            blockAccess.writeFromBufferUnsynchronized((blockNrs.getFirst() & 0xffffffff) * blocksize, buf);
        }

        start += 1;
        startOff = 0;
        accessData().unlockHierarchyChanges();

    }
    int written = buf.position();
    assert written == buf.capacity();

    /* increase inode.size if we grew the file */
    if (offset + written > getSize()) { /* file grew */
        setStatusChangeTime(new Date());
        setSize(offset + written);
    }

    return written;
}

From source file:com.github.ambry.utils.UtilsTest.java

@Test
public void testReadBuffers() throws IOException {
    byte[] buf = new byte[40004];
    new Random().nextBytes(buf);
    ByteBuffer inputBuf = ByteBuffer.wrap(buf);
    inputBuf.putInt(0, 40000);
    ByteBuffer outputBuf = Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    for (int i = 0; i < 40000; i++) {
        Assert.assertEquals(buf[i + 4], outputBuf.array()[i]);
    }
    // 0 size
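    // rewind so the wrapping ByteBufferInputStream reads the buffer from the start again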
    inputBuf.rewind();
    inputBuf.putInt(0, 0);
    outputBuf = Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    Assert.assertEquals("Output should be of length 0", 0, outputBuf.array().length);
    // negative size
    inputBuf.rewind();
    inputBuf.putInt(0, -1);
    try {
        Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
        Assert.fail("Should have encountered exception with negative length.");
    } catch (IllegalArgumentException e) {
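        // expected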
    }

    buf = new byte[10];
    new Random().nextBytes(buf);
    inputBuf = ByteBuffer.wrap(buf);
    inputBuf.putShort(0, (short) 8);
    outputBuf = Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    for (int i = 0; i < 8; i++) {
        Assert.assertEquals(buf[i + 2], outputBuf.array()[i]);
    }
    // 0 size
    inputBuf.rewind();
    inputBuf.putShort(0, (short) 0);
    outputBuf = Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    Assert.assertEquals("Output should be of length 0", 0, outputBuf.array().length);
    // negative size
    inputBuf.rewind();
    inputBuf.putShort(0, (short) -1);
    try {
        Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
        Assert.fail("Should have encountered exception with negative length.");
    } catch (IllegalArgumentException e) {
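        // expected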
    }
}

From source file:io.github.dsheirer.source.tuner.hackrf.HackRFTunerController.java

@Override
public void setTunedFrequency(long frequency) throws SourceException {
    ByteBuffer buffer = ByteBuffer.allocateDirect(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);

    int mhz = (int) (frequency / 1E6);
    int hz = (int) (frequency - (mhz * 1E6));

    buffer.putInt(mhz);
    buffer.putInt(hz);

    // reset the position to 0 so write() transfers the request from the first byte
    buffer.rewind();

    try {
        write(Request.SET_FREQUENCY, 0, 0, buffer);
    } catch (UsbException e) {
        mLog.error("error setting frequency [" + frequency + "]", e);

        throw new SourceException("error setting frequency [" + frequency + "]", e);
    }
}

From source file:org.apache.hadoop.crypto.CryptoStreamsTestBase.java

private void byteBufferReadCheck(InputStream in, ByteBuffer buf, int bufPos) throws Exception {
    buf.position(bufPos);
    int n = ((ByteBufferReadable) in).read(buf);
    Assert.assertEquals(bufPos + n, buf.position());
    byte[] readData = new byte[n];
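    // rewind() resets the position to 0 and discards any mark before seeking back to bufPos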
    buf.rewind();
    buf.position(bufPos);
    buf.get(readData);
    byte[] expectedData = new byte[n];
    System.arraycopy(data, 0, expectedData, 0, n);
    Assert.assertArrayEquals(readData, expectedData);
}

From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java

/**
 * Computes the average by fetching the sum and the row count from all the
 * corresponding regions: a global sum of the region-level sums and row
 * counts is computed first, and the average is then derived from it.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false);
    class AvgCallBack implements Batch.Callback<Pair<S, Long>> {
        S sum = null;
        Long rowCount = 0L;

        public synchronized Pair<S, Long> getAvgArgs() {
            return new Pair<S, Long>(sum, rowCount);
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<S, Long> result) {
            sum = ci.add(sum, result.getFirst());
            rowCount += result.getSecond();
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<AggregateService, Pair<S, Long>>() {
                @Override
                public Pair<S, Long> call(AggregateService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<AggregateResponse> rpcCallback = new BlockingRpcCallback<AggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    AggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    ByteString b = response.getFirstPart(0);
                    T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    S s = ci.getPromotedValueFromProto(t);
                    pair.setFirst(s);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
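                    // rewind so getLong() reads the eight bytes just written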
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * It gives the row count by summing up the individual results obtained from
 * the regions. If the qualifier is null, FirstKeyValueFilter is used to
 * optimise the operation. If a qualifier is provided, the filter cannot be
 * used: it may skip to the next row even though the value read does not
 * belong to the given qualifier, so that row would not be counted and the
 * result would be wrong.
 * @param table
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final long transactionId,
        final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
        throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            true);
    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }
    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Long>() {
                @Override
                public Long call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
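                    // rewind so getLong() reads the eight bytes just written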
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}

From source file:org.alfresco.repo.content.AbstractWritableContentStoreTest.java

/**
 * Tests random access reading.
 * <p>
 * Only executes if the reader implements {@link RandomAccessContent}.
 */
@Test
public void testRandomAccessRead() throws Exception {
    ContentWriter writer = getWriter();
    // put some content
    String content = "ABC";
    byte[] bytes = content.getBytes();
    writer.putContent(content);
    ContentReader reader = writer.getReader();

    FileChannel fileChannel = reader.getFileChannel();
    assertNotNull("No channel given", fileChannel);

    // check that no other content access is allowed
    try {
        reader.getReadableChannel();
        fail("Second channel access allowed");
    } catch (RuntimeException e) {
        // expected
    }

    // read the content
    ByteBuffer buffer = ByteBuffer.allocate(bytes.length);
    int count = fileChannel.read(buffer);
    assertEquals("Incorrect number of bytes read", bytes.length, count);
    // transfer back to array
    buffer.rewind();
    buffer.get(bytes);
    String checkContent = new String(bytes);
    assertEquals("Content read failure", content, checkContent);
    fileChannel.close();
}