Example usage for java.nio.ByteBuffer remaining()

Introduction

On this page you can find example usages of java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of elements remaining in this buffer, that is, limit - position.
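
A minimal, self-contained sketch (not taken from any of the projects below) illustrating how remaining() reflects limit - position as a buffer is written, flipped, and read:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);  // position = 0, limit = 16
        System.out.println(buffer.remaining());       // 16, i.e. limit - position

        buffer.putInt(42);                            // position advances to 4
        System.out.println(buffer.remaining());       // 12 bytes of space left to write

        buffer.flip();                                // limit = 4, position = 0
        System.out.println(buffer.remaining());       // 4 bytes available to read
    }
}

Note that hasRemaining() is simply a convenience for remaining() > 0, which is why several of the loops in the examples below use the two methods together.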

Usage

From source file:org.alfresco.contentstore.ChecksumTest.java

private void assertEqual(ByteBuffer expected, ByteBuffer actual, State state) throws IOException {
    int expectedRemaining = expected.remaining();
    int actualRemaining = actual.remaining();

    if (expectedRemaining != actualRemaining) {
        fail("Different lengths");
    }
    if (expectedRemaining == 0) {
        return;
    }

    assertEquals(expected.remaining(), actual.remaining());
    while (expected.hasRemaining()) {
        byte expectedByte = expected.get();
        byte actualByte = actual.get();
        state.incIdx(1);
        assertEquals("Not equal at " + state, expectedByte, actualByte);
    }
}

From source file:org.apache.tajo.master.exec.QueryExecutor.java

public void execNonFromQuery(QueryContext queryContext, Session session, String query, LogicalPlan plan,
        SubmitQueryResponse.Builder responseBuilder) throws Exception {
    LogicalRootNode rootNode = plan.getRootBlock().getRoot();

    EvalContext evalContext = new EvalContext();

    // A non-FROM query should use the session's time zone, e.g. select to_char(now(), 'yyyy-MM-dd')
    String timezoneId = queryContext.get(SessionVars.TIMEZONE);
    evalContext.setTimeZone(TimeZone.getTimeZone(timezoneId));

    List<Target> targets = plan.getRootBlock().getRawTargets();
    if (targets == null) {
        throw new TajoInternalError("no targets");
    }
    try {
        // start script executor
        startScriptExecutors(queryContext, evalContext, targets);
        final VTuple outTuple = new VTuple(targets.size());
        for (int i = 0; i < targets.size(); i++) {
            EvalNode eval = targets.get(i).getEvalTree();
            eval.bind(evalContext, null);
            outTuple.put(i, eval.eval(null));
        }
        boolean isInsert = rootNode.getChild() != null && rootNode.getChild().getType() == NodeType.INSERT;
        if (isInsert) {
            InsertNode insertNode = rootNode.getChild();
            insertRowValues(queryContext, insertNode, responseBuilder);
        } else {
            Schema schema = PlannerUtil.targetToSchema(targets);
            SerializedResultSet.Builder serializedResBuilder = SerializedResultSet.newBuilder();
            MemoryRowBlock rowBlock = new MemoryRowBlock(SchemaUtil.toDataTypes(schema));

            try {
                rowBlock.getWriter().addTuple(outTuple);

                MemoryBlock memoryBlock = rowBlock.getMemory();
                ByteBuffer uncompressed = memoryBlock.getBuffer().nioBuffer(0, memoryBlock.readableBytes());
                int uncompressedLength = uncompressed.remaining();

                serializedResBuilder.setDecompressedLength(uncompressedLength);
                serializedResBuilder.setSerializedTuples(ByteString.copyFrom(uncompressed));
                serializedResBuilder.setSchema(schema.getProto());
                serializedResBuilder.setRows(rowBlock.rows());
            } finally {
                rowBlock.release();
            }

            QueryInfo queryInfo = context.getQueryJobManager().createNewSimpleQuery(queryContext, session,
                    query, (LogicalRootNode) plan.getRootBlock().getRoot());

            responseBuilder.setState(OK);
            responseBuilder.setResultType(ResultType.ENCLOSED);
            responseBuilder.setQueryId(queryInfo.getQueryId().getProto());
            responseBuilder.setResultSet(serializedResBuilder);
            responseBuilder.setMaxRowNum(1);
        }
    } finally {
        // stop script executor
        stopScriptExecutors(evalContext);
    }
}

From source file:com.newatlanta.appengine.nio.channels.GaeFileChannel.java

@Override
public synchronized int read(ByteBuffer dst) throws IOException {
    checkReadOptions();
    long fileLen = doGetSize();
    if (position >= fileLen) {
        return -1;
    }
    int totalBytesRead = 0;
    while (dst.hasRemaining() && (position < fileLen)) {
        int r = dst.remaining();
        initBuffer(r);
        if (calcBlockIndex(position + r - 1) == index) {
            // within current block, read until dst is full or to EOF
            int eofoffset = calcBlockOffset(fileLen);
            int limit = Math.min(buffer.position() + r, eofoffset);
            if (limit > buffer.capacity()) {
                // copy the remaining bytes in buffer to dst, then fill dst
                // with empty bytes until full or to the calculated limit
                dst.put(buffer);
                dst.put(new byte[Math.min(limit - buffer.capacity(), dst.remaining())]);
            } else {
                buffer.limit(limit);
                dst.put(buffer);
                buffer.limit(buffer.capacity()); // restore original limit
            }
            int bytesRead = (r - dst.remaining());
            totalBytesRead += bytesRead;
            positionInternal(position + bytesRead, false);
        } else {
            // read to the end of the current block
            r = buffer.remaining();
            if (r == 0) {
                r = (int) (blockSize - position);
                dst.put(new byte[r]);
            } else {
                dst.put(buffer);
            }
            totalBytesRead += r;

            // move position to beginning of next buffer, repeat loop
            positionInternal(position + r, false);
        }
    }
    //closeBlock();
    return totalBytesRead;
}

From source file:org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java

/**
 * Tries to read as many bytes as possible into supplied buffer, checksumming
 * each chunk if needed.
 *
 * <b>Preconditions:</b>
 * <ul>
 * <li>
 * If checksumming is enabled, buf.remaining must be a multiple of
 * bytesPerChecksum. Note that this is not a requirement for clients of
 * read(ByteBuffer) - in the case of non-checksum-sized read requests,
 * read(ByteBuffer) will substitute a suitably sized buffer to pass to this
 * method.
 * </li>
 * </ul>
 * <b>Postconditions:</b>
 * <ul>
 * <li>buf.limit and buf.mark are unchanged.</li>
 * <li>buf.position += min(offsetFromChunkBoundary, totalBytesRead) - so the
 * requested bytes can be read straight from the buffer</li>
 * </ul>
 *
 * @param buf
 *          byte buffer to write bytes to. If checksums are not required, buf
 *          can have any number of bytes remaining, otherwise there must be a
 *          multiple of the checksum chunk size remaining.
 * @return <tt>max(min(totalBytesRead, len) - offsetFromChunkBoundary, 0)</tt>
 *         that is, the number of useful bytes (up to the amount
 *         requested) readable from the buffer by the client.
 */
private synchronized int doByteBufferRead(ByteBuffer buf) throws IOException {
    if (verifyChecksum) {
        assert buf.remaining() % bytesPerChecksum == 0;
    }
    int dataRead = -1;

    int oldpos = buf.position();
    // Read as much as we can into the buffer.
    dataRead = fillBuffer(dataIn, buf);

    if (dataRead == -1) {
        return -1;
    }

    if (verifyChecksum) {
        ByteBuffer toChecksum = buf.duplicate();
        toChecksum.position(oldpos);
        toChecksum.limit(oldpos + dataRead);

        checksumBuff.clear();
        // Equivalent to (int)Math.ceil(toChecksum.remaining() * 1.0 / bytesPerChecksum );
        int numChunks = (toChecksum.remaining() + bytesPerChecksum - 1) / bytesPerChecksum;
        checksumBuff.limit(checksumSize * numChunks);

        fillBuffer(checksumIn, checksumBuff);
        checksumBuff.flip();

        checksum.verifyChunkedSums(toChecksum, checksumBuff, filename, this.startOffset);
    }

    if (dataRead >= 0) {
        buf.position(oldpos + Math.min(offsetFromChunkBoundary, dataRead));
    }

    if (dataRead < offsetFromChunkBoundary) {
        // yikes, didn't even get enough bytes to honour offset. This can happen
        // even if we are verifying checksums if we are at EOF.
        offsetFromChunkBoundary -= dataRead;
        dataRead = 0;
    } else {
        dataRead -= offsetFromChunkBoundary;
        offsetFromChunkBoundary = 0;
    }

    return dataRead;
}

From source file:io.warp10.continuum.gts.GTSDecoder.java

public static GTSDecoder fromBlock(byte[] block, byte[] key) throws IOException {

    if (block.length < 6) {
        throw new IOException("Invalid block.");
    }

    ByteBuffer buffer = ByteBuffer.wrap(block);

    //
    // Extract size
    //

    buffer.order(ByteOrder.BIG_ENDIAN);
    int size = buffer.getInt();

    // Check size

    if (block.length != size) {
        throw new IOException("Invalid block size, expected " + size + ", block is " + block.length);
    }

    // Extract compression

    byte comp = buffer.get();

    boolean compress = false;

    if (0 == comp) {
        compress = false;
    } else if (1 == comp) {
        compress = true;
    } else {
        throw new IOException("Invalid compression flag");
    }

    // Extract base timestamp

    long base = Varint.decodeSignedLong(buffer);

    InputStream in;

    ByteArrayInputStream bain = new ByteArrayInputStream(block, buffer.position(), buffer.remaining());

    if (compress) {
        in = new GZIPInputStream(bain);
    } else {
        in = bain;
    }

    byte[] buf = new byte[1024];

    ByteArrayOutputStream out = new ByteArrayOutputStream(buffer.remaining());

    while (true) {
        int len = in.read(buf);

        if (len <= 0) {
            break;
        }
        out.write(buf, 0, len);
    }

    GTSDecoder decoder = new GTSDecoder(base, key, ByteBuffer.wrap(out.toByteArray()));

    return decoder;
}

From source file:com.esri.geoevent.solutions.adapter.cot.CoTAdapter.java

@Override
public void receive(ByteBuffer buf, String channelId) {
    buf.mark();
    int size = buf.remaining();
    if (size < 1)
        return;
    byte[] data = new byte[size];
    buf.get(data, 0, size);
    //System.out.println(" \n");
    //System.out.println("Read " + size + " bytes");

    String xml = new String(data);
    parseUsingDocument(xml, channelId);
    //parseUsingStream(buf);
}

From source file:com.healthmarketscience.jackcess.PageChannel.java

/**
 * Write a page to disk as a new page, appending it to the database
 * @param page Page to write
 * @return Page number at which the page was written
 */
public int writeNewPage(ByteBuffer page) throws IOException {
    long size = _channel.size();
    if (size >= getFormat().MAX_DATABASE_SIZE) {
        throw new IOException("Database is at maximum size " + getFormat().MAX_DATABASE_SIZE);
    }
    if ((size % getFormat().PAGE_SIZE) != 0L) {
        throw new IOException("Database corrupted, file size " + size + " is not multiple of page size "
                + getFormat().PAGE_SIZE);
    }

    page.rewind();

    if (page.remaining() > getFormat().PAGE_SIZE) {
        throw new IllegalArgumentException("Page buffer is too large, size " + page.remaining());
    }

    // push the buffer to the end of the page, so that a full page's worth of
    // data is written regardless of the incoming buffer size (we use a tiny
    // buffer in allocateNewPage)
    int pageOffset = (getFormat().PAGE_SIZE - page.remaining());
    long offset = size + pageOffset;
    int pageNumber = getNextPageNumber(size);
    _channel.write(_codecHandler.encodePage(page, pageNumber, pageOffset), offset);
    // note, we "force" page removal because we know that this is an unused
    // page (since we just added it to the file)
    _globalUsageMap.removePageNumber(pageNumber, true);
    return pageNumber;
}

From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java

private static TarReader openFirstFileWithValidIndex(List<File> files, boolean memoryMapping) {
    for (File file : files) {
        String name = file.getName();
        try {
            RandomAccessFile access = new RandomAccessFile(file, "r");
            try {
                ByteBuffer index = loadAndValidateIndex(access, name);
                if (index == null) {
                    log.info("No index found in tar file {}, skipping...", name);
                } else {
                    // found a file with a valid index, drop the others
                    for (File other : files) {
                        if (other != file) {
                            log.info("Removing unused tar file {}", other.getName());
                            other.delete();
                        }
                    }

                    if (memoryMapping) {
                        try {
                            FileAccess mapped = new FileAccess.Mapped(access);
                            // re-read the index, now with memory mapping
                            int indexSize = index.remaining();
                            index = mapped.read(mapped.length() - indexSize - 16 - 1024, indexSize);
                            return new TarReader(file, mapped, index);
                        } catch (IOException e) {
                            log.warn("Failed to mmap tar file " + name + ". Falling back to normal file IO,"
                                    + " which will negatively impact" + " repository performance. This"
                                    + " problem may have been caused by" + " restrictions on the amount of"
                                    + " virtual memory available to the" + " JVM. Please make sure that a"
                                    + " 64-bit JVM is being used and" + " that the process has access to"
                                    + " unlimited virtual memory" + " (ulimit option -v).", e);
                        }
                    }

                    FileAccess random = new FileAccess.Random(access);
                    // prevent the finally block from closing the file
                    // as the returned TarReader will take care of that
                    access = null;
                    return new TarReader(file, random, index);
                }
            } finally {
                if (access != null) {
                    access.close();
                }
            }
        } catch (IOException e) {
            log.warn("Could not read tar file " + name + ", skipping...", e);
        }
    }

    return null;
}

From source file:net.sf.jml.message.p2p.MsnP2PMessage.java

@Override
protected void parseMessage(byte[] message) {
    ByteBuffer split = Charset.encode(JmlConstants.LINE_SEPARATOR + JmlConstants.LINE_SEPARATOR);
    int pos = ByteBufferUtils.indexOf(ByteBuffer.wrap(message), split);

    // header
    String header = pos == -1 ? Charset.decode(message) : Charset.decode(message, 0, pos);
    headers.parseString(header);

    // binaryHeader: skip past the separator (split.remaining() bytes), then read the fixed-length binary header
    pos += split.remaining();
    binaryHeader.put(message, pos, BINARY_HEADER_LEN);
    binaryHeader.flip();

    // body
    pos += BINARY_HEADER_LEN;
    parseP2PBody(ByteBuffer.wrap(message, pos, message.length - pos - BINARY_FOOTER_LEN));

    // binaryFooter
    binaryFooter.put(message, message.length - BINARY_FOOTER_LEN, BINARY_FOOTER_LEN);
    binaryFooter.flip();
}

From source file:io.druid.hll.HyperLogLogCollectorTest.java

protected ByteBuffer shiftedBuffer(ByteBuffer buf, int offset) {
    ByteBuffer shifted = ByteBuffer.allocate(buf.remaining() + offset);
    shifted.position(offset);
    shifted.put(buf);
    shifted.position(offset);
    return shifted;
}