Example usage for java.nio ByteBuffer limit

Introduction

This page collects example usages of java.nio.ByteBuffer.limit(int) drawn from open-source projects.

Prototype

public final Buffer limit(int newLimit) 

Document

Sets the limit of this buffer. If the position is larger than the new limit, the position is set to the new limit; if the mark is defined and larger than the new limit, it is discarded.
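
Before the project examples, a minimal, self-contained sketch of what limit(int) does (it assumes nothing beyond the JDK):

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(42).putInt(7);               // position = 8, limit = 16

        // Cap the readable region at the bytes actually written.
        buf.limit(buf.position());              // limit = 8
        buf.position(0);                        // same effect as flip()

        System.out.println(buf.getInt());       // 42
        System.out.println(buf.getInt());       // 7
        System.out.println(buf.hasRemaining()); // false: position == limit
    }
}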

Usage

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

@Override
public StorageHandle getData(StorageHandle storeHandle, ByteBuffer buf) {
    FileStorageHandle fsh = (FileStorageHandle) storeHandle;

    // Check if current file and offset > currentFileOffset
    int id = maxId.get();
    if (fsh.getId() > id || (fsh.getId() == id && fsh.getOffset() >= currentFileOffset.get())) {
        // not found
        buf.putInt(0, 0);
        return fsh;
    }

    RandomAccessFile file = getFile(fsh.getId());//openFile(fsh.getId(), "r");

    boolean needSecondChance = needSecondChance(fsh.getId());

    try {
        if (file == null) {
            // file unavailable: mark the result as not found
            buf.putInt(0, 0);
        } else {
            buf.clear();
            int toRead = fsh.getSize();
            buf.putInt(fsh.getSize());
            buf.limit(4 + toRead);
            try {
                FileChannel fc = file.getChannel();
                int total = 0;
                int c = 0;
                // the offset skips the 4-byte overall length prefix
                int off = fsh.getOffset() + 4;
                while (total < toRead) {
                    c = fc.read(buf, off);
                    if (c < 0) {
                        // EOF: mark the result as not found
                        buf.putInt(0, 0);
                        break;
                    }
                    off += c;
                    total += c;
                }
            } catch (IOException e) {
                // return not found
                if (fsh.getId() > minId.get()) {
                    e.printStackTrace();
                }
                buf.putInt(0, 0);
            }
        }
        if (buf.getInt(0) != 0 && needSecondChance) {
            // store again
            fsh = (FileStorageHandle) storeData(buf);
        }
        return fsh;

    } finally {
        if (file != null) {
            // return the file handle to the pool (PUT is needed for the old version)
            putFile(fsh.getId(), file);
        }
    }

}
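
getData() above writes a four-byte length prefix at index 0 (0 meaning "not found") and bounds the buffer with buf.limit(4 + toRead). A caller-side sketch of decoding that framing, assuming only the layout visible above:

private static byte[] decodeRecord(ByteBuffer buf) {
    int size = buf.getInt(0);        // getData() stores 0 here for "not found"
    if (size == 0) {
        return null;
    }
    buf.position(4);                 // skip the 4-byte length prefix
    buf.limit(4 + size);             // expose exactly the payload
    byte[] payload = new byte[size];
    buf.get(payload);                // reads exactly size bytes
    return payload;
}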

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

/**
 * Tries to store a batch of blocks into the current buffer.
 *
 * @param buf the buffer holding the batch of blocks
 * @return the list of storage handles, or null if the batch does not fit
 */
private List<StorageHandle> storeDataNoReleaseLock(ByteBuffer buf) {

    List<StorageHandle> handles = new ArrayList<StorageHandle>();
    writeLock.writeLock().lock();
    try {

        if (activeBuffer.get() == null) {
            return null;
        }
        int size = buf.getInt(0);
        long off = bufferOffset.get();
        if (off + size > bufferSize) {
            return null;
        }

        long currentFileLength = currentFileOffsetForWrites.get();
        if (bufferOffset.get() == 0 && currentFileLength + bufferSize > fileSizeLimit) {
            // previous buffer was flushed
            currentFileOffsetForWrites.set(0);
            maxIdForWrites.incrementAndGet();
        }

        buf.position(4);

        while (buf.position() < size + 4) {
            buf.limit(buf.capacity());
            int pos = buf.position();
            int blockSize = buf.getInt();
            buf.position(pos);
            buf.limit(pos + 4 + blockSize);
            activeBuffer.get().put(buf);
            FileStorageHandle fsh = new FileStorageHandle(maxIdForWrites.get(),
                    (int) (currentFileOffsetForWrites.get()), blockSize);
            handles.add(fsh);
            // Increase offset in current file for writes;
            currentFileOffsetForWrites.addAndGet(blockSize + 4);
            bufferOffset.getAndAdd(blockSize + 4);
        }
        return handles;
    } finally {
        WriteLock lock = writeLock.writeLock();
        if (lock.isHeldByCurrentThread()) {
            lock.unlock();
        }
    }
}

From source file:org.commoncrawl.io.internal.NIOHttpConnection.java

private void processChunkedContent() throws IOException {

    while (_inBuf.available() != 0 && _chunkState != ChunkState.STATE_DONE) {

        switch (_chunkState) {

        case STATE_AWAITING_CHUNK_HEADER: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // get the newly extracted line ... 
                String line = _chunkLineBuffer.toString();
                // now find first occurrence of whitespace ... 
                int whiteSpaceIdx = line.indexOf(' ');
                if (whiteSpaceIdx != -1) {
                    line = line.substring(0, whiteSpaceIdx);
                }
                // now extract chunk length ... 
                try {
                    _chunkSize = Integer.parseInt(line, 16);
                } catch (NumberFormatException e) {
                    LOG.error("Invalid Chunk Size Encountered reading CHUNK HEADER:" + line);
                    throw new IOException("Invalid chunk size");
                }
                // reset chunk pos cursor ... 
                _chunkPos = 0;
                // reset chunk read state 
                _chunkCRLFReadState = CRLFReadState.NONE;
                // reset the buffer for the next potential line read ... 
                _chunkLineBuffer.setLength(0);

                // now interpret the chunk size value ... 
                if (_chunkSize > 0) {
                    _chunkState = ChunkState.STATE_READING_CHUNK;
                } else {
                    _chunkState = ChunkState.STATE_AWAITING_TRAILERS;
                }
            }
        }
            break;

        case STATE_READING_CHUNK: {

            // calculate amount we want to read in ... 
            int amountToRead = Math.min(_chunkSize - _chunkPos, _inBuf.available());
            // and track amount we wrote into chunk content buffer 
            int amountWritten = 0;

            while (amountToRead != 0) {

                // get a write buffer ... 
                ByteBuffer writeBuffer = _chunkContentBuffer.getWriteBuf();

                // get the next read buffer 
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer == writeBuffer) {
                    throw new RuntimeException("BAD NEWS!!!");
                }

                //TODO: There is an opportunity here to skip buffer copy altogether and add read buffer directly to write buffer list 
                //            Need to look into this. 

                // if buffer size is > amountToRead ... 
                if (readBuffer.remaining() > writeBuffer.remaining() || readBuffer.remaining() > amountToRead) {

                    // slice the read buffer ... 
                    ByteBuffer sliced = readBuffer.slice();
                    // calculate slice amount 
                    int sliceAmount = Math.min(writeBuffer.remaining(), amountToRead);

                    // and increment original ... 
                    readBuffer.position(readBuffer.position() + sliceAmount);
                    // and limit sliced buffer scope ... 
                    sliced.limit(sliced.position() + sliceAmount);
                    // reduce amountToRead 
                    amountToRead -= sliceAmount;
                    // and increment chunk pos 
                    _chunkPos += sliceAmount;
                    // track amount written ... 
                    amountWritten += sliced.remaining();
                    // append it ... 
                    writeBuffer.put(sliced);
                    // and put back the read buffer 
                    _inBuf.putBack(readBuffer);
                }
                // otherwise... append whole buffer to write buffer 
                else {
                    // reduce amountToRead 
                    amountToRead -= readBuffer.remaining();
                    // and increment chunk pos 
                    _chunkPos += readBuffer.remaining();
                    // track amount written 
                    amountWritten += readBuffer.remaining();
                    // append as much as possible into the write buffer ... 
                    writeBuffer.put(readBuffer);
                }
            }

            // if we wrote some data to the content buffer ... 
            if (amountWritten != 0) {
                // update bytes downloaded ...
                _downloadedContentLength += amountWritten;

                if (getListener() != null) {
                    // inform listener of content availability 
                    getListener().HttpContentAvailable(this, _chunkContentBuffer);
                }
            }

            // now if we read in a chunk's worth of data ... advance state ... 
            if (_chunkPos == _chunkSize) {
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_EOL;
            }
        }
            break;

        case STATE_AWAITING_CHUNK_EOL: {

            if (_inBuf.available() >= 2) {
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer.get() != '\r') {
                    LOG.error("Missing CR from Chunk Data Terminator");
                    throw new IOException("missing CR");
                }
                // now if read buffer is expended ... release it and get another one ... 
                if (readBuffer.remaining() == 0) {
                    readBuffer = _inBuf.read();
                }

                if (readBuffer.get() != '\n') {
                    LOG.error("Missing LFfrom Chunk Data Terminator");
                    throw new IOException("missing LF");
                }
                // put back the read buffer 
                _inBuf.putBack(readBuffer);
                // and transition to the next state ... 
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_HEADER;
            } else {
                // break out and wait for more data 
                return;
            }
        }
            break;

        case STATE_AWAITING_TRAILERS: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // transition to a done state ... 
                _chunkState = ChunkState.STATE_DONE;
                // clear out intermediate crlf state
                _chunkCRLFReadState = CRLFReadState.NONE;
                _chunkLineBuffer.setLength(0);
            } else {
                break;
            }
        }
        // fall through if chunk state is done ... 

        case STATE_DONE: {
            // clear out existing input buffer ...
            _inBuf.reset();
            // flush chunk buffer ...
            _chunkContentBuffer.flush();
            // and swap it with the real content buffer ... 
            _inBuf = _chunkContentBuffer;
            // reset chunk state ... 
            _chunkContentBuffer = null;
            // reset chunked flag ... 
            _chunked = false;
            // set HTTP DONE state ... 
            setState(State.DONE, null);
        }
            break;
        }
    }
}
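
The STATE_READING_CHUNK branch above relies on a slice-then-limit idiom to copy a bounded number of bytes between buffers without disturbing the source buffer's own limit. Reduced to its essentials (a sketch, not part of the connection class):

// Copy exactly n bytes from src to dst; src's own limit stays untouched.
private static void boundedCopy(ByteBuffer src, ByteBuffer dst, int n) {
    ByteBuffer sliced = src.slice();      // shares content; position 0, limit = src.remaining()
    sliced.limit(n);                      // cap the view at n bytes
    dst.put(sliced);                      // bulk-copy only the view
    src.position(src.position() + n);     // consume the same n bytes from src
}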

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java

/**
 * Read external with codec.
 *
 * @param blockName the block name
 * @return the cacheable
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private Cacheable readExternalWithCodec(String blockName, boolean repeat, boolean caching) throws IOException {
    if (overflowExtEnabled == false)
        return null;
    // Check if we already have this block in the external storage cache
    try {
        // We use a 16-byte hash for the external storage cache
        byte[] hashed = Utils.hash128(blockName);

        StorageHandle handle = storage.newStorageHandle();
        byte[] data = (byte[]) extStorageCache.get(hashed);
        if (data == null) {
            if (repeat == false)
                extRefStats.miss(caching);
            return null;
        } else {
            extRefStats.hit(caching);
        }
        // Initialize handle 
        handle.fromBytes(data);

        ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
        SerDe serde = extStorageCache.getSerDe();
        Codec codec = extStorageCache.getCompressionCodec();

        buffer.clear();

        StorageHandle newHandle = storage.getData(handle, buffer);
        if (buffer.position() > 0)
            buffer.flip();
        int size = buffer.getInt();
        if (size == 0) {
            // BIGBASE-45
            // Remove reference from reference cache
            // reference is in L3-RAM cache but no object in L3-DISK cache was found
            // remove only if handle is invalid
            if (storage.isValid(handle) == false) {
                extStorageCache.remove(hashed);
            }
            return null;
        }
        // Skip key
        int keySize = buffer.getInt();
        buffer.position(8 + keySize);

        boolean inMemory = buffer.get() == (byte) 1;

        buffer.limit(size + 4);
        Cacheable obj = (Cacheable) serde.readCompressed(buffer/*, codec*/);
        offHeapCache.put(blockName, obj);

        if (newHandle.equals(handle) == false) {
            extStorageCache.put(hashed, newHandle.toBytes());
        }

        return obj;

    } catch (Throwable e) {
        fatalExternalReads.incrementAndGet();
        throw new IOException(e);
    }

}

From source file:org.commoncrawl.io.NIOHttpConnection.java

private void processChunkedContent() throws IOException {

    while (_inBuf.available() != 0 && _chunkState != ChunkState.STATE_DONE) {

        switch (_chunkState) {

        case STATE_AWAITING_CHUNK_HEADER: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // get the newly extracted line ...
                String line = _chunkLineBuffer.toString();
                // now find first occurrence of whitespace ...
                int whiteSpaceIdx = line.indexOf(' ');
                if (whiteSpaceIdx != -1) {
                    line = line.substring(0, whiteSpaceIdx);
                }
                // now extract chunk length ...
                try {
                    _chunkSize = Integer.parseInt(line, 16);
                } catch (NumberFormatException e) {
                    LOG.error("Connection:[" + getId()
                            + "] Invalid Chunk Size Encountered reading CHUNK HEADER:" + line);
                    throw new IOException("Invalid chunk size");
                }
                // reset chunk pos cursor ...
                _chunkPos = 0;
                // reset chunk read state
                _chunkCRLFReadState = CRLFReadState.NONE;
                // reset the buffer for the next potential line read ...
                _chunkLineBuffer.setLength(0);

                // now interpret the chunk size value ...
                if (_chunkSize > 0) {
                    _chunkState = ChunkState.STATE_READING_CHUNK;
                } else {
                    _chunkState = ChunkState.STATE_AWAITING_TRAILERS;
                }
            }
        }
            break;

        case STATE_READING_CHUNK: {

            // calculate amount we want to read in ...
            int amountToRead = Math.min(_chunkSize - _chunkPos, _inBuf.available());
            // and track amount we wrote into chunk content buffer
            int amountWritten = 0;

            while (amountToRead != 0) {

                // get a write buffer ...
                ByteBuffer writeBuffer = _chunkContentBuffer.getWriteBuf();

                // get the next read buffer
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer == writeBuffer) {
                    throw new RuntimeException("BAD NEWS!!!");
                }

                // TODO: There is an opportunity here to skip buffer copy altogether
                // and add read buffer directly to write buffer list
                // Need to look into this.

                // if buffer size is > amountToRead ...
                if (readBuffer.remaining() > writeBuffer.remaining() || readBuffer.remaining() > amountToRead) {

                    // slice the read buffer ...
                    ByteBuffer sliced = readBuffer.slice();
                    // calculate slice amount
                    int sliceAmount = Math.min(writeBuffer.remaining(), amountToRead);

                    // and increment original ...
                    readBuffer.position(readBuffer.position() + sliceAmount);
                    // and limit sliced buffer scope ...
                    sliced.limit(sliced.position() + sliceAmount);
                    // reduce amountToRead
                    amountToRead -= sliceAmount;
                    // and increment chunk pos
                    _chunkPos += sliceAmount;
                    // track amount written ...
                    amountWritten += sliced.remaining();
                    // append it ...
                    writeBuffer.put(sliced);
                    // and put back the read buffer
                    _inBuf.putBack(readBuffer);
                }
                // otherwise... append whole buffer to write buffer
                else {
                    // reduce amountToRead
                    amountToRead -= readBuffer.remaining();
                    // and increment chunk pos
                    _chunkPos += readBuffer.remaining();
                    // track amount written
                    amountWritten += readBuffer.remaining();
                    // append as much as possible into the write buffer ...
                    writeBuffer.put(readBuffer);
                }
            }

            // if we wrote some data to the content buffer ...
            if (amountWritten != 0) {
                // update bytes downloaded ...
                _downloadedContentLength += amountWritten;

                if (getListener() != null) {
                    // inform listener of content availability
                    getListener().HttpContentAvailable(this, _chunkContentBuffer);
                }
            }

            // now if we read in a chunk's worth of data ... advance state ...
            if (_chunkPos == _chunkSize) {
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_EOL;
            }
        }
            break;

        case STATE_AWAITING_CHUNK_EOL: {

            if (_inBuf.available() >= 2) {
                ByteBuffer readBuffer = _inBuf.read();

                if (readBuffer.get() != '\r') {
                    LOG.error("Connection:[" + getId() + "] Missing CR from Chunk Data Terminator");
                    throw new IOException("missing CR");
                }
                // now if read buffer is expended ... release it and get another one
                // ...
                if (readBuffer.remaining() == 0) {
                    readBuffer = _inBuf.read();
                }

                if (readBuffer.get() != '\n') {
                    LOG.error("Connection:[" + getId() + "] Missing LFfrom Chunk Data Terminator");
                    throw new IOException("missing LF");
                }
                // put back the read buffer
                _inBuf.putBack(readBuffer);
                // and transition to the next state ...
                _chunkState = ChunkState.STATE_AWAITING_CHUNK_HEADER;
            } else {
                // break out and wait for more data
                return;
            }
        }
            break;

        case STATE_AWAITING_TRAILERS: {

            _chunkCRLFReadState = _inBuf.readCRLFLine(_chunkLineBuffer, CHUNK_LINE_MAX, _chunkCRLFReadState);

            if (_chunkCRLFReadState == CRLFReadState.DONE) {
                // transition to a done state ...
                _chunkState = ChunkState.STATE_DONE;
                // clear out intermediate crlf state
                _chunkCRLFReadState = CRLFReadState.NONE;
                _chunkLineBuffer.setLength(0);
            } else {
                break;
            }
        }
        // fall through if chunk state is done ...

        case STATE_DONE: {
            // clear out existing input buffer ...
            _inBuf.reset();
            // flush chunk buffer ...
            _chunkContentBuffer.flush();
            // and swap it with the real content buffer ...
            _inBuf = _chunkContentBuffer;
            // reset chunk state ...
            _chunkContentBuffer = null;
            // reset chunked flag ...
            _chunked = false;
            // set HTTP DONE state ...
            setState(State.DONE, null);
        }
            break;
        }
    }
}

From source file:com.healthmarketscience.jackcess.Column.java

/**
 * Write an LVAL column into a ByteBuffer inline if it fits, otherwise in
 * other data page(s).
 * @param value Value of the LVAL column
 * @return A buffer containing the LVAL definition and (possibly) the column
 *         value (unless written to other pages)
 * @usage _advanced_method_
 */
public ByteBuffer writeLongValue(byte[] value, int remainingRowLength) throws IOException {
    if (value.length > getType().getMaxSize()) {
        throw new IOException(
                "value too big for column, max " + getType().getMaxSize() + ", got " + value.length);
    }

    // determine which type to write
    byte type = 0;
    int lvalDefLen = getFormat().SIZE_LONG_VALUE_DEF;
    if (((getFormat().SIZE_LONG_VALUE_DEF + value.length) <= remainingRowLength)
            && (value.length <= getFormat().MAX_INLINE_LONG_VALUE_SIZE)) {
        type = LONG_VALUE_TYPE_THIS_PAGE;
        lvalDefLen += value.length;
    } else if (value.length <= getFormat().MAX_LONG_VALUE_ROW_SIZE) {
        type = LONG_VALUE_TYPE_OTHER_PAGE;
    } else {
        type = LONG_VALUE_TYPE_OTHER_PAGES;
    }

    ByteBuffer def = getPageChannel().createBuffer(lvalDefLen);
    // take length and apply type to first byte
    int lengthWithFlags = value.length | (type << 24);
    def.putInt(lengthWithFlags);

    if (type == LONG_VALUE_TYPE_THIS_PAGE) {
        // write long value inline
        def.putInt(0);
        def.putInt(0); //Unknown
        def.put(value);
    } else {

        TempPageHolder lvalBufferH = getTable().getLongValueBuffer();
        ByteBuffer lvalPage = null;
        int firstLvalPageNum = PageChannel.INVALID_PAGE_NUMBER;
        byte firstLvalRow = 0;

        // write other page(s)
        switch (type) {
        case LONG_VALUE_TYPE_OTHER_PAGE:
            lvalPage = getLongValuePage(value.length, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            firstLvalRow = (byte) Table.addDataPageRow(lvalPage, value.length, getFormat(), 0);
            lvalPage.put(value);
            getPageChannel().writePage(lvalPage, firstLvalPageNum);
            break;

        case LONG_VALUE_TYPE_OTHER_PAGES:

            ByteBuffer buffer = ByteBuffer.wrap(value);
            int remainingLen = buffer.remaining();
            buffer.limit(0);
            lvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
            firstLvalPageNum = lvalBufferH.getPageNumber();
            int lvalPageNum = firstLvalPageNum;
            ByteBuffer nextLvalPage = null;
            int nextLvalPageNum = 0;
            while (remainingLen > 0) {
                lvalPage.clear();

                // figure out how much we will put in this page (we need 4 bytes for
                // the next page pointer)
                int chunkLength = Math.min(getFormat().MAX_LONG_VALUE_ROW_SIZE - 4, remainingLen);

                // figure out if we will need another page, and if so, allocate it
                if (chunkLength < remainingLen) {
                    // force a new page to be allocated
                    lvalBufferH.clear();
                    nextLvalPage = getLongValuePage(getFormat().MAX_LONG_VALUE_ROW_SIZE, lvalBufferH);
                    nextLvalPageNum = lvalBufferH.getPageNumber();
                } else {
                    nextLvalPage = null;
                    nextLvalPageNum = 0;
                }

                // add row to this page
                byte lvalRow = (byte) Table.addDataPageRow(lvalPage, chunkLength + 4, getFormat(), 0);

                // write next page info (we'll always be writing into row 0 for
                // newly created pages)
                lvalPage.put((byte) 0); // row number
                ByteUtil.put3ByteInt(lvalPage, nextLvalPageNum); // page number

                // write this page's chunk of data
                buffer.limit(buffer.limit() + chunkLength);
                lvalPage.put(buffer);
                remainingLen -= chunkLength;

                // write new page to database
                getPageChannel().writePage(lvalPage, lvalPageNum);

                if (lvalPageNum == firstLvalPageNum) {
                    // save initial row info
                    firstLvalRow = lvalRow;
                } else {
                    // check assertion that we wrote to row 0 for all subsequent pages
                    if (lvalRow != (byte) 0) {
                        throw new IllegalStateException("Expected row 0, but was " + lvalRow);
                    }
                }

                // move to next page
                lvalPage = nextLvalPage;
                lvalPageNum = nextLvalPageNum;
            }
            break;

        default:
            throw new IOException("Unrecognized long value type: " + type);
        }

        // update def
        def.put(firstLvalRow);
        ByteUtil.put3ByteInt(def, firstLvalPageNum);
        def.putInt(0); //Unknown

    }

    def.flip();
    return def;
}
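
Note the windowing idiom in the LONG_VALUE_TYPE_OTHER_PAGES branch above: buffer.limit(0) collapses the wrapped value to an empty window, and each pass widens it with buffer.limit(buffer.limit() + chunkLength) so that put(buffer) transfers exactly one chunk. In isolation (a sketch; chunkSize and sink stand in for the page machinery above):

// Stream a byte[] into a sink in fixed-size chunks by sliding the limit.
private static void writeInChunks(byte[] value, ByteBuffer sink, int chunkSize) {
    ByteBuffer buffer = ByteBuffer.wrap(value);
    buffer.limit(0);                               // start with an empty window
    while (buffer.limit() < buffer.capacity()) {
        int chunk = Math.min(chunkSize, buffer.capacity() - buffer.limit());
        buffer.limit(buffer.limit() + chunk);      // expose only the next chunk
        sink.put(buffer);                          // put() stops at the limit
    }
}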

From source file:com.yobidrive.diskmap.needles.NeedleManager.java

/** Read method for repair routines: reads the log sequentially from the given checkpoint until the end of all files.
 * The last file is truncated after the last valid needle (MAGIC numbers and MD5 OK).
 * @param checkPoint no repair will occur for needles <= checkpoint; in that case the index should be repaired and the checkpoint reset
 * @return the next PointedNeedle, or null at the end of the log
 * @throws NeedleManagerException
 */
public PointedNeedle readNextNeedleFromDiskInLogSequence(NeedlePointer checkPoint)
        throws NeedleManagerException {
    Boolean repairMode = (checkPoint != null);
    ByteBuffer needleBuffer = null;
    int retry = 2;
    // System.out.print("0") ;
    try {
        long position = -1L;
        int readBytes = -1;
        int totalHeaderReadBytes = 0;
        FileChannel fc = null;
        while (retry > 0) {
            retry--;
            // System.out.print("a") ;
            fc = getChannel(repairNeedle.getNeedleFileNumber());
            if (fc == null)
                return null;
            // System.out.print("b") ;
            // logger.info("Repairing: reading file "+repairNeedle.toString() ) ;
            // Position and read needle for check
            position = repairNeedle.getNeedleOffset();
            // System.out.print("c") ;
            // Acquires a ByteBuffer
            if (threadBufferQ == null)
                return null;
            // System.out.println("1") ;
            if (needleBuffer == null)
                needleBuffer = threadBufferQ.take();
            // System.out.println("2") ;
            // Finally we have a buffer
            needleBuffer.rewind();
            needleBuffer.limit(MAXKEYSIZE + MAXVERSIONSIZE + Needle.NEEDLEOVERHEAD);
            // First read header to know the data size
            totalHeaderReadBytes = 0;
            readBytes = 0;
            while (readBytes >= 0 && totalHeaderReadBytes < needleBuffer.limit()) {
                readBytes = fc.read(needleBuffer, position + totalHeaderReadBytes);
                if (readBytes > 0)
                    totalHeaderReadBytes += readBytes;
            }
            if (totalHeaderReadBytes <= 0) {
                if (!repairMode)
                    return null;
                // End of file, select next file
                if (position == 0 || repairNeedle.positionToNextFile() == null) {
                    // Clean end
                    if (repairNeedle.compareTo(checkPoint) <= 0) {
                        // We should NEVER repair a checkpointed needle. Kill checkpoint and rebuild index!
                        throw new BrokenCheckPointException(
                                "Missing checkpointed record " + repairNeedle.toString());
                    }
                    return null;
                } else {
                    // Continue with next file
                    retry = 1;
                    // System.out.println("-") ;
                    logger.info("Reading in sequence: switching to next file, " + repairNeedle.toString());
                    continue;
                }
            } else {
                // We have our needle (good or bad), do not retry
                retry = 0;
            }
        }
        Needle needle = new Needle();
        if (!needle.getNeedleHeaderFromBuffer(needleBuffer)) {
            // Incorrect header: truncate file at this position and removes all subsequent files
            if (!repairMode)
                return null;
            if (repairNeedle.compareTo(checkPoint) <= 0) {
                // We should NEVER repair a checkpointed needle. Kill checkpoint and rebuild index!
                throw new BrokenCheckPointException("Broken checkpointed record " + repairNeedle.toString());
            }
            truncate(repairNeedle);
            return null;
        }
        // System.out.println("3") ;
        // Needle Header is OK, read the rest until end of needle. Change limit to include data
        needleBuffer.position(totalHeaderReadBytes);
        needleBuffer.limit(needle.getTotalSizeFromData());

        readBytes = 0;
        int totalContentReadBytes = 0;
        while (readBytes >= 0 && totalContentReadBytes < needleBuffer.limit() - totalHeaderReadBytes) {
            readBytes = fc.read(needleBuffer, position + totalHeaderReadBytes + totalContentReadBytes);
            if (readBytes > 0)
                totalContentReadBytes += readBytes;
        }

        needleBuffer.rewind();
        needleBuffer.position(needle.getHeaderSize());
        // Parse data and verifies checksum
        if (!needle.getNeedleDataFromBuffer(needleBuffer)) {
            // Incorrect data: truncate file at this position and removes all subsequent files
            if (!repairMode)
                return null;
            if (repairNeedle.compareTo(checkPoint) <= 0) {
                // We should NEVER repair a checkpointed needle. Kill checkpoint and rebuild index!
                throw new BrokenCheckPointException("Broken checkpointed record " + repairNeedle.toString());
            }
            // System.out.print("truncate...") ;
            truncate(repairNeedle);
            // System.out.print("truncated.") ;
            return null;
        }
        // Now needle is parsed and OK
        PointedNeedle pn = new PointedNeedle();
        pn.setNeedlePointer(repairNeedle.clone());
        pn.setNeedle(needle);
        // System.out.println("4") ;
        // Put needle in cache
        needleReadCache.put(pn.getNeedlePointer(), needle);
        // Put needleHeader in cache
        needleHeaderReadCache.put(pn.getNeedlePointer(), needle.getNeedleHeader(pn.getNeedlePointer()));
        repairNeedle.positionToNextNeedle(position + needle.getRoundedTotalSize());
        return pn;
    } catch (Throwable th) {
        logger.error("Error reading needle at " + repairNeedle.getFormattedNeedleFileNumber() + "/"
                + repairNeedle.getFormattedNeedleOffset(), th);
        throw new NeedleManagerException();
    } finally {
        if (needleBuffer != null) {
            try {
                threadBufferQ.put(needleBuffer);
            } catch (InterruptedException ie) {
                throw new BucketTableManagerException("Error giving back needle read thread", ie);
            }
        }
    }
}
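
The header-first read above is a two-phase pattern: limit the buffer to the largest possible header, read and parse it, then raise the limit to the record's full size and keep reading into the same buffer. Stripped of the repair logic (a sketch; the size-prefixed header is an assumption, and headerMax stands in for MAXKEYSIZE + MAXVERSIONSIZE + Needle.NEEDLEOVERHEAD):

private static ByteBuffer readRecord(FileChannel fc, long pos, ByteBuffer buf, int headerMax)
        throws IOException {
    buf.clear();
    buf.limit(headerMax);                       // phase 1: header bytes only
    while (buf.hasRemaining()) {
        if (fc.read(buf, pos + buf.position()) < 0)
            return null;                        // EOF before a full header
    }
    int totalSize = buf.getInt(0);              // assumed size-prefixed header
    buf.limit(totalSize);                       // phase 2: widen to the full record
    while (buf.hasRemaining()) {
        if (fc.read(buf, pos + buf.position()) < 0)
            return null;                        // EOF before a full record
    }
    buf.rewind();
    return buf;
}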

From source file:org.apache.hadoop.hdfs.hoss.db.FileStreamStore.java

/**
 * Read a block from the file.
 * 
 * @param offset
 *            offset of the block
 * @param buf
 *            destination buffer
 * @return new offset (offset+headerlen+datalen+footer)
 */
public synchronized long read(long offset, final ByteBuffer buf) {
    if (!validState)
        throw new InvalidStateException();
    try {
        int readed;
        while (true) {
            if (offset >= offsetOutputCommited) {
                if (bufOutput.position() > 0) {
                    LOG.warn("WARN: autoflush forced");
                    flushBuffer();
                }
            }
            bufInput.clear();
            readed = fcInput.position(offset).read(bufInput); // Read 1 sector
            if (readed < HEADER_LEN) { // short+int (6 bytes)
                return -1;
            }
            bufInput.flip();
            final int magicB1 = (bufInput.get() & 0xFF); // Header - Magic (short, 2 bytes, msb-first)
            final int magicB2 = (bufInput.get() & 0xFF); // Header - Magic (short, 2 bytes, lsb-last)
            if (alignBlocks && (magicB1 == MAGIC_PADDING)) {
                final int diffOffset = nextBlockBoundary(offset);
                if (diffOffset > 0) {
                    offset += diffOffset;
                    continue;
                }
            }
            final int magic = ((magicB1 << 8) | magicB2);
            if (magic != MAGIC) {
                LOG.error("MAGIC HEADER fake=" + Integer.toHexString(magic) + " expected="
                        + Integer.toHexString(MAGIC));
                return -1;
            }
            break;
        }
        // Header - Data Size (int, 4 bytes)
        final int datalen = bufInput.getInt();
        final int dataUnderFlow = (datalen - (readed - HEADER_LEN));
        int footer = -12345678;
        if (dataUnderFlow < 0) {
            footer = bufInput.get(datalen + HEADER_LEN); // Footer (byte)
        }
        bufInput.limit(Math.min(readed, datalen + HEADER_LEN));
        buf.put(bufInput);
        if (dataUnderFlow > 0) {
            buf.limit(datalen);
            int len = fcInput.read(buf);
            if (len < dataUnderFlow) {
                LOG.error("Unable to read payload readed=" + len + " expected=" + dataUnderFlow);
                return -1;
            }
        }
        if (dataUnderFlow >= 0) {
            // Read Footer (byte)
            bufInput.clear();
            bufInput.limit(FOOTER_LEN);
            if (fcInput.read(bufInput) < FOOTER_LEN)
                return -1;
            bufInput.flip();
            footer = bufInput.get();
        }
        if (footer != MAGIC_FOOT) {
            LOG.error("MAGIC FOOT fake=" + Integer.toHexString(footer) + " expected="
                    + Integer.toHexString(MAGIC_FOOT));
            return -1;
        }
        buf.flip();
        return (offset + HEADER_LEN + datalen + FOOTER_LEN);
    } catch (Exception e) {
        LOG.error("Exception in read(" + offset + ")", e);
    }
    return -1;
}

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
    FileInputStream tableStream = null;
    FileChannel fileChannel = null;
    try {
        File latestCommittedFile = getLatestCommitedFile();
        if (latestCommittedFile != null) {
            tableStream = new FileInputStream(latestCommittedFile);
            fileChannel = tableStream.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
            fileChannel.position(0L);
            int read = fileChannel.read(buffer);
            if (read < HEADERSIZE) {
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
            }
            // Check content of header. Start with Big Endian (default for Java)
            buffer.rewind();
            byteOrder = ByteOrder.BIG_ENDIAN;
            buffer.order(byteOrder);
            int magic = buffer.getInt();
            if (magic == MAGICSTART_BADENDIAN) {
                byteOrder = ByteOrder.LITTLE_ENDIAN;
                buffer.order(byteOrder);
            } else if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Read number of buckets
            long headerMapSize = buffer.getLong();
            // Read checkPoint
            NeedlePointer includedCheckpoint = new NeedlePointer();
            includedCheckpoint.getNeedlePointerFromBuffer(buffer);
            // Read second magic number
            magic = buffer.getInt();
            if (magic != MAGICEND) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Check number of buckets against requested map size
            if (headerMapSize != mapSize) {
                // Map size does not match
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
            }
            // Sets initial checkpoint
            bucketTable.setInitialCheckPoint(includedCheckpoint);
            // Now reads all entries
            logger.info("Hot start: loading buckets...");
            for (int i = 0; i < nbBuffers; i++) {
                bucketTable.prepareBufferForReading(i);
                read = fileChannel.read(bucketTable.getBuffer(i));
                if (read < bucketTable.getBuffer(i).limit())
                    throw new BucketTableManagerException("Incomplete bucket table file "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                //else
                //   logger.info("Hot start: loaded "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            }
            // Checks second magic marker
            buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
            buffer.rewind();
            buffer.limit(INTSIZE);
            if (fileChannel.read(buffer) < INTSIZE)
                throw new BucketTableManagerException(
                        "Incomplete bucket table file, missing secong magic number "
                                + latestCommittedFile.getName());
            buffer.rewind();
            magic = buffer.getInt();
            if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Now reads clean counters
            while (true) {
                buffer.rewind();
                buffer.limit(NeedleLogInfo.INFOSIZE);
                read = fileChannel.read(buffer);
                if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                    throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                            + latestCommittedFile.getName() + ", expected " + (mapSize + HEADERSIZE));
                if (read <= 0)
                    break;
                else {
                    NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                    buffer.rewind();
                    nli.getNeedleLogInfo(buffer);
                    logInfoPerLogNumber.put(Integer.valueOf(nli.getNeedleFileNumber()), nli);
                }
            }
            logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");

        } else {
            // Empty file
            bucketTable.setInitialCheckPoint(new NeedlePointer());
            bucketTable.format();
        }
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed initializing bucket table", ie);
    } catch (BufferUnderflowException bue) {
        throw new BucketTableManagerException("Bucket table too short", bue);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ex) {
                throw new BucketTableManagerException("Error while closing file channel", ex);
            }
        }
    }
}

From source file:com.healthmarketscience.jackcess.Column.java

/**
 * @param lvalDefinition Column value that points to an LVAL record
 * @return The LVAL data
 */
private byte[] readLongValue(byte[] lvalDefinition) throws IOException {
    ByteBuffer def = ByteBuffer.wrap(lvalDefinition).order(PageChannel.DEFAULT_BYTE_ORDER);
    int lengthWithFlags = def.getInt();
    int length = lengthWithFlags & (~LONG_VALUE_TYPE_MASK);

    byte[] rtn = new byte[length];
    byte type = (byte) ((lengthWithFlags & LONG_VALUE_TYPE_MASK) >>> 24);

    if (type == LONG_VALUE_TYPE_THIS_PAGE) {

        // inline long value
        def.getInt(); //Skip over lval_dp
        def.getInt(); //Skip over unknown
        def.get(rtn);

    } else {

        // long value on other page(s)
        if (lvalDefinition.length != getFormat().SIZE_LONG_VALUE_DEF) {
            throw new IOException("Expected " + getFormat().SIZE_LONG_VALUE_DEF
                    + " bytes in long value definition, but found " + lvalDefinition.length);
        }

        int rowNum = ByteUtil.getUnsignedByte(def);
        int pageNum = ByteUtil.get3ByteInt(def, def.position());
        ByteBuffer lvalPage = getPageChannel().createPageBuffer();

        switch (type) {
        case LONG_VALUE_TYPE_OTHER_PAGE: {
            getPageChannel().readPage(lvalPage, pageNum);

            short rowStart = Table.findRowStart(lvalPage, rowNum, getFormat());
            short rowEnd = Table.findRowEnd(lvalPage, rowNum, getFormat());

            if ((rowEnd - rowStart) != length) {
                throw new IOException("Unexpected lval row length");
            }

            lvalPage.position(rowStart);
            lvalPage.get(rtn);
        }
            break;

        case LONG_VALUE_TYPE_OTHER_PAGES:

            ByteBuffer rtnBuf = ByteBuffer.wrap(rtn);
            int remainingLen = length;
            while (remainingLen > 0) {
                lvalPage.clear();
                getPageChannel().readPage(lvalPage, pageNum);

                short rowStart = Table.findRowStart(lvalPage, rowNum, getFormat());
                short rowEnd = Table.findRowEnd(lvalPage, rowNum, getFormat());

                // read next page information
                lvalPage.position(rowStart);
                rowNum = ByteUtil.getUnsignedByte(lvalPage);
                pageNum = ByteUtil.get3ByteInt(lvalPage);

                // update rowEnd and remainingLen based on chunkLength
                int chunkLength = (rowEnd - rowStart) - 4;
                if (chunkLength > remainingLen) {
                    rowEnd = (short) (rowEnd - (chunkLength - remainingLen));
                    chunkLength = remainingLen;
                }
                remainingLen -= chunkLength;

                lvalPage.limit(rowEnd);
                rtnBuf.put(lvalPage);
            }

            break;

        default:
            throw new IOException("Unrecognized long value type: " + type);
        }
    }

    return rtn;
}