Example usage for java.nio ByteBuffer rewind

Introduction

This page lists example usages of java.nio.ByteBuffer.rewind().

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer: the position is set to zero and the mark is discarded.
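
A minimal sketch of the behavior: rewind() sets the position back to zero and discards the mark, but unlike flip() it leaves the limit untouched, which makes it the natural way to re-read data you have just written or read.

ByteBuffer buf = ByteBuffer.allocate(16);
buf.putInt(42).putInt(7);             // position = 8, limit = 16
buf.rewind();                         // position = 0, limit still 16, mark discarded
System.out.println(buf.getInt());     // 42
System.out.println(buf.getInt());     // 7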

Usage

From source file:srebrinb.compress.sevenzip.SevenZFile.java

private void readFully(ByteBuffer buf) throws IOException {
    buf.rewind();
    IOUtils.readFully(channel, buf);
    buf.flip();
}
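
Here rewind() guarantees the fill starts at position 0, and flip() afterwards makes the freshly read bytes readable. IOUtils.readFully appears to be the Apache Commons Compress utility; a rough, dependency-free sketch of the same fill loop (an assumption about its contract, not the library's actual source) looks like this:

private static void readFullyPlain(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
    // keep reading until the buffer is full; EOF before that is an error
    while (buf.hasRemaining()) {
        if (channel.read(buf) < 0) {
            throw new EOFException("channel exhausted before the buffer was filled");
        }
    }
}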

From source file:org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java

/**
 * Fill the measure min values with the minimum value for each data type; this is needed
 * for backward version compatibility, as older versions don't store min values for measures.
 */
private byte[][] updateMinValues(byte[][] minValues, int[] minMaxLen) {
    byte[][] updatedValues = minValues;
    if (minValues.length < minMaxLen.length) {
        updatedValues = new byte[minMaxLen.length][];
        System.arraycopy(minValues, 0, updatedValues, 0, minValues.length);
        List<CarbonMeasure> measures = segmentProperties.getMeasures();
        ByteBuffer buffer = ByteBuffer.allocate(8);
        for (int i = 0; i < measures.size(); i++) {
            buffer.rewind();
            DataType dataType = measures.get(i).getDataType();
            if (dataType == DataTypes.BYTE) {
                buffer.putLong(Byte.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.SHORT) {
                buffer.putLong(Short.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.INT) {
                buffer.putLong(Integer.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.LONG) {
                buffer.putLong(Long.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (DataTypes.isDecimal(dataType)) {
                updatedValues[minValues.length + i] = DataTypeUtil
                        .bigDecimalToByte(BigDecimal.valueOf(Long.MIN_VALUE));
            } else {
                buffer.putDouble(-Double.MAX_VALUE); // Double.MIN_VALUE is the smallest positive double, not the most negative
                updatedValues[minValues.length + i] = buffer.array().clone();
            }
        }
    }
    return updatedValues;
}
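
The clone() after each putLong() is essential here: array() exposes the single backing array of the reused buffer, so without the copy every row of updatedValues would alias the same eight bytes and end up holding the value from the last iteration. A small illustration of the aliasing hazard:

ByteBuffer shared = ByteBuffer.allocate(8);
shared.putLong(1L);
byte[] aliased = shared.array();          // live view of the backing array
byte[] snapshot = shared.array().clone(); // independent copy
shared.rewind();
shared.putLong(2L);                       // overwrites the backing array in place
// aliased now encodes 2L, snapshot still encodes 1L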

From source file:org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java

/**
 * Fill the measure max values with the maximum value for each data type; this is needed
 * for backward version compatibility, as older versions don't store max values for measures.
 */
private byte[][] updateMaxValues(byte[][] maxValues, int[] minMaxLen) {
    byte[][] updatedValues = maxValues;
    if (maxValues.length < minMaxLen.length) {
        updatedValues = new byte[minMaxLen.length][];
        System.arraycopy(maxValues, 0, updatedValues, 0, maxValues.length);
        List<CarbonMeasure> measures = segmentProperties.getMeasures();
        ByteBuffer buffer = ByteBuffer.allocate(8);
        for (int i = 0; i < measures.size(); i++) {
            buffer.rewind();
            DataType dataType = measures.get(i).getDataType();
            if (dataType == DataTypes.BYTE) {
                buffer.putLong(Byte.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.SHORT) {
                buffer.putLong(Short.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.INT) {
                buffer.putLong(Integer.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.LONG) {
                buffer.putLong(Long.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (DataTypes.isDecimal(dataType)) {
                updatedValues[maxValues.length + i] = DataTypeUtil
                        .bigDecimalToByte(BigDecimal.valueOf(Long.MAX_VALUE));
            } else {
                buffer.putDouble(Double.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            }
        }
    }
    return updatedValues;
}

From source file:org.carbondata.core.util.CarbonUtil.java

/**
 * @param listOfNodeInfo list to be filled with blocklet details
 * @param filesLocation  location of the fact file
 * @param measureCount   number of measures
 * @param mdKeySize      size of the MDKey in bytes
 * @param fileSize       total size of the file
 * @return the filled list of blocklet details
 */
private static List<BlockletInfo> getBlockletDetails(List<BlockletInfo> listOfNodeInfo, String filesLocation,
        int measureCount, int mdKeySize, long fileSize) {
    long offset = fileSize - CarbonCommonConstants.LONG_SIZE_IN_BYTE;
    FileHolder fileHolder = FileFactory.getFileHolder(FileFactory.getFileType(filesLocation));
    offset = (long) fileHolder.readDouble(filesLocation, offset);
    int totalMetaDataLength = (int) (fileSize - CarbonCommonConstants.LONG_SIZE_IN_BYTE - offset);
    ByteBuffer buffer = ByteBuffer.wrap(fileHolder.readByteArray(filesLocation, offset, totalMetaDataLength));
    buffer.rewind();
    while (buffer.hasRemaining()) {
        int[] msrLength = new int[measureCount];
        long[] msrOffset = new long[measureCount];
        BlockletInfo info = new BlockletInfo();
        byte[] startKey = new byte[mdKeySize];
        byte[] endKey = new byte[mdKeySize];
        info.setFileName(filesLocation);
        info.setNumberOfKeys(buffer.getInt());
        info.setKeyLength(buffer.getInt());
        info.setKeyOffset(buffer.getLong());
        buffer.get(startKey);
        buffer.get(endKey);
        info.setStartKey(startKey);
        info.setEndKey(endKey);
        for (int i = 0; i < measureCount; i++) {
            msrLength[i] = buffer.getInt();
            msrOffset[i] = buffer.getLong();
        }
        info.setMeasureLength(msrLength);
        info.setMeasureOffset(msrOffset);
        listOfNodeInfo.add(info);
    }
    fileHolder.finish();
    return listOfNodeInfo;
}
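
One detail worth noting: ByteBuffer.wrap(byte[]) already returns a buffer with position 0 and the limit at the array's length, so the rewind() above is defensive rather than required:

byte[] raw = {0, 0, 0, 5};
ByteBuffer wrapped = ByteBuffer.wrap(raw);
// position == 0 and limit == 4 immediately after wrap
System.out.println(wrapped.getInt());     // 5, readable without an explicit rewind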

From source file:org.apache.carbondata.processing.store.writer.AbstractFactDataWriter.java

/**
 * Below method will be used to update the no dictionary start and end key
 *
 * @param key key to be updated
 * @return updated no dictionary key
 */
protected byte[] updateNoDictionaryStartAndEndKey(byte[] key) {
    if (key.length == 0) {
        return key;
    }
    // wrap the key in a byte buffer, skipping the length part of the data
    ByteBuffer buffer = ByteBuffer.wrap(key, 2, key.length - 2);
    // create an output buffer without the length part
    ByteBuffer output = ByteBuffer.allocate(key.length - 2);
    short numberOfBytesToStoreLength = 2;
    // as the length part is removed, each no dictionary value index
    // needs to be shifted back by 2 bytes
    for (int i = 0; i < dataWriterVo.getNoDictionaryCount(); i++) {
        output.putShort((short) (buffer.getShort() - numberOfBytesToStoreLength));
    }
    // copy the data part
    while (buffer.hasRemaining()) {
        output.put(buffer.get());
    }
    output.rewind();
    return output.array();
}
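
The wrap(key, 2, key.length - 2) call is easy to misread: it wraps the whole array but starts the position at index 2, so the relative getShort() calls skip the two-byte length header. A small sketch (the byte layout is purely illustrative):

byte[] key = {0, 6, 0, 4, 'a', 'b'};                  // hypothetical: 2-byte header, then indices and data
ByteBuffer in = ByteBuffer.wrap(key, 2, key.length - 2);
// position == 2, limit == key.length, so the header is skipped
short firstIndex = in.getShort();                     // reads bytes 2..3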

From source file:org.apache.carbondata.core.indexstore.blockletindex.BlockDataMap.java

private byte[] convertRowCountFromShortToByteArray(List<Short> blockletCountInEachBlock) {
    int bufferSize = blockletCountInEachBlock.size() * 2;
    ByteBuffer byteBuffer = ByteBuffer.allocate(bufferSize);
    for (Short blockletCount : blockletCountInEachBlock) {
        byteBuffer.putShort(blockletCount);
    }
    byteBuffer.rewind();
    return byteBuffer.array();
}
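
The rewind() here is cosmetic: array() returns the entire backing array regardless of the buffer's position, so the result would be identical without it. What actually guarantees correctness is sizing the buffer to exactly two bytes per short:

ByteBuffer bb = ByteBuffer.allocate(4);
bb.putShort((short) 1).putShort((short) 2); // position == 4
byte[] withoutRewind = bb.array();          // the full 4-byte backing array
bb.rewind();
byte[] withRewind = bb.array();             // the very same array instance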

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
    FileInputStream tableStream = null;
    FileChannel fileChannel = null;
    try {
        File latestCommittedFile = getLatestCommitedFile();
        if (latestCommittedFile != null) {
            tableStream = new FileInputStream(latestCommittedFile);
            fileChannel = tableStream.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
            fileChannel.position(0L);
            int read = fileChannel.read(buffer);
            if (read < HEADERSIZE) {
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
            }
            // Check content of header. Start with Big Endian (default for Java)
            buffer.rewind();
            byteOrder = ByteOrder.BIG_ENDIAN;
            buffer.order(byteOrder);
            int magic = buffer.getInt();
            if (magic == MAGICSTART_BADENDIAN) {
                byteOrder = ByteOrder.LITTLE_ENDIAN;
                buffer.order(byteOrder);
            } else if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Read number of buckets
            long headerMapSize = buffer.getLong();
            // Read checkPoint
            NeedlePointer includedCheckpoint = new NeedlePointer();
            includedCheckpoint.getNeedlePointerFromBuffer(buffer);
            // Read second magic number
            magic = buffer.getInt();
            if (magic != MAGICEND) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Check number of buckets against requested map size
            if (headerMapSize != mapSize) {
                // Map size does not match
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
            }
            // Sets initial checkpoint
            bucketTable.setInitialCheckPoint(includedCheckpoint);
            // Now reads all entries
            logger.info("Hot start: loading buckets...");
            for (int i = 0; i < nbBuffers; i++) {
                bucketTable.prepareBufferForReading(i);
                read = fileChannel.read(bucketTable.getBuffer(i));
                if (read < bucketTable.getBuffer(i).limit())
                    throw new BucketTableManagerException("Incomplete bucket table file "
                            + latestCommittedFile.getName() + ", expected " + (mapSize + HEADERSIZE));
                //else
                //   logger.info("Hot start: loaded "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            }
            // Checks second magic marker
            buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
            buffer.rewind();
            buffer.limit(INTSIZE);
            if (fileChannel.read(buffer) < INTSIZE)
                throw new BucketTableManagerException(
                        "Incomplete bucket table file, missing secong magic number "
                                + latestCommittedFile.getName());
            buffer.rewind();
            magic = buffer.getInt();
            if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Now reads clean counters
            while (true) {
                buffer.rewind();
                buffer.limit(NeedleLogInfo.INFOSIZE);
                read = fileChannel.read(buffer);
                if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                    throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                            + latestCommittedFile.getName() + ", expected " + (mapSize + HEADERSIZE));
                if (read <= 0)
                    break;
                else {
                    NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                    buffer.rewind();
                    nli.getNeedleLogInfo(buffer);
                    logInfoPerLogNumber.put(Integer.valueOf(nli.getNeedleFileNumber()), nli);
                }
            }
            logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");

        } else {
            // Empty file
            bucketTable.setInitialCheckPoint(new NeedlePointer());
            bucketTable.format();
        }
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed initializing bucket table", ie);
    } catch (BufferUnderflowException bue) {
        throw new BucketTableManagerException("Bucket table too short", bue);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ex) {
                throw new BucketTableManagerException("Error while closing file channel", ex);
            }
        }
    }
}
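
The endianness probe above relies on a simple property: an int written in one byte order reads back byte-swapped in the other. A condensed sketch of the same detection logic (MAGIC is illustrative; the real code uses MAGICSTART and MAGICSTART_BADENDIAN):

int MAGIC = 0xCAFEBABE;
ByteBuffer hdr = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
hdr.putInt(MAGIC);                        // pretend the file was written little-endian
hdr.rewind();
hdr.order(ByteOrder.BIG_ENDIAN);          // probe with Java's default order first
int probe = hdr.getInt();
ByteOrder detected = (probe == MAGIC) ? ByteOrder.BIG_ENDIAN
        : (probe == Integer.reverseBytes(MAGIC)) ? ByteOrder.LITTLE_ENDIAN
        : null;                           // neither value: corrupt header
// detected == ByteOrder.LITTLE_ENDIAN in this sketch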

From source file:com.obviousengine.android.focus.ZslFocusCamera.java

/**
 * Given an image reader, extracts the JPEG image bytes and then closes the
 * reader.
 *
 * @param img the image from which to extract jpeg bytes or compress to
 *            jpeg.
 * @return The bytes of the JPEG image. Newly allocated.
 */
private byte[] acquireJpegBytes(Image img) {
    ByteBuffer buffer;

    if (img.getFormat() == ImageFormat.JPEG) {
        Image.Plane plane0 = img.getPlanes()[0];
        buffer = plane0.getBuffer();

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.rewind();
        return imageBytes;
    } else {
        throw new RuntimeException("Unsupported image format.");
    }
}
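
The rewind() after the bulk get() is a courtesy to later readers: copying into imageBytes advances the plane buffer's position to its limit, and rewinding restores it so any subsequent consumer of the same Image sees the full payload again:

ByteBuffer plane = ByteBuffer.wrap(new byte[] {1, 2, 3});
byte[] copy = new byte[plane.remaining()];
plane.get(copy);                          // position advances to the limit
// plane.remaining() == 0 here; a second reader would see nothing
plane.rewind();
// plane.remaining() == 3 again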

From source file:com.linkedin.databus.core.DbusEventV1.java

@Override
public ByteBuffer value() {
    ByteBuffer value = _buf.asReadOnlyBuffer().order(_buf.order());
    value.position(_position + LongKeyOffset + keyLength());
    value = value.slice().order(_buf.order());
    int valueSize = valueLength();
    value.limit(valueSize);
    value.rewind();
    return value;
}
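
This is the classic zero-copy windowing pattern: take a read-only duplicate, position it at the payload, slice, and cap the limit. Both asReadOnlyBuffer() and slice() reset the byte order to big-endian, which is why order() is reapplied after each call; the final rewind() is defensive, since a fresh slice already starts at position 0. A generic sketch of the pattern under those assumptions:

static ByteBuffer window(ByteBuffer src, int offset, int length) {
    ByteBuffer view = src.asReadOnlyBuffer().order(src.order());
    view.position(offset);                  // jump to the start of the payload
    view = view.slice().order(src.order()); // slice() resets the order to BIG_ENDIAN
    view.limit(length);                     // cap the window at the payload length
    view.rewind();                          // defensive: a new slice is already at 0
    return view;
}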

From source file:org.apache.hadoop.raid.JRSDecoder.java

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, long limit, byte[] outBuf,
        JRSStreamFactory sf) throws IOException {

    // Loop while the number of skipped + read bytes is less than the limit.
    int seq = 0;

    int target = erasedLocations[0];

    for (long read = 0; read < limit;) {

        int failNum = erasedLocations.length;
        int bufOffset = bufSize * stripeSize;
        ByteBuffer buf = ByteBuffer.allocate(bufOffset + 64);
        buf.putInt(bufOffset, seq);

        boolean important = false;

        // the last threadNum packets are flagged as important
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets
        buf.rewind();
        erasedLocations = readFromInputs(inputs, erasedLocations, buf, sf, seq);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());

        int toRead = (int) Math.min((long) bufSize, limit - read);

        buf.rewind();

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (((int) (limit - read) + bufSize - 1) / bufSize - 1) % threadNum;
        }
        DecodePackage dp = (new DecodePackage(erasedLocations, buf)).limit(limit).outputBuffer(outBuf)
                .target(target);
        //dispatch
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                flag = true;
            }
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                flag = true;
            }
        }
    }
}
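
Both rewind() calls in writeFixedBlock pair with the absolute put overloads used above: putInt(bufOffset, seq) and put(bufOffset + 4, flag) write at fixed indices without moving the position, so rewinding before handing the buffer off simply guarantees readers start from a clean position 0:

ByteBuffer buf = ByteBuffer.allocate(16);
buf.putInt(8, 99);                        // absolute put: position stays at 0
buf.putInt(99);                           // relative put: position advances to 4
buf.rewind();                             // back to 0 before the buffer is read elsewhere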