Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you will find usage examples for java.nio ByteBuffer rewind, collected from open-source projects.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer: the position is set to zero and the mark is discarded.
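
A minimal standalone sketch (not taken from any of the projects below) of what rewind() does: the position returns to zero while the limit is left untouched, so the same contents can be read or written again.

import java.nio.ByteBuffer;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.putInt(42);                    // position is now 4, limit stays 8
        buf.rewind();                      // position back to 0, limit unchanged
        System.out.println(buf.getInt());  // prints 42
        // Contrast with flip(), which would also move the limit down to 4,
        // restricting reads to the bytes actually written.
    }
}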

Usage

From source file:org.apache.usergrid.persistence.Schema.java

public static ByteBuffer decrypt(ByteBuffer encrypted) {
    if (encrypted == null || !encrypted.hasRemaining()) {
        return encrypted;
    }
    try {
        SecretKeySpec sKeySpec = new SecretKeySpec(getRawKey(encryptionSeed), "AES");
        Cipher cipher = Cipher.getInstance("AES");
        cipher.init(Cipher.DECRYPT_MODE, sKeySpec);
        ByteBuffer decrypted = ByteBuffer.allocate(cipher.getOutputSize(encrypted.remaining()));
        cipher.doFinal(encrypted, decrypted);
        decrypted.rewind();
        return decrypted;
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
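
In the example above, Cipher.doFinal(ByteBuffer, ByteBuffer) leaves the output buffer's position at the end of the written bytes, so rewind() resets it before the result is handed back to the caller. A minimal sketch of the same pattern, using a throwaway AES key instead of Usergrid's encryptionSeed (the surrounding method is assumed to declare the crypto exceptions):

SecretKey key = KeyGenerator.getInstance("AES").generateKey(); // throwaway key for the sketch
Cipher cipher = Cipher.getInstance("AES");
cipher.init(Cipher.ENCRYPT_MODE, key);
ByteBuffer in = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8));
ByteBuffer out = ByteBuffer.allocate(cipher.getOutputSize(in.remaining()));
cipher.doFinal(in, out);   // consumes in, leaves out's position at the end of the ciphertext
out.rewind();              // reset so the ciphertext can be read from byte 0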

From source file:voldemort.store.cachestore.impl.LogChannel.java

public KeyValue readRecord(int recordNo) {
    if (isEOF(recordNo))
        throw new RuntimeException("record out of range " + getTotalRecord() + " expected " + recordNo);
    ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
    long pos = ChannelStore.OFFSET + (long) recordNo * RECORD_SIZE;
    try {
        if (isLastRecord(recordNo)) {
            logger.info("skip due to " + totalRecord + " read " + recordNo);
            return null;
        }
        getIndexChannel().read(buf, pos);
        assert (buf.capacity() == RECORD_SIZE);
        buf.rewind();
        byte status = buf.get();
        long keyOffset2Len = buf.getLong();
        byte[] keys = readChannel(keyOffset2Len, getKeyChannel());
        Key k = toKey(keys);
        long dataOffset2Len = buf.getLong();
        byte[] datas = readChannel(dataOffset2Len, getDataChannel());
        long block2version = buf.getLong();
        Value<byte[]> value = null;
        //if delete return value=null, not delete read value
        if (!isDeleted(status))
            value = new BlockValue<byte[]>(datas, BlockUtil.getVersionNo(block2version), (short) 0);
        return new KeyValue(k, value);
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}
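
The rewind() here follows a positional channel read: FileChannel.read(ByteBuffer, long) advances the buffer's position by the number of bytes read, so it must be reset before the relative get calls. A small sketch of that pattern, assuming a readable FileChannel named channel, a position, and a RECORD_SIZE constant:

ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
channel.read(buf, position);   // the read advances buf's position
buf.rewind();                  // back to 0 so the gets below start at the first byte
byte status = buf.get();
long keyOffsetAndLength = buf.getLong();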

From source file:org.apache.hadoop.hbase.io.encoding.TestDataBlockEncoders.java

private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf, DataBlockEncoder encoder)
        throws IOException {
    // decode
    ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
            encodedData.length - ENCODED_DATA_OFFSET);
    DataInputStream dis = new DataInputStream(bais);
    ByteBuffer actualDataset;
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
            .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
    actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
    actualDataset.rewind();

    // this is because in case of prefix tree the decoded stream will not have
    // the mvcc in it.
    assertEquals("Encoding -> decoding gives different results for " + encoder,
            Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}

From source file:oz.hadoop.yarn.api.FsByteBufferPersister.java

/**
 * Persists the contents of the given buffer to the output channel associated
 * with {@code dataIdentifier}, creating and caching the channel on first use.
 * @param dataIdentifier identifier used to look up (or create) the output channel
 * @param dataBuffer buffer whose contents are written starting from position zero
 */
public void persist(String dataIdentifier, ByteBuffer dataBuffer) {
    WritableByteChannel outputChannel = this.outputChannels.get(dataIdentifier);
    if (outputChannel == null) {
        String fileName = this.generateFileName(dataIdentifier);
        try {
            OutputStream os = this.fileSystem.create(new Path(fileName), true);
            outputChannel = Channels.newChannel(os);
            this.outputChannels.put(dataIdentifier, outputChannel);
        } catch (Exception e) {
            throw new IllegalStateException(
                    "Failed to create FSDataOutputStream with fileIdentifier '" + dataIdentifier + "'", e);
        }
    }
    dataBuffer.rewind();
    try {
        outputChannel.write(dataBuffer);
    } catch (Exception e) {
        throw new IllegalStateException("Failed to write data to channel", e);
    }
}
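
Here rewind() guarantees that the whole buffer is written from position zero, regardless of where the caller left the position after filling or partially reading it. A minimal sketch, assuming channel is any WritableByteChannel (for example one obtained via Channels.newChannel(outputStream)):

ByteBuffer data = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
data.get();            // pretend the caller already consumed part of the buffer
data.rewind();         // start the write from byte 0 again
channel.write(data);   // writes all 5 bytes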

From source file:org.apache.hadoop.hbase.client.coprocessor.TimeseriesAggregationClient.java

/**
 * It computes average while fetching sum and row count from all the corresponding regions.
 * Approach is to compute a global sum of region level sum and rowcount and then compute the
 * average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final HTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    // if (response.getEntryCount() > 0) {
                    return response;
                    // }
                    // return null;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}
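
The allocate(8).put(...) followed by rewind() and getLong() above is simply a way to turn an 8-byte array into a long: put() leaves the position at 8, and rewind() brings it back to 0 before the read. A small sketch with a made-up payload; ByteBuffer.wrap(raw).getLong() would give the same result without the explicit rewind:

byte[] raw = { 0, 0, 0, 0, 0, 0, 0, 42 };         // hypothetical 8-byte big-endian count
ByteBuffer bb = ByteBuffer.allocate(8).put(raw);  // put() advances the position to 8
bb.rewind();                                      // back to 0 before reading
long rowCount = bb.getLong();                     // 42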

From source file:com.healthmarketscience.jackcess.PageChannel.java

/**
 * Write a page to disk as a new page, appending it to the database
 * @param page Page to write
 * @return Page number at which the page was written
 */
public int writeNewPage(ByteBuffer page) throws IOException {
    long size = _channel.size();
    if (size >= getFormat().MAX_DATABASE_SIZE) {
        throw new IOException("Database is at maximum size " + getFormat().MAX_DATABASE_SIZE);
    }
    if ((size % getFormat().PAGE_SIZE) != 0L) {
        throw new IOException("Database corrupted, file size " + size + " is not multiple of page size "
                + getFormat().PAGE_SIZE);
    }

    page.rewind();

    if (page.remaining() > getFormat().PAGE_SIZE) {
        throw new IllegalArgumentException("Page buffer is too large, size " + page.remaining());
    }

    // push the buffer to the end of the page, so that a full page's worth of
    // data is written regardless of the incoming buffer size (we use a tiny
    // buffer in allocateNewPage)
    int pageOffset = (getFormat().PAGE_SIZE - page.remaining());
    long offset = size + pageOffset;
    int pageNumber = getNextPageNumber(size);
    _channel.write(_codecHandler.encodePage(page, pageNumber, pageOffset), offset);
    // note, we "force" page removal because we know that this is an unused
    // page (since we just added it to the file)
    _globalUsageMap.removePageNumber(pageNumber, true);
    return pageNumber;
}
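
The rewind() before the size check matters because remaining() is measured from the current position: after the rewind, remaining() spans the buffer from zero up to its limit, so the check and the subsequent write see the full page no matter where the caller left the position. A tiny illustration:

ByteBuffer page = ByteBuffer.allocate(4096);
page.position(100);                        // a caller may hand the page over mid-read
page.rewind();                             // position 0, limit unchanged
assert page.remaining() == page.limit();   // remaining() now covers the whole buffer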

From source file:org.apache.hadoop.hbase.coprocessor.client.TimeseriesAggregationClient.java

/**
 * It computes average while fetching sum and row count from all the corresponding regions.
 * Approach is to compute a global sum of region level sum and rowcount and then compute the
 * average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs(
        final Table table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan) throws Throwable {
    final TimeseriesAggregateRequest requestArg = validateArgAndGetPB(scan, ci, false, intervalSeconds,
            timestampSecondsMin, timestampSecondsMax, keyFilterPattern);
    class AvgCallBack implements Batch.Callback<TimeseriesAggregateResponse> {
        ConcurrentSkipListMap<Long, Pair<S, Long>> averages = new ConcurrentSkipListMap<Long, Pair<S, Long>>();

        public synchronized ConcurrentSkipListMap<Long, Pair<S, Long>> getAvgArgs() {
            return averages;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, TimeseriesAggregateResponse result) {
            List<TimeseriesAggregateResponseMapEntry> results = result.getEntryList();
            for (TimeseriesAggregateResponseMapEntry entry : results) {

                if (entry.getValue().getFirstPartCount() == 0) {
                    if (!averages.containsKey(entry.getKey())) {
                        averages.put(entry.getKey(), new Pair<S, Long>(null, 0L));
                    }
                } else {

                    ByteString b = entry.getValue().getFirstPart(0);
                    T t = null;
                    try {
                        t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    } catch (IOException e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                    S s = ci.getPromotedValueFromProto(t);

                    ByteBuffer bb = ByteBuffer.allocate(8)
                            .put(getBytesFromResponse(entry.getValue().getSecondPart()));
                    bb.rewind();

                    if (averages.containsKey(entry.getKey())) {
                        S sum = averages.get(entry.getKey()).getFirst();
                        Long rowCount = averages.get(entry.getKey()).getSecond();
                        averages.put(entry.getKey(),
                                new Pair<S, Long>(ci.add(sum, s), rowCount + bb.getLong()));
                    } else {
                        averages.put(entry.getKey(), new Pair<S, Long>(s, bb.getLong()));
                    }
                }
            }
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TimeseriesAggregateService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TimeseriesAggregateService, TimeseriesAggregateResponse>() {
                @Override
                public TimeseriesAggregateResponse call(TimeseriesAggregateService instance)
                        throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TimeseriesAggregateResponse> rpcCallback = new BlockingRpcCallback<TimeseriesAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TimeseriesAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    // if (response.getEntryCount() > 0) {
                    return response;
                    // }
                    // return null;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java

private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint 
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ; // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // truncate buffer
        fileChannel.write(headerBuffer);
        // Now writes buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + mapSize + HEADERSIZE);
            // else
            // logger.info("Bucket table commit: written "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            try {
                Thread.sleep(10);
            } catch (Throwable th) {

            }
        }
        // Writes second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write Needle Log Info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Writes checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index "+bucketTable.getRequestedCheckPoint()) ;
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false) ;
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writting bucket table", ie);
    } finally {
        headerBuffer = null; //May ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table" + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table" + currentFile.getName(), th);
    }
}
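
The method above reuses a single buffer for several small writes by pairing rewind() with limit(): rewind the position, cap the limit to the bytes of interest, fill, rewind again, and write. A stripped-down sketch of that sequence, assuming fileChannel is a writable FileChannel:

ByteBuffer buffer = ByteBuffer.allocate(64);
buffer.rewind();
buffer.limit(Integer.BYTES);     // this pass only writes a 4-byte marker
buffer.putInt(0xCAFEBABE);       // hypothetical magic number
buffer.rewind();                 // back to 0 so write() sends bytes 0 through limit
fileChannel.write(buffer);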

From source file:org.limewire.mojito.io.MessageDispatcherImpl.java

/**
 * Reads and returns a single DHTMessage from Network or null
 * if no Messages were in the input queue.
 */
private DHTMessage readMessage() throws MessageFormatException, IOException {
    SocketAddress src = receive((ByteBuffer) receiveBuffer.clear());
    if (src != null) {
        receiveBuffer.flip();

        ByteBuffer data = null;
        if (getAllocateNewByteBuffer()) {
            int length = receiveBuffer.remaining();
            data = ByteBuffer.allocate(length);
            data.put(receiveBuffer);
            data.rewind();
        } else {
            data = receiveBuffer.slice();
        }

        DHTMessage message = deserialize(src, data/*.asReadOnlyBuffer()*/);
        return message;
    }
    return null;
}
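
The copy branch above needs the rewind because put(ByteBuffer) leaves the destination's position at the end of the copied bytes, whereas slice() produces a view that already starts at position zero. A compact sketch of the two options:

ByteBuffer received = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });

// Option 1: copy into an independent buffer, then rewind before reading.
ByteBuffer copy = ByteBuffer.allocate(received.remaining());
copy.put(received);    // drains received, leaves copy's position at its limit
copy.rewind();         // reset so the copy is readable from the start

// Option 2: share the backing array; the slice is already positioned at 0.
received.rewind();     // restore received just for this sketch
ByteBuffer view = received.slice();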

From source file:org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.java

private ByteBuffer principalKey(AuthPrincipalInfo principalInfo) {
    // 66 bytes, 2 UUIDS + 2 chars for prefix
    ByteBuffer buff = ByteBuffer.allocate(32 * 2 + 2);
    buff.put(bytes(principalInfo.getApplicationId()));
    buff.put(bytes(principalInfo.getUuid()));
    buff.put(bytes(principalInfo.getType().getPrefix()));
    buff.rewind();

    return buff;
}
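
After the sequence of puts, rewind() resets the position but leaves the limit at the buffer's capacity, so the consumer sees the entire allocated key; flip() would instead cap the limit at the number of bytes actually written. A tiny illustration of the difference:

ByteBuffer buff = ByteBuffer.allocate(8);
buff.putInt(7);    // 4 of the 8 bytes written
buff.rewind();     // remaining() == 8: the reader also sees the zeroed tail
// buff.flip();    // would give remaining() == 4: only the bytes written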