Example usage for java.nio ByteBuffer putLong

Introduction

On this page you can find example usages of the java.nio.ByteBuffer.putLong method.

Prototype

public abstract ByteBuffer putLong(long value);
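
This is the relative variant: it writes at the buffer's current position. ByteBuffer also defines an absolute overload, putLong(int index, long value), which writes at a given index without moving the position.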

Document

Writes eight bytes containing the given long value, in the current byte order, into this buffer at the current position, and then increments the position by eight.
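
As a quick orientation before the real-world examples, here is a minimal, self-contained round trip through putLong and getLong (the class and variable names are illustrative only):

import java.nio.ByteBuffer;

public class PutLongDemo {
    public static void main(String[] args) {
        // allocate exactly one long's worth of space (8 bytes)
        ByteBuffer buf = ByteBuffer.allocate(Long.BYTES);
        buf.putLong(42L);           // position advances from 0 to 8
        buf.flip();                 // limit = 8, position = 0: ready to read
        long value = buf.getLong(); // reads the same 8 bytes back as a long
        System.out.println(value);  // prints 42
    }
}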

Usage

From source file: org.apache.bookkeeper.bookie.BookieJournalTest.java

/**
 * Generate meta entry with given master key
 */
private ByteBuffer generateMetaEntry(long ledgerId, byte[] masterKey) {
    ByteBuffer bb = ByteBuffer.allocate(8 + 8 + 4 + masterKey.length);
    bb.putLong(ledgerId);
    bb.putLong(Bookie.METAENTRY_ID_LEDGER_KEY);
    bb.putInt(masterKey.length);
    bb.put(masterKey);
    bb.flip();
    return bb;
}
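
Note the flip() before returning: it sets the limit to the number of bytes just written and resets the position to zero, so the caller gets a buffer that is ready to be read or drained to a channel.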

From source file: org.opendaylight.lispflowmapping.implementation.serializer.MapRegisterSerializer.java

public ByteBuffer serialize(MapRegister mapRegister) {
    int size = Length.HEADER_SIZE;
    if (mapRegister.getAuthenticationData() != null) {
        size += mapRegister.getAuthenticationData().length;
    }
    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        size += Length.XTRID_SIZE + Length.SITEID_SIZE;
    }
    for (EidToLocatorRecord eidToLocatorRecord : mapRegister.getEidToLocatorRecord()) {
        size += EidToLocatorRecordSerializer.getInstance().getSerializationSize(eidToLocatorRecord);
    }

    ByteBuffer registerBuffer = ByteBuffer.allocate(size);
    registerBuffer.put((byte) ((byte) (LispMessageEnum.MapRegister.getValue() << 4)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isProxyMapReply()), Flags.PROXY)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isXtrSiteIdPresent()), Flags.XTRSITEID)));
    registerBuffer.position(registerBuffer.position() + Length.RES);
    registerBuffer
            .put(ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isWantMapNotify()), Flags.WANT_MAP_REPLY));
    registerBuffer.put((byte) mapRegister.getEidToLocatorRecord().size());
    registerBuffer.putLong(NumberUtil.asLong(mapRegister.getNonce()));
    registerBuffer.putShort(NumberUtil.asShort(mapRegister.getKeyId()));

    if (mapRegister.getAuthenticationData() != null) {
        registerBuffer.putShort((short) mapRegister.getAuthenticationData().length);
        registerBuffer.put(mapRegister.getAuthenticationData());
    } else {
        registerBuffer.putShort((short) 0);
    }
    for (EidToLocatorRecord eidToLocatorRecord : mapRegister.getEidToLocatorRecord()) {
        EidToLocatorRecordSerializer.getInstance().serialize(registerBuffer, eidToLocatorRecord);
    }

    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        registerBuffer.put(mapRegister.getXtrId());
        registerBuffer.put(mapRegister.getSiteId());
    }
    registerBuffer.clear();
    return registerBuffer;
}
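
Here clear() plays the role flip() played above: it resets the position to zero and the limit to the capacity. Because the buffer was allocated to exactly the computed serialized size, the effect is the same, and the caller can read the register message from the start. The newer version of this serializer below uses the same pattern.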

From source file: io.pcp.parfait.dxm.PcpMmvWriter.java

/**
 * Writes out a PCP MMV table-of-contents block.
 *
 * @param dataFileBuffer
 *            ByteBuffer positioned at the correct offset in the file for the block
 * @param tocType
 *            the type of TOC block to write
 * @param entryCount
 *            the number of blocks of type tocType to be found in the file
 * @param firstEntryOffset
 *            the offset of the first tocType block, relative to start of the file
 */
private void writeToc(ByteBuffer dataFileBuffer, TocType tocType, int entryCount, int firstEntryOffset) {
    dataFileBuffer.putInt(tocType.identifier);
    dataFileBuffer.putInt(entryCount);
    dataFileBuffer.putLong(firstEntryOffset);
}
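
firstEntryOffset is declared as an int, but putLong widens it to a long, so the offset field of a TOC block always occupies eight bytes in the file.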

From source file: org.opendaylight.lispflowmapping.lisp.serializer.MapRegisterSerializer.java

public ByteBuffer serialize(MapRegister mapRegister) {
    int size = Length.HEADER_SIZE;
    if (mapRegister.getAuthenticationData() != null) {
        size += mapRegister.getAuthenticationData().length;
    }
    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        size += Length.XTRID_SIZE + Length.SITEID_SIZE;
    }
    for (MappingRecordItem eidToLocatorRecord : mapRegister.getMappingRecordItem()) {
        size += MappingRecordSerializer.getInstance()
                .getSerializationSize(eidToLocatorRecord.getMappingRecord());
    }

    ByteBuffer registerBuffer = ByteBuffer.allocate(size);
    registerBuffer.put((byte) ((byte) (MessageType.MapRegister.getIntValue() << 4)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isProxyMapReply()), Flags.PROXY)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isXtrSiteIdPresent()), Flags.XTRSITEID)));
    registerBuffer.position(registerBuffer.position() + Length.RES);
    registerBuffer.put((byte) (ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isMergeEnabled()),
            Flags.MERGE_ENABLED)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isWantMapNotify()), Flags.WANT_MAP_NOTIFY)));
    registerBuffer.put((byte) mapRegister.getMappingRecordItem().size());
    registerBuffer.putLong(NumberUtil.asLong(mapRegister.getNonce()));
    registerBuffer.putShort(NumberUtil.asShort(mapRegister.getKeyId()));

    if (mapRegister.getAuthenticationData() != null) {
        registerBuffer.putShort((short) mapRegister.getAuthenticationData().length);
        registerBuffer.put(mapRegister.getAuthenticationData());
    } else {
        registerBuffer.putShort((short) 0);
    }
    for (MappingRecordItem eidToLocatorRecord : mapRegister.getMappingRecordItem()) {
        MappingRecordSerializer.getInstance().serialize(registerBuffer, eidToLocatorRecord.getMappingRecord());
    }

    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        registerBuffer.put(mapRegister.getXtrId().getValue());
        registerBuffer.put(mapRegister.getSiteId().getValue());
    }
    registerBuffer.clear();
    return registerBuffer;
}

From source file: org.apache.jxtadoop.hdfs.server.datanode.BlockSender.java

/**
 * Sends up to maxChunks chunks of data.
 * 
 * When blockInPosition is >= 0, assumes 'out' is a 
 * {@link SocketOutputStream} and tries 
 * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
 * send data (and updates blockInPosition).
 */
private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) throws IOException {
    // Sends multiple chunks in one packet with a single write().

    int len = Math.min((int) (endOffset - offset), bytesPerChecksum * maxChunks);
    if (len == 0) {
        return 0;
    }

    int numChunks = (len + bytesPerChecksum - 1) / bytesPerChecksum;
    int packetLen = len + numChunks * checksumSize + 4;
    pkt.clear();

    // write packet header
    pkt.putInt(packetLen);
    pkt.putLong(offset);
    pkt.putLong(seqno);
    pkt.put((byte) ((offset + len >= endOffset) ? 1 : 0));
    //why no ByteBuf.putBoolean()?
    pkt.putInt(len);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();

    if (checksumSize > 0 && checksumIn != null) {
        try {
            checksumIn.readFully(buf, checksumOff, checksumLen);
        } catch (IOException e) {
            LOG.warn(" Could not read or failed to verify checksum for data" + " at offset " + offset
                    + " for block " + block + " got : " + StringUtils.stringifyException(e));
            IOUtils.closeStream(checksumIn);
            checksumIn = null;
            if (corruptChecksumOk) {
                if (checksumOff < checksumLen) {
                    // Just fill the array with zeros.
                    Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
                }
            } else {
                throw e;
            }
        }
    }

    int dataOff = checksumOff + checksumLen;

    if (blockInPosition < 0) {
        //normal transfer
        IOUtils.readFully(blockIn, buf, dataOff, len);

        if (verifyChecksum) {
            int dOff = dataOff;
            int cOff = checksumOff;
            int dLeft = len;

            for (int i = 0; i < numChunks; i++) {
                checksum.reset();
                int dLen = Math.min(dLeft, bytesPerChecksum);
                checksum.update(buf, dOff, dLen);
                if (!checksum.compare(buf, cOff)) {
                    throw new ChecksumException("Checksum failed at " + (offset + len - dLeft), len);
                }
                dLeft -= dLen;
                dOff += dLen;
                cOff += checksumSize;
            }
        }
        //writing is done below (mainly to handle IOException)
    }

    try {
        if (blockInPosition >= 0) {
            //use transferTo(). Checks on out and blockIn are already done. 

            SocketOutputStream sockOut = (SocketOutputStream) out;
            //first write the packet
            sockOut.write(buf, 0, dataOff);
            // no need to flush, since we know out is not a buffered stream.

            sockOut.transferToFully(((FileInputStream) blockIn).getChannel(), blockInPosition, len);

            blockInPosition += len;
        } else {
            // normal transfer
            out.write(buf, 0, dataOff + len);
        }

    } catch (IOException e) {
        /* exception while writing to the client (well, with transferTo(),
         * it could also be while reading from the local file).
         */
        throw ioeToSocketException(e);
    }

    if (throttler != null) { // rebalancing so throttle
        throttler.throttle(packetLen);
    }

    return len;
}
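
The packet header is assembled entirely with relative puts: an int for the packet length, two putLong calls for the byte offset and sequence number, one flag byte, and an int for the data length. The header (and, on the normal path, the data) is then written to the stream straight from the buffer's backing array via pkt.array().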

From source file: org.apache.usergrid.security.tokens.cassandra.TokenServiceImpl.java

private String getTokenForUUID(TokenInfo tokenInfo, TokenCategory tokenCategory, UUID uuid) {
    int l = 36; // 16 bytes of UUID plus a 20-byte digest
    if (tokenCategory.getExpires()) {
        l += 8;
    }
    ByteBuffer bytes = ByteBuffer.allocate(l);
    bytes.put(bytes(uuid));
    long expires = Long.MAX_VALUE;
    if (tokenCategory.getExpires()) {
        expires = (tokenInfo.getDuration() > 0)
                ? UUIDUtils.getTimestampInMillis(uuid) + (tokenInfo.getDuration())
                : UUIDUtils.getTimestampInMillis(uuid) + getExpirationForTokenType(tokenCategory);
        bytes.putLong(expires);
    }
    bytes.put(sha(tokenCategory.getPrefix() + uuid + tokenSecretSalt + expires));
    return tokenCategory.getBase64Prefix() + encodeBase64URLSafeString(bytes.array());
}
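
The putLong here is conditional: the eight-byte expiry timestamp is only serialized for token categories that expire, which is why the buffer length l is bumped by 8 up front in that case.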

From source file: org.apache.hadoop.hbase.filter.SlicedRowFilter.java

/**
 * Serialize the filter
 */
@Override
public byte[] toByteArray() throws IOException {

    //
    // Allocate buffer for the following data:
    //
    // count: 8 bytes
    // slicesLength: 4 bytes
    // nbounds: 4 bytes (this.bounds.length)
    // bounds: 4 * this.bounds.length
    // Size of range keys: 4 bytes (this.rangekeys.length)
    // slices: this.rangekeys
    //

    ByteBuffer bb = ByteBuffer.wrap(new byte[8 + 4 + 4 * this.bounds.length + 4 + 4 + this.rangekeys.length])
            .order(ByteOrder.BIG_ENDIAN);

    bb.putLong(this.count);
    bb.putInt(this.slicesLength);
    bb.putInt(this.bounds.length);
    for (int i = 0; i < this.bounds.length; i++) {
        bb.putInt(this.bounds[i]);
    }
    bb.putInt(this.rangekeys.length);
    bb.put(this.rangekeys);

    return bb.array();
}
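
The filter pins the byte order to BIG_ENDIAN explicitly. That is already ByteBuffer's default order, but stating it makes the serialized layout unambiguous for anyone writing a deserializer against it.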

From source file: org.commoncrawl.service.listcrawler.HDFSFileIndex.java

public static void writeIndex(Vector<FingerprintAndOffsetTuple> offsetInfo, DataOutput indexFileOut)
        throws IOException {

    long firstFingerprint = offsetInfo.get(0)._fingerprint;

    BloomFilter bloomFilter = new BloomFilter(offsetInfo.size(), 0.001201);

    // sort the offset list by fingerprint 
    Collections.sort(offsetInfo, new Comparator<FingerprintAndOffsetTuple>() {

        @Override
        public int compare(FingerprintAndOffsetTuple o1, FingerprintAndOffsetTuple o2) {
            return (o1._fingerprint < o2._fingerprint) ? -1 : o1._fingerprint > o2._fingerprint ? 1 : 0;
        }

    });
    // now we need to write the index out

    // allocate working set buffers ...
    ByteBuffer indexDataBuffer = ByteBuffer.allocate(offsetInfo.size() * 16);
    ByteBuffer indexHintsBuffer = ByteBuffer
            .allocate(((((offsetInfo.size() + INDEX_HINT_RECORD_INTERVAL) / INDEX_HINT_RECORD_INTERVAL) + 1)
                    * INDEX_HINT_SIZE) + 4);

    // build index hints placeholder 
    Vector<HDFSFileIndex.IndexItem> hints = new Vector<HDFSFileIndex.IndexItem>();
    // 0 100 200 300 400 500
    for (int i = 0; i < offsetInfo.size(); ++i) {

        if (i % INDEX_HINT_RECORD_INTERVAL == 0 || (i == (offsetInfo.size() - 1))) {
            HDFSFileIndex.IndexItem hint = new IndexItem(offsetInfo.get(i)._fingerprint,
                    (int) offsetInfo.get(i)._offset);
            hints.add(hint);
            // add fingerprint to bloom filter 
            bloomFilter.add(hint.fingerprint);
        }
    }
    // start off the index hints buffer with a hint of the index hint buffer size 
    indexHintsBuffer.putInt(hints.size());

    // track total bits used ... 
    int bitsUsedForHints = 0;
    int bitsUsedForFingerprints = 0;
    int bitsUsedForOffsets = 0;

    // now start populating index data ... 
    for (int hintIdx = 0; hintIdx < hints.size(); ++hintIdx) {

        HDFSFileIndex.IndexItem hint = hints.get(hintIdx);

        LOG.info("IndexWriter FP:" + hint.fingerprint);
        indexHintsBuffer.putLong(hint.fingerprint);
        indexHintsBuffer.putInt(hint.dataOffset);
        indexHintsBuffer.putInt(indexDataBuffer.position());

        // update stats 
        bitsUsedForHints += INDEX_HINT_SIZE * 8;

        if (hintIdx < hints.size() - 1) {
            // track cumulative delta and offset values (for average calc later)
            double cumilativeDelta = 0;
            long cumilativeOffset = 0;

            int subIndexItemCount = 0;
            int nonZeroDeltaCount = 0;

            Vector<HDFSFileIndex.IndexItem> subHints = new Vector<HDFSFileIndex.IndexItem>();

            // initialize last fingerprint to indexed value ... 
            long lastFingerprint = hint.fingerprint;

            // first collect values in between index hints
            for (int nonIndexItem = (hintIdx * INDEX_HINT_RECORD_INTERVAL) + 1; nonIndexItem < ((hintIdx + 1)
                    * INDEX_HINT_RECORD_INTERVAL); ++nonIndexItem) {
                if (nonIndexItem >= offsetInfo.size())
                    break;

                // calculate fingerprint delta ...
                long fingerprintDelta = offsetInfo.get(nonIndexItem)._fingerprint - lastFingerprint;
                LOG.info("IndexWriter FP:" + offsetInfo.get(nonIndexItem)._fingerprint + " Delta:"
                        + fingerprintDelta);
                // offset delta

                if (fingerprintDelta != 0) {

                    cumilativeDelta += (double) fingerprintDelta;
                    LOG.info("Cumulative Delta is:" + cumilativeDelta);
                    nonZeroDeltaCount++;
                }

                cumilativeOffset += offsetInfo.get(nonIndexItem)._offset;

                ++subIndexItemCount;

                // add to collection vector 
                subHints.add(new IndexItem(fingerprintDelta, (int) offsetInfo.get(nonIndexItem)._offset));

                // remember the last fingerprint ...
                lastFingerprint = offsetInfo.get(nonIndexItem)._fingerprint;

                // add item to bloom filter
                bloomFilter.add(lastFingerprint);
            }

            // calculate average delta value 
            double averageDeltaValue = (double) cumilativeDelta / (double) nonZeroDeltaCount;
            // calculate m for fingerprint deltas 
            int mForFingerprints = (int) Math.floor(lg(averageDeltaValue));
            LOG.info("Average Delta Value is:" + averageDeltaValue + " m is:" + mForFingerprints);
            // calculate average offset value
            double averageOffsetValue = (double) cumilativeOffset / (double) subIndexItemCount;
            // calculate m for offsets 
            int mForOffsets = (int) Math.floor(lg(averageOffsetValue));

            // calculate rice codes
            RiceCoding riceCodeFP = new RiceCoding(mForFingerprints);
            RiceCoding riceCodeOffsets = new RiceCoding(mForOffsets);

            // populate bits 
            for (HDFSFileIndex.IndexItem subItemHint : subHints) {
                if (subItemHint.fingerprint == 0) {
                    LOG.warn("Zero Delta for Fingerprint Detected. There are two duplicate entries in the log!");
                }
                riceCodeFP.addItem(subItemHint.fingerprint + 1);
                riceCodeOffsets.addItem(subItemHint.dataOffset + 1);
            }
            // now track bits used ... 
            bitsUsedForFingerprints += riceCodeFP.getNumBits();
            bitsUsedForOffsets += riceCodeOffsets.getNumBits();

            // write out metadata 

            // save the current position 
            int currentPosition = indexDataBuffer.position();

            // fingerprint data 
            indexDataBuffer.put((byte) mForFingerprints);
            CacheManager.writeVLongToByteBuffer(indexDataBuffer, riceCodeFP.getNumBits());
            indexDataBuffer.put(riceCodeFP.getBits(), 0, (riceCodeFP.getNumBits() + 7) / 8);

            // offset data 
            indexDataBuffer.put((byte) mForOffsets);
            CacheManager.writeVLongToByteBuffer(indexDataBuffer, riceCodeOffsets.getNumBits());
            indexDataBuffer.put(riceCodeOffsets.getBits(), 0, (riceCodeOffsets.getNumBits() + 7) / 8);

            System.out.println("Item Count:" + subIndexItemCount + " FP Bits:" + subIndexItemCount * 64
                    + " Compressed:" + riceCodeFP.getNumBits() + " Offset Bits:" + subIndexItemCount * 32
                    + " Compressed:" + riceCodeOffsets.getNumBits());

            LOG.info("Item Count:" + subIndexItemCount + " FP Bits:" + subIndexItemCount * 64 + " Compressed:"
                    + riceCodeFP.getNumBits() + " Offset Bits:" + subIndexItemCount * 32 + " Compressed:"
                    + riceCodeOffsets.getNumBits());

            if ((subIndexItemCount * 64) < riceCodeFP.getNumBits()) {
                throw new RuntimeException("Compressed Size > UnCompressed Size!!!!");
            }

            validateIndexData(indexDataBuffer.array(), currentPosition, hint.fingerprint, subHints,
                    bloomFilter);
        }

    }

    if (!bloomFilter.isPresent(firstFingerprint)) {
        throw new RuntimeException("Test Failed!");
    }

    // serialize bloomfilter
    ByteStream baos = new ByteStream(1 << 12);
    BloomFilter.serializer().serialize(bloomFilter, new DataOutputStream(baos));

    // spit out final stats 
    System.out.println(" Bloomfilter Size:" + baos.size() + " IndexHintBuffer Size:"
            + indexHintsBuffer.position() + " IndexDataBuffer Size:" + indexDataBuffer.position());

    // now write out the final index file ... 

    // bloom filter data ... 
    indexFileOut.write(baos.getBuffer(), 0, baos.size());
    // write hint data  
    indexFileOut.write(indexHintsBuffer.array(), 0, indexHintsBuffer.position());
    // write out rice code data size 
    indexFileOut.writeInt(indexDataBuffer.position());
    // finally rice coded sub-index data
    indexFileOut.write(indexDataBuffer.array(), 0, indexDataBuffer.position());
}
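
Each index hint costs sixteen bytes of relative puts: one putLong for the fingerprint followed by two putInt calls for the data offset and the current position in the rice-coded sub-index buffer. The hints buffer is sized up front from INDEX_HINT_SIZE, so these puts cannot overflow it.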

From source file: com.linkedin.databus.core.DbusEventV2.java

public static int serializeEvent(DbusEventKey key, ByteBuffer buf, DbusEventInfo dbusEventInfo) {
    // Serialize a DbusEventV2 that has exact same contents as a DbusEventV1.
    final int start = buf.position();
    buf.put(DbusEventFactory.DBUS_EVENT_V2);
    buf.putInt(MAGIC);
    buf.putInt(0); // Header len placeholder
    buf.putInt(0); // Header crc placeholder
    buf.putInt(0); // Body CRC placeholder
    buf.putInt(0); // total length placeholder

    short attributes = 0;
    attributes = setOpCode(dbusEventInfo.getOpCode(), attributes, dbusEventInfo.getSrcId());
    attributes = setKeyType(key, attributes);
    if (dbusEventInfo.isEnableTracing()) {
        attributes |= FLAG_TRACE_ON;
    }

    if (dbusEventInfo.isReplicated()) {
        attributes |= FLAG_IS_REPLICATED;
    }

    DbusEventPart metadata = dbusEventInfo.getMetadata();
    if (shouldEncodePayloadPart(dbusEventInfo)) {
        attributes |= FLAG_HAS_PAYLOAD_PART;
    }
    if (metadata != null) {
        attributes |= FLAG_HAS_PAYLOAD_METADATA_PART;
    }
    buf.putShort(attributes);
    buf.putLong(dbusEventInfo.getTimeStampInNanos());
    buf.putInt(dbusEventInfo.getSrcId());
    buf.putShort(dbusEventInfo.getpPartitionId());
    buf.putLong(dbusEventInfo.getSequenceId());

    // Fixed part of header is done. Now for the variable header part
    setKey(buf, key);
    final int hdrEndPos = buf.position();

    if (metadata != null) {
        metadata.encode(buf);
    }

    if ((attributes & FLAG_HAS_PAYLOAD_PART) != 0) {
        ByteBuffer bb = dbusEventInfo.getValueByteBuffer();
        if (bb == null) {
            // Special case to encode when there is no data.
            bb = ByteBuffer.allocate(1).order(buf.order());
            bb.limit(0);
        }
        DbusEventPart valuePart = new DbusEventPart(SchemaDigestType.MD5, dbusEventInfo.getSchemaId(),
                dbusEventInfo.getPayloadSchemaVersion(), bb);
        valuePart.encode(buf);
    }
    final int end = buf.position();
    buf.putInt(start + HeaderLenOffset, hdrEndPos - start);
    buf.putInt(start + TotalLenOffset, end - start);

    long bodyCrc = ByteBufferCRC32.getChecksum(buf, hdrEndPos, end - hdrEndPos);
    Utils.putUnsignedInt(buf, start + BodyCrcOffset, bodyCrc);
    // Header CRC
    if (dbusEventInfo.isAutocommit()) {
        // Do the body CRC first, since that is included in the header CRC
        long hdrCrc = ByteBufferCRC32.getChecksum(buf, start + BodyCrcOffset,
                hdrEndPos - start - BodyCrcOffset);
        Utils.putUnsignedInt(buf, start + HeaderCrcOffset, hdrCrc);
    }
    return buf.position() - start;
}
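
Note the two-pass structure: the length and CRC fields are first written as zero placeholders with relative puts, then patched at the end using absolute writes (buf.putInt(index, value) and Utils.putUnsignedInt), which fill in the final values without disturbing the buffer's position.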