Example usage for java.nio.ByteBuffer.putLong

List of usage examples for java.nio.ByteBuffer.putLong

Introduction

On this page you can find example usages of java.nio.ByteBuffer.putLong, collected from open-source projects.

Prototype

public abstract ByteBuffer putLong(long value);

Document

Writes eight bytes containing the given long value, in the current byte order, into this buffer at the current position, and then increments the position by eight.
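
Before the project examples, here is a minimal self-contained sketch (class name is arbitrary) showing the relative write and how the position advances by eight on each call:

import java.nio.ByteBuffer;

public class PutLongExample {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(42L);                         // position moves from 0 to 8
        buf.putLong(System.currentTimeMillis());  // position moves from 8 to 16
        buf.flip();                               // limit = 16, position = 0: ready to read
        System.out.println(buf.getLong());        // prints 42
        System.out.println(buf.getLong());        // prints the timestamp
    }
}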

Usage

From source file:org.apache.geode.internal.cache.DiskInitFile.java

private void writeIFRecord(byte b, DiskRegionView dr, long v) {
    assert lock.isHeldByCurrentThread();
    try {
        ByteBuffer bb = getIFWriteBuffer(1 + DR_ID_MAX_BYTES + 8 + 1);
        bb.put(b);
        putDiskRegionID(bb, dr.getId());
        bb.putLong(v);
        bb.put(END_OF_RECORD_ID);
        writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
        DiskAccessException dae = new DiskAccessException(
                LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
                this.parent);
        if (!this.compactInProgress) {
            this.parent.handleDiskAccessException(dae);
        }
        throw dae;
    }
}

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

@Override
public byte[] encodeAppDelete(UUID uuid) {
    if (mFwMajor >= 3) {
        if (UUID_PEBBLE_HEALTH.equals(uuid)) {
            return encodeActivateHealth(false);
        }
        if (UUID_WORKOUT.equals(uuid)) {
            return encodeActivateHRM(false);
        }
        if (UUID_WEATHER.equals(uuid)) { //TODO: probably it wasn't present in firmware 3
            return encodeActivateWeather(false);
        }
        return encodeBlobdb(uuid, BLOBDB_DELETE, BLOBDB_APP, null);
    } else {
        final short LENGTH_REMOVEAPP_2X = 17;
        ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + LENGTH_REMOVEAPP_2X);
        buf.order(ByteOrder.BIG_ENDIAN);
        buf.putShort(LENGTH_REMOVEAPP_2X);
        buf.putShort(ENDPOINT_APPMANAGER);
        buf.put(APPMANAGER_REMOVEAPP);
        buf.putLong(uuid.getMostSignificantBits());
        buf.putLong(uuid.getLeastSignificantBits());
        return buf.array();
    }
}
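
The method above, like several of the Pebble encoders that follow, serializes a UUID as two putLong calls with the most significant bits first. A minimal round-trip sketch of that pattern, using an illustrative class name:

import java.nio.ByteBuffer;
import java.util.UUID;

public class UuidBufferSketch {
    public static void main(String[] args) {
        UUID original = UUID.randomUUID();

        // Serialize: two putLong calls, most significant half first, 16 bytes total.
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(original.getMostSignificantBits());
        buf.putLong(original.getLeastSignificantBits());

        // Deserialize: read the two halves back in the same order.
        buf.flip();
        UUID restored = new UUID(buf.getLong(), buf.getLong());
        System.out.println(original.equals(restored)); // true
    }
}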

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

@Override
public byte[] encodeAppStart(UUID uuid, boolean start) {
    if (mFwMajor >= 3) {
        final short LENGTH_APPRUNSTATE = 17;
        ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + LENGTH_APPRUNSTATE);
        buf.order(ByteOrder.BIG_ENDIAN);
        buf.putShort(LENGTH_APPRUNSTATE);
        buf.putShort(ENDPOINT_APPRUNSTATE);
        buf.put(start ? APPRUNSTATE_START : APPRUNSTATE_STOP);
        buf.putLong(uuid.getMostSignificantBits());
        buf.putLong(uuid.getLeastSignificantBits());
        return buf.array();
    } else {
        ArrayList<Pair<Integer, Object>> pairs = new ArrayList<>();
        int param = start ? 1 : 0;
        pairs.add(new Pair<>(1, (Object) param));
        return encodeApplicationMessagePush(ENDPOINT_LAUNCHER, uuid, pairs);
    }
}

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

@Override
public byte[] encodeAppReorder(UUID[] uuids) {
    int length = 2 + uuids.length * LENGTH_UUID;
    ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + length);
    buf.order(ByteOrder.BIG_ENDIAN);
    buf.putShort((short) length);
    buf.putShort(ENDPOINT_APPREORDER);
    buf.put((byte) 0x01);
    buf.put((byte) uuids.length);
    for (UUID uuid : uuids) {
        buf.putLong(uuid.getMostSignificantBits());
        buf.putLong(uuid.getLeastSignificantBits());
    }

    return buf.array();
}

From source file:com.inclouds.hbase.rowcache.RowCache.java

/**
 * CHECKED 2 Adds the key value (KeyValue) to a buffer for Put/Append.
 *
 * @param buf
 *          the buf
 * @param kv
 *          the kv
 * @return the int
 */
private int addKeyValue(ByteBuffer buf, KeyValue kv) {

    // Format:
    // 8 bytes - ts
    // 4 bytes - value length
    // value blob
    int valLen = kv.getValueLength();
    int size = 12 + valLen;
    buf.putLong(kv.getTimestamp());
    buf.putInt(valLen);
    buf.put(kv.getBuffer(), kv.getValueOffset(), valLen);
    return size;
}
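
A self-contained sketch of the record layout documented in the comment above (8-byte timestamp, 4-byte value length, then the value blob), with a hypothetical class name and a String standing in for the HBase value bytes:

import java.nio.ByteBuffer;

public class TimestampedValueSketch {
    public static void main(String[] args) {
        byte[] value = "cell-value".getBytes();

        // Write a record: 8-byte timestamp, 4-byte value length, value blob.
        ByteBuffer buf = ByteBuffer.allocate(12 + value.length);
        buf.putLong(System.currentTimeMillis());
        buf.putInt(value.length);
        buf.put(value);

        // Read the record back.
        buf.flip();
        long ts = buf.getLong();
        byte[] restored = new byte[buf.getInt()];
        buf.get(restored);
        System.out.println(ts + " -> " + new String(restored));
    }
}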

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

byte[] encodeApplicationMessagePush(short endpoint, UUID uuid, ArrayList<Pair<Integer, Object>> pairs) {
    int length = LENGTH_UUID + 3; // UUID + (PUSH + id + length of dict)
    for (Pair<Integer, Object> pair : pairs) {
        if (pair.first == null || pair.second == null)
            continue;
        length += 7; // key + type + length
        if (pair.second instanceof Integer) {
            length += 4;
        } else if (pair.second instanceof Short) {
            length += 2;
        } else if (pair.second instanceof Byte) {
            length += 1;
        } else if (pair.second instanceof String) {
            length += ((String) pair.second).getBytes().length + 1;
        } else if (pair.second instanceof byte[]) {
            length += ((byte[]) pair.second).length;
        } else {
            LOG.warn("unknown type: " + pair.second.getClass().toString());
        }
    }

    ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + length);
    buf.order(ByteOrder.BIG_ENDIAN);
    buf.putShort((short) length);
    buf.putShort(endpoint); // 48 or 49
    buf.put(APPLICATIONMESSAGE_PUSH);
    buf.put(++last_id);
    buf.putLong(uuid.getMostSignificantBits());
    buf.putLong(uuid.getLeastSignificantBits());
    buf.put((byte) pairs.size());

    buf.order(ByteOrder.LITTLE_ENDIAN);
    for (Pair<Integer, Object> pair : pairs) {
        if (pair.first == null || pair.second == null)
            continue;
        buf.putInt(pair.first);
        if (pair.second instanceof Integer) {
            buf.put(TYPE_INT);
            buf.putShort((short) 4); // length
            buf.putInt((int) pair.second);
        } else if (pair.second instanceof Short) {
            buf.put(TYPE_INT);
            buf.putShort((short) 2); // length
            buf.putShort((short) pair.second);
        } else if (pair.second instanceof Byte) {
            buf.put(TYPE_INT);
            buf.putShort((short) 1); // length
            buf.put((byte) pair.second);
        } else if (pair.second instanceof String) {
            String str = (String) pair.second;
            buf.put(TYPE_CSTRING);
            buf.putShort((short) (str.getBytes().length + 1));
            buf.put(str.getBytes());
            buf.put((byte) 0);
        } else if (pair.second instanceof byte[]) {
            byte[] bytes = (byte[]) pair.second;
            buf.put(TYPE_BYTEARRAY);
            buf.putShort((short) bytes.length);
            buf.put(bytes);
        }
    }

    return buf.array();
}
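
encodeApplicationMessagePush switches the buffer from big-endian (the protocol header) to little-endian (the dictionary entries) partway through. A minimal sketch of that mid-buffer order switch, with illustrative header values:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;

public class MixedEndianSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(12);

        // Header fields in network (big-endian) byte order.
        buf.order(ByteOrder.BIG_ENDIAN);
        buf.putShort((short) 8);    // payload length (illustrative)
        buf.putShort((short) 0x30); // endpoint id (illustrative)

        // Payload in little-endian order.  order() only affects subsequent
        // relative reads and writes; bytes already in the buffer are untouched.
        buf.order(ByteOrder.LITTLE_ENDIAN);
        buf.putLong(0x1122334455667788L);

        System.out.println(Arrays.toString(buf.array()));
    }
}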

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.reset(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map windows
         * indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after reading, so we should be good to rewind (to be safe) and write here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
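
The write/read discipline spelled out in the long comment above (clear, putLong, flip, then rewind between reads) can be reduced to a small self-contained sketch with a hypothetical class name:

import java.nio.ByteBuffer;

public class BufferReuseSketch {
    public static void main(String[] args) {
        ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);

        for (long counter = 0; counter < 3; counter++) {
            counterBuffer.clear();          // position = 0, limit = capacity: ready to write
            counterBuffer.putLong(counter); // position advances to 8
            counterBuffer.flip();           // limit = 8, position = 0: ready to read

            long first = counterBuffer.getLong();
            counterBuffer.rewind();         // position = 0 again, limit unchanged
            long second = counterBuffer.getLong();

            System.out.println(first + " == " + second);
        }
    }
}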

From source file:org.apache.geode.internal.cache.DiskInitFile.java

private void writeDiskStoreId() {
    lock(true);
    try {
        ByteBuffer bb = getIFWriteBuffer(1 + 6 + 1);
        bb.put(OPLOG_MAGIC_SEQ_ID);
        bb.put(Oplog.OPLOG_TYPE.IF.getBytes(), 0, Oplog.OPLOG_TYPE.getLen());
        bb.put(END_OF_RECORD_ID);
        writeIFRecord(bb, false); // don't do stats for these small records

        bb = getIFWriteBuffer(1 + 8 + 8 + 1);
        bb.put(IFREC_DISKSTORE_ID);
        bb.putLong(parent.getDiskStoreID().getLeastSignificantBits());
        bb.putLong(parent.getDiskStoreID().getMostSignificantBits());
        bb.put(END_OF_RECORD_ID);
        writeIFRecord(bb, false); // don't do stats for these small records
    } catch (IOException ex) {
        DiskAccessException dae = new DiskAccessException(
                LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
                this.parent);
        if (!this.compactInProgress) {
            this.parent.handleDiskAccessException(dae);
        }
        throw dae;
    } finally {
        unlock(true);
    }
}

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

private byte[] encodeBlobdb(Object key, byte command, byte db, byte[] blob) {

    int length = 5;

    int key_length;
    if (key instanceof UUID) {
        key_length = LENGTH_UUID;
    } else if (key instanceof String) {
        key_length = ((String) key).getBytes().length;
    } else {
        LOG.warn("unknown key type");
        return null;
    }
    if (key_length > 255) {
        LOG.warn("key is too long");
        return null;
    }
    length += key_length;

    if (blob != null) {
        length += blob.length + 2;
    }

    ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + length);

    buf.order(ByteOrder.BIG_ENDIAN);
    buf.putShort((short) length);
    buf.putShort(ENDPOINT_BLOBDB);

    buf.order(ByteOrder.LITTLE_ENDIAN);
    buf.put(command);
    buf.putShort((short) mRandom.nextInt()); // token
    buf.put(db);

    buf.put((byte) key_length);
    if (key instanceof UUID) {
        UUID uuid = (UUID) key;
        buf.order(ByteOrder.BIG_ENDIAN);
        buf.putLong(uuid.getMostSignificantBits());
        buf.putLong(uuid.getLeastSignificantBits());
        buf.order(ByteOrder.LITTLE_ENDIAN);
    } else {
        buf.put(((String) key).getBytes());
    }

    if (blob != null) {
        buf.putShort((short) blob.length);
        buf.put(blob);
    }

    return buf.array();
}

From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

private byte[] encodePhoneVersion3x(byte os) {
    final short LENGTH_PHONEVERSION3X = 25;
    ByteBuffer buf = ByteBuffer.allocate(LENGTH_PREFIX + LENGTH_PHONEVERSION3X);
    buf.order(ByteOrder.BIG_ENDIAN);
    buf.putShort(LENGTH_PHONEVERSION3X);
    buf.putShort(ENDPOINT_PHONEVERSION);
    buf.put((byte) 0x01);
    buf.putInt(-1); //0xffffffff
    buf.putInt(0);

    buf.putInt(os);

    buf.put(PHONEVERSION_APPVERSION_MAGIC);
    buf.put((byte) 4); // major
    buf.put((byte) 1); // minor
    buf.put((byte) 1); // patch
    buf.order(ByteOrder.LITTLE_ENDIAN);
    buf.putLong(0x00000000000029af); //flags

    return buf.array();
}