Example usage for java.nio ByteBuffer flip

List of usage examples for java.nio ByteBuffer flip

Introduction

On this page you can find example usage for java.nio.ByteBuffer.flip().

Prototype

public final Buffer flip() 

Document

Flips this buffer.
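
Before the usage excerpts below, here is a minimal, self-contained sketch of the typical put/flip/write cycle. The class name, file name ("flip-demo.txt"), and buffer size are arbitrary illustration choices, and Path.of assumes Java 11 or later:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class FlipDemo {
    public static void main(String[] args) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(64);

        // Fill the buffer; each put() advances the position.
        buffer.put("hello, flip".getBytes(StandardCharsets.UTF_8));

        // flip() sets limit = position and position = 0, so the bytes
        // just written can now be drained from the buffer.
        buffer.flip();

        try (FileChannel channel = FileChannel.open(Path.of("flip-demo.txt"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            while (buffer.hasRemaining()) {
                channel.write(buffer); // writes from position up to limit
            }
        }

        buffer.clear(); // reset position/limit so the buffer can be refilled
    }
}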

Usage

From source file:com.castis.sysComp.PoisConverterSysComp.java

private List<subDataDTO> writeNodeInfoOnFile(int byteSize, ByteBuffer byteBuffer,
        GatheringByteChannel outByteCh, InputDataDTO data, String isLeafNode) {

    StringBuffer strBuffer = new StringBuffer();
    strBuffer.append(data.getRegion());
    strBuffer.append("|");
    strBuffer.append(data.getCategory());
    strBuffer.append("|");
    strBuffer.append(data.getWeekday());
    strBuffer.append("|");
    strBuffer.append(data.getHour());
    strBuffer.append("|");
    strBuffer.append(data.getPlatform());
    strBuffer.append("|");
    strBuffer.append(data.getCount());
    List<subDataDTO> subDataList = getSubDataList(data);

    // leafNode or not(Y/N)
    strBuffer.append("|");
    strBuffer.append(isLeafNode);

    strBuffer.append("\r\n");

    byte[] outByte = null;
    try {
        outByte = strBuffer.toString().getBytes("UTF-8");
    } catch (UnsupportedEncodingException e2) {
        e2.printStackTrace();
    }
    byteBuffer.put(outByte);
    byteBuffer.flip();
    try {
        outByteCh.write(byteBuffer);
    } catch (IOException e) {
    }
    byteBuffer.clear();

    return subDataList;
}

From source file:au.org.theark.core.service.ArkCommonServiceImpl.java

/**
 * {@inheritDoc}
 */
public void copyArkLargeFileAttachments(String sourceFilePath, String destinationFilePath) throws IOException {
    FileChannel source = null;
    FileChannel destination = null;

    try {
        source = new FileInputStream(new File(sourceFilePath)).getChannel();
        destination = new FileOutputStream(new File(destinationFilePath)).getChannel();

        // This fails with Map Failed exception on large files
        // destination.transferFrom(source, 0, source.size());

        ByteBuffer buf = ByteBuffer.allocateDirect(DEFAULT_BUFFER_SIZE);
        while ((source.read(buf)) != -1) {
            buf.flip();
            destination.write(buf);
            buf.clear();
        }
    } finally {
        if (source != null) {
            source.close();
        }
        if (destination != null) {
            destination.close();
        }
    }
}

From source file:com.healthmarketscience.jackcess.Column.java

/**
 * Serialize an Object into a raw byte value for this column
 * @param obj Object to serialize
 * @param order Order in which to serialize
 * @return A buffer containing the bytes
 * @usage _advanced_method_
 */
public ByteBuffer write(Object obj, int remainingRowLength, ByteOrder order) throws IOException {
    if (isRawData(obj)) {
        // just slap it right in (not for the faint of heart!)
        return ByteBuffer.wrap(((RawData) obj).getBytes());
    }

    if (!isVariableLength() || !getType().isVariableLength()) {
        return writeFixedLengthField(obj, order);
    }

    // var length column
    if (!getType().isLongValue()) {

        // this is an "inline" var length field
        switch (getType()) {
        case NUMERIC:
            // don't ask me why numerics are "var length" columns...
            ByteBuffer buffer = getPageChannel().createBuffer(getType().getFixedSize(), order);
            writeNumericValue(buffer, obj);
            buffer.flip();
            return buffer;

        case TEXT:
            byte[] encodedData = encodeTextValue(obj, 0, getLengthInUnits(), false).array();
            obj = encodedData;
            break;

        case BINARY:
        case UNKNOWN_0D:
        case UNSUPPORTED_VARLEN:
            // should already be "encoded"
            break;
        default:
            throw new RuntimeException("unexpected inline var length type: " + getType());
        }

        ByteBuffer buffer = ByteBuffer.wrap(toByteArray(obj));
        buffer.order(order);
        return buffer;
    }

    // var length, long value column
    switch (getType()) {
    case OLE:
        // should already be "encoded"
        break;
    case MEMO:
        int maxMemoChars = DataType.MEMO.toUnitSize(DataType.MEMO.getMaxSize());
        obj = encodeTextValue(obj, 0, maxMemoChars, false).array();
        break;
    default:
        throw new RuntimeException("unexpected var length, long value type: " + getType());
    }

    // create long value buffer
    return writeLongValue(toByteArray(obj), remainingRowLength);
}

From source file:org.apache.hadoop.hbase.io.hfile.HFileBlock.java

@Override
public void serialize(ByteBuffer destination) {
    // BE CAREFUL!! There is a custom version of this serialization over in BucketCache#doDrain.
    // Make sure any changes in here are reflected over there.
    this.buf.get(destination, 0, getSerializedLength() - BLOCK_METADATA_SPACE);
    destination = addMetaData(destination);

    // Make it ready for reading. flip sets position to zero and limit to current position which
    // is what we want if we do not want to serialize the block plus checksums if present plus
    // metadata.
    destination.flip();
}

From source file:edu.hawaii.soest.kilonalu.ctd.SBE37Source.java

/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initialization occurs.  This method contains the detailed code for 
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.debug("SBE37Source.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    // while data are being sent, read them into the buffer
    try {

        this.socketChannel = getSocketConnection();

        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // create a character string to store characters from the TCP stream
        StringBuilder responseString = new StringBuilder();

        // add a channel of data that will be pushed to the server.  
        // Each sample will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();
        int channelIndex = rbnbChannelMap.Add(getRBNBChannelName());

        // wake the instrument with an initial take sample command
        this.command = this.commandPrefix + getInstrumentID() + "TS" + this.commandSuffix;
        this.sentCommand = queryInstrument(this.command);

        // verify the instrument ID is correct
        while (getInstrumentID() == null) {
            // allow time for the instrument response
            streamingThread.sleep(2000);
            buffer.clear();
            // send the command and update the sentCommand status
            this.sentCommand = queryInstrument(this.command);

            // read the response into the buffer. Note that the streamed bytes
            // are 8-bit, not 16-bit Unicode characters.  Use the US-ASCII
            // encoding instead.
            while (this.socketChannel.read(buffer) != -1 || buffer.position() > 0) {

                buffer.flip();
                while (buffer.hasRemaining()) {
                    String nextCharacter = new String(new byte[] { buffer.get() }, "US-ASCII");
                    responseString.append(nextCharacter);
                }
                // look for the command line ending
                if (responseString.toString().indexOf("S>") > 0) {

                    // parse the ID from the idCommand response
                    int idStartIndex = responseString.indexOf("=") + 2;
                    int idStopIndex = responseString.indexOf("=") + 4;
                    String idString = responseString.substring(idStartIndex, idStopIndex);
                    // test that the ID is a valid number and set the instrument ID
                    if ((new Integer(idString)).intValue() > 0) {
                        setInstrumentID(idString);
                        buffer.clear();
                        logger.debug("Instrument ID is " + getInstrumentID() + ".");
                        break;

                    } else {
                        logger.debug("Instrument ID \"" + idString + "\" was not set.");
                    }

                } else {
                    break;
                }

                buffer.compact();
                if (getInstrumentID() != null) {
                    break;
                }
            }
        }

        // instrumentID is set

        // allow time for the instrument response
        streamingThread.sleep(5000);
        this.command = this.commandPrefix + getInstrumentID() + this.takeSampleCommand + this.commandSuffix;
        this.sentCommand = queryInstrument(command);

        // while there are bytes to read from the socket ...
        while (this.socketChannel.read(buffer) != -1 || buffer.position() > 0) {
            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();
                //logger.debug("b1: " + new String(Hex.encodeHex((new byte[]{byteOne})))   + "\t" + 
                //             "b2: " + new String(Hex.encodeHex((new byte[]{byteTwo})))   + "\t" + 
                //             "b3: " + new String(Hex.encodeHex((new byte[]{byteThree}))) + "\t" + 
                //             "b4: " + new String(Hex.encodeHex((new byte[]{byteFour})))  + "\t" +
                //             "sample pos: "   + sampleBuffer.position()                  + "\t" +
                //             "sample rem: "   + sampleBuffer.remaining()                 + "\t" +
                //             "sample cnt: "   + sampleByteCount                          + "\t" +
                //             "buffer pos: "   + buffer.position()                        + "\t" +
                //             "buffer rem: "   + buffer.remaining()                       + "\t" +
                //             "state: "        + state
                //);

                // Use a State Machine to process the byte stream.
                // Start building an rbnb frame for the entire sample, first by 
                // inserting a timestamp into the channelMap.  This time is merely
                // the time of insert into the data turbine, not the time of
                // observations of the measurements.  That time should be parsed out
                // of the sample in the Sink client code

                switch (state) {

                case 0:

                    // sample line is begun by S>
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x3E && byteTwo == 0x53) {
                        // we've found the beginning of a sample, move on
                        state = 1;
                        break;

                    } else {
                        break;
                    }

                case 1: // read the rest of the bytes to the next EOL characters

                    // sample line is terminated by S>
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x3E && byteTwo == 0x53) {

                        sampleByteCount++; // add the last byte found to the count

                        // add the last byte found to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);

                        } else {
                            sampleBuffer.compact();
                            sampleBuffer.put(byteOne);

                        }

                        // extract just the length of the sample bytes (less 2 bytes
                        // to exclude the 'S>' prompt characters) out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the data turbine.
                        byte[] sampleArray = new byte[sampleByteCount - 2];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);

                        // send the sample to the data turbine
                        rbnbChannelMap.PutTimeAuto("server");
                        String sampleString = new String(sampleArray, "US-ASCII");
                        rbnbChannelMap.PutMime(channelIndex, "text/plain");
                        rbnbChannelMap.PutDataAsString(channelIndex, sampleString);
                        getSource().Flush(rbnbChannelMap);
                        logger.info("Sample: " + sampleString);
                        logger.info("flushed data to the DataTurbine. ");

                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        //rbnbChannelMap.Clear();                      
                        //logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                        //state = 0;

                        // Once the sample is flushed, take a new sample
                        if (getInstrumentID() != null) {
                            // allow time for the instrument response
                            streamingThread.sleep(2000);
                            this.command = this.commandPrefix + getInstrumentID() + this.takeSampleCommand
                                    + this.commandSuffix;
                            this.sentCommand = queryInstrument(command);
                        }

                    } else { // not 0x0A0D

                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                        break;
                    } // end if for 0x0A0D EOL

                } // end switch statement

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        this.socketChannel.close();

    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication  exception, log the exception, 
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    } catch (java.lang.InterruptedException ie) {
        ie.printStackTrace();
    }

    return !failed;
}

From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java

@Override
public void updatePatchDocument(PatchDocument patchDocument, NodeChecksums checksums, ByteBuffer data) {
    int blockSize = checksums.getBlockSize();

    patchDocument.setBlockSize(blockSize);

    int i = 0;

    Adler32 adlerInfo = new Adler32(hasher);
    int lastMatchIndex = 0;
    ByteBuffer currentPatch = ByteBuffer.allocate(600000); // TODO

    int currentPatchSize = 0;

    for (;;) {
        int chunkSize = 0;
        // determine the size of the next data chunk to evaluate. Default to
        // blockSize, but clamp to end of data
        if ((i + blockSize) > data.limit()) {
            chunkSize = data.limit() - i;
            adlerInfo.reset(); // need to reset this because the rolling
                               // checksum doesn't work correctly on a final
                               // non-aligned block
        } else {
            chunkSize = blockSize;
        }

        int matchedBlock = adlerInfo.checkMatch(lastMatchIndex, checksums, data, i, i + chunkSize - 1);
        if (matchedBlock != -1) {
            //                try
            //                {
            //                    String y = hasher.md5(data, i, i + chunkSize - 1);
            //                    System.out.println("y = " + y);
            //                }
            //                catch (NoSuchAlgorithmException e)
            //                {
            //                    // TODO Auto-generated catch block
            //                    e.printStackTrace();
            //                }
            // if we have a match, do the following:
            // 1) add the matched block index to our tracking buffer
            // 2) check to see if there's a current patch. If so, add it to
            // the patch document.
            // 3) jump forward blockSize bytes and continue
            patchDocument.addMatchedBlock(matchedBlock);

            if (currentPatchSize > 0) {
                // there are outstanding patches, add them to the list
                // create the patch and append it to the patches buffer
                currentPatch.flip();
                int size = currentPatch.limit();
                byte[] dst = new byte[size];
                currentPatch.get(dst, 0, size);
                Patch patch = new Patch(lastMatchIndex, size, dst);
                patchDocument.addPatch(patch);
                currentPatch.clear();
            }

            lastMatchIndex = matchedBlock;

            i += chunkSize;

            adlerInfo.reset();

            continue;
        } else {
            // while we don't have a block match, append bytes to the
            // current patch
            logger.debug("limit = " + currentPatch.limit() + ", position = " + currentPatch.position());
            currentPatch.put(data.get(i));
            currentPatchSize++;
        }
        if (i >= data.limit() - 1) {
            break;
        }
        i++;
    } // end for each byte in the data

    if (currentPatchSize > 0) {
        currentPatch.flip();
        int size = currentPatch.limit();
        byte[] dst = new byte[size];
        currentPatch.get(dst, 0, size);
        Patch patch = new Patch(lastMatchIndex, size, dst);
        patchDocument.addPatch(patch);
    }
}

From source file:com.healthmarketscience.jackcess.impl.IndexData.java

/**
 * Returns an entry buffer containing the relevant data for an entry given
 * the valuePrefix.
 */
private ByteBuffer getTempEntryBuffer(ByteBuffer indexPage, int entryLen, byte[] valuePrefix,
        TempBufferHolder tmpEntryBufferH) {
    ByteBuffer tmpEntryBuffer = tmpEntryBufferH.getBuffer(getPageChannel(), valuePrefix.length + entryLen);

    // combine valuePrefix and rest of entry from indexPage, then prep for
    // reading
    tmpEntryBuffer.put(valuePrefix);
    tmpEntryBuffer.put(indexPage.array(), indexPage.position(), entryLen);
    tmpEntryBuffer.flip();

    return tmpEntryBuffer;
}

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = 0.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.reset(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map windows
         * indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after reading, so we should be good to rewind (to be safe) and write here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Returns a single ByteBuffer which contains the entire table definition
 * (which may span multiple database pages).
 */
private ByteBuffer loadCompleteTableDefinitionBuffer(ByteBuffer tableBuffer) throws IOException {
    int nextPage = tableBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
    ByteBuffer nextPageBuffer = null;
    while (nextPage != 0) {
        if (nextPageBuffer == null) {
            nextPageBuffer = getPageChannel().createPageBuffer();
        }
        getPageChannel().readPage(nextPageBuffer, nextPage);
        nextPage = nextPageBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
        ByteBuffer newBuffer = PageChannel.createBuffer(tableBuffer.capacity() + getFormat().PAGE_SIZE - 8);
        newBuffer.put(tableBuffer);
        newBuffer.put(nextPageBuffer.array(), 8, getFormat().PAGE_SIZE - 8);
        tableBuffer = newBuffer;
        tableBuffer.flip();
    }
    return tableBuffer;
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * @param database database which owns this table
 * @param tableBuffer Buffer to read the table with
 * @param pageNumber Page number of the table definition
 * @param name Table name
 * @param useBigIndex whether or not "big index support" should be enabled
 *                    for the table
 */
protected Table(Database database, ByteBuffer tableBuffer, int pageNumber, String name, int flags,
        boolean useBigIndex) throws IOException {
    _database = database;
    _tableDefPageNumber = pageNumber;
    _name = name;
    _flags = flags;
    _useBigIndex = useBigIndex;
    int nextPage = tableBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
    ByteBuffer nextPageBuffer = null;
    while (nextPage != 0) {
        if (nextPageBuffer == null) {
            nextPageBuffer = getPageChannel().createPageBuffer();
        }
        getPageChannel().readPage(nextPageBuffer, nextPage);
        nextPage = nextPageBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
        ByteBuffer newBuffer = getPageChannel()
                .createBuffer(tableBuffer.capacity() + getFormat().PAGE_SIZE - 8);
        newBuffer.put(tableBuffer);
        newBuffer.put(nextPageBuffer.array(), 8, getFormat().PAGE_SIZE - 8);
        tableBuffer = newBuffer;
        tableBuffer.flip();
    }
    readTableDefinition(tableBuffer);
    tableBuffer = null;
}