Example usage for java.nio ByteBuffer capacity

Introduction

On this page you can find usage examples of java.nio.ByteBuffer.capacity() drawn from open-source projects.

Prototype

public final int capacity() 

Document

Returns the capacity of this buffer.
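
For orientation before the examples, here is a minimal sketch (the class name CapacityDemo is made up for illustration) showing that capacity() is fixed at allocation time, while position and limit move as the buffer is used:

import java.nio.ByteBuffer;

public class CapacityDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);
        System.out.println(buffer.capacity());  // 16

        buffer.put((byte) 1).put((byte) 2);     // position advances to 2
        buffer.flip();                          // limit becomes 2, position 0

        System.out.println(buffer.capacity());  // still 16: capacity never changes
        System.out.println(buffer.limit());     // 2
        System.out.println(buffer.remaining()); // 2
    }
}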

Usage

From source file: nl.salp.warcraft4j.io.HttpDataReader.java

/**
 * {@inheritDoc}
 */
@Override
protected int readData(ByteBuffer buffer) throws DataReadingException {
    try {
        int read;
        if (buffer.hasArray()) {
            // Heap buffer: read straight into the backing array.
            read = responseStream.read(buffer.array());
        } else {
            // Direct buffer: stage through a temporary array sized by capacity().
            byte[] data = new byte[buffer.capacity()];
            read = responseStream.read(data);
            if (read > 0) {
                buffer.put(data, 0, read); // copy only the bytes actually read
            }
        }
        return read;
    } catch (IOException e) {
        throw new DataReadingException(e);
    }
}

From source file: cn.ac.ncic.mastiff.io.coding.DeltaBinaryPackingStringReader.java

public void ensureDecompress() throws IOException {
    org.apache.hadoop.io.compress.Decompressor decompressor = this.compressAlgo.getDecompressor();
    InputStream is = this.compressAlgo.createDecompressionStream(inBuf, decompressor, 0);
    // The buffer is allocated to the exact decompressed size, so capacity()
    // doubles as the number of bytes to read.
    ByteBuffer buf = ByteBuffer.allocate(decompressedSize);
    IOUtils.readFully(is, buf.array(), 0, buf.capacity());
    is.close();
    this.compressAlgo.returnDecompressor(decompressor);
    inBuf.reset(buf.array(), offset, buf.capacity());
}

From source file: record.wave.WaveWriter.java

/**
 * Writes the buffer contents to the file. Assumes that the buffer is full
 * and the first byte of data is at position 0.
 */
public void write(ByteBuffer buffer) throws IOException {
    buffer.position(0);

    /* Write the full buffer if there is room, respecting the max file size */
    if (mFileChannel.size() + buffer.capacity() < mMaxSize) {
        while (buffer.hasRemaining()) {
            mFileChannel.write(buffer);
        }

        updateWaveFileSize();
    } else {
        /* Split the buffer to finish filling the current file and then put
         * the leftover into a new file */
        int remaining = (int) (mMaxSize - mFileChannel.size());

        /* Ensure we write full frames to fill up the remaining size */
        remaining -= (int) (remaining % mAudioFormat.getFrameSize());

        byte[] bytes = buffer.array();

        ByteBuffer current = ByteBuffer.wrap(Arrays.copyOf(bytes, remaining));

        ByteBuffer next = ByteBuffer.wrap(Arrays.copyOfRange(bytes, remaining, bytes.length));

        while (current.hasRemaining()) {
            mFileChannel.write(current);
        }

        updateWaveFileSize();

        rollover();

        while (next.hasRemaining()) {
            mFileChannel.write(next);
        }

        updateWaveFileSize();
    }
}

From source file: org.stem.db.FatFile.java

public void writeIndex() throws IOException {
    ByteBuffer indexSerialized = index.serialize();

    // Place the serialized index at the tail of the file; capacity() gives
    // its serialized length.
    writer.seek(capacity - indexSerialized.capacity() - 1);
    writer.write(indexSerialized.array());

    pointer = (int) writer.getFilePointer();
}

From source file: pl.allegro.tech.hermes.consumers.consumer.sender.http.ByteBufferEntityTest.java

@Test
public void testWriteTo() throws Exception {
    final ByteBuffer bytes = ByteBuffer.wrap("Message content".getBytes(Consts.ASCII));
    final ByteBufferEntity httpentity = new ByteBufferEntity(bytes);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    httpentity.writeTo(out);
    byte[] bytes2 = out.toByteArray();
    Assert.assertNotNull(bytes2);
    Assert.assertEquals(bytes.capacity(), bytes2.length);
    bytes.position(0);
    for (int i = 0; i < bytes2.length; i++) {
        Assert.assertEquals(bytes.get(i), bytes2[i]);
    }

    out = new ByteArrayOutputStream();
    httpentity.writeTo(out);
    bytes2 = out.toByteArray();
    Assert.assertNotNull(bytes2);
    Assert.assertEquals(bytes.capacity(), bytes2.length);
    bytes.position(0);
    for (int i = 0; i < bytes.capacity(); i++) {
        Assert.assertEquals(bytes.get(i), bytes2[i]);
    }

    try {
        httpentity.writeTo(null);
        Assert.fail("IllegalArgumentException should have been thrown");
    } catch (final IllegalArgumentException ex) {
        // expected
    }
}

From source file: com.yobidrive.diskmap.needles.Needle.java

public void putNeedleInBuffer(ByteBuffer result) throws Exception {
    int startPosition = result.position();
    result.limit(result.capacity());
    result.putInt(MAGICSTART);
    result.putLong(needleNumber);
    result.put(flags);
    result.putInt(keyBytes.length);
    result.put(keyBytes);
    result.putInt(version == null ? 0 : version.toBytes().length);
    if (version != null)
        result.put(version.toBytes());
    result.putInt(previousNeedle == null ? -1 : previousNeedle.getNeedleFileNumber()); // Chaining
    result.putLong(previousNeedle == null ? -1L : previousNeedle.getNeedleOffset()); // Chaining
    result.putInt(originalFileNumber); // Original needle location (for cleaning)
    result.putInt(originalSize); // Original needle size (for cleaning)
    result.putInt(data == null ? 0 : data.length);
    if (data != null)
        result.put(data);
    result.putInt(MAGICEND);
    result.put(hashMD5());
    while (((result.position() - startPosition) % 256) > 0) {
        result.put(PADDING);
    }
    result.flip();
}

From source file: com.streamsets.pipeline.stage.origin.maprjson.MapRJsonOriginSource.java

private boolean gatherJsonDataTypes() throws StageException {

    try (DocumentStream docs = table.find()) {

        Iterator<Document> it = docs.iterator();
        if (it.hasNext()) {
            Document doc = it.next();

            jsonDataTypes.clear();

            Map<String, Object> m = doc.asMap();
            Set<String> names = m.keySet();

            for (String s : names) {
                jsonDataTypes.put(s, doc.getValue(s).getType());
            }

            if (jsonDataTypes.get(MAPR_ID) == Value.Type.BINARY) {
                ByteBuffer bb = doc.getValue(MAPR_ID).getBinary();
                binaryColumnWidth = bb.capacity();
            }
        } else {
            return false;
        }

    } catch (DBException ex) {
        LOG.error(Errors.MAPR_JSON_ORIGIN_12.getMessage(), ex);
        throw new OnRecordErrorException(Errors.MAPR_JSON_ORIGIN_12, ex);
    }
    return true;
}

From source file: org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.java

/**
 * To check whether a trailer is present in a WAL, this method seeks to position (fileLength -
 * PB_WAL_COMPLETE_MAGIC.size() - Bytes.SIZEOF_INT). It reads the int value there to learn the
 * size of the trailer, and checks whether the trailer is present at the end by comparing the
 * last PB_WAL_COMPLETE_MAGIC.size() bytes. If the trailer is not present, it returns false;
 * otherwise, it sets the trailer and sets this.walEditsStopOffset to the point just before the
 * trailer.
 * <ul>
 * The trailer is ignored in case:
 * <li>fileLength is 0 or not correct (when the file is under recovery, etc.).
 * <li>the trailer size is negative.
 * </ul>
 * <p>
 * In case the trailer size > this.trailerWarnSize, it is read after a WARN message.
 * @return true if a valid trailer is present
 */
private boolean setTrailerIfPresent() {
    try {
        long trailerSizeOffset = this.fileLength - (PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT);
        if (trailerSizeOffset <= 0)
            return false;// no trailer possible.
        this.seekOnFs(trailerSizeOffset);
        // read the int as trailer size.
        int trailerSize = this.inputStream.readInt();
        ByteBuffer buf = ByteBuffer.allocate(ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length);
        this.inputStream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
        if (!Arrays.equals(buf.array(), PB_WAL_COMPLETE_MAGIC)) {
            LOG.trace("No trailer found.");
            return false;
        }
        if (trailerSize < 0) {
            LOG.warn("Invalid trailer Size " + trailerSize + ", ignoring the trailer");
            return false;
        } else if (trailerSize > this.trailerWarnSize) {
            // continue reading after warning the user.
            LOG.warn("Please investigate WALTrailer usage. Trailer size > maximum configured size : "
                    + trailerSize + " > " + this.trailerWarnSize);
        }
        // seek to the position where trailer starts.
        long positionOfTrailer = trailerSizeOffset - trailerSize;
        this.seekOnFs(positionOfTrailer);
        // read the trailer.
        buf = ByteBuffer.allocate(trailerSize);// for trailer.
        this.inputStream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
        trailer = WALTrailer.parseFrom(buf.array());
        this.walEditsStopOffset = positionOfTrailer;
        return true;
    } catch (IOException ioe) {
        LOG.warn("Got IOE while reading the trailer. Continuing as if no trailer is present.", ioe);
    }
    return false;
}
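
For reference, the file-tail layout that setTrailerIfPresent() assumes, reconstructed from the arithmetic in the code above:

// Tail of the WAL file as read by setTrailerIfPresent():
//
//   ... edits ... | trailer (trailerSize bytes) | trailerSize (4-byte int) | PB_WAL_COMPLETE_MAGIC
//                 ^                             ^
//                 positionOfTrailer             trailerSizeOffset = fileLength - (MAGIC.length + SIZEOF_INT)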

From source file: com.hadoop.compression.lzo.LzoCompressor.java

/**
 * Reallocates a direct byte buffer by freeing the old one and allocating
 * a new one, unless the size is the same, in which case it is simply
 * cleared and returned.
 *
 * NOTE: this uses unsafe APIs to manually free memory - if anyone else
 * has a reference to the 'buf' parameter they will likely read random
 * data or cause a segfault by accessing it.
 */
private ByteBuffer realloc(ByteBuffer buf, int newSize) {
    if (buf != null) {
        if (buf.capacity() == newSize) {
            // Can use existing buffer
            buf.clear();
            return buf;
        }
        try {
            // Manually free the old buffer using undocumented unsafe APIs.
            // If this fails, we'll drop the reference and hope GC finds it
            // eventually.
            Method cleanerMethod = buf.getClass().getMethod("cleaner");
            cleanerMethod.setAccessible(true);
            Object cleaner = cleanerMethod.invoke(buf);
            Method cleanMethod = cleaner.getClass().getMethod("clean");
            cleanMethod.setAccessible(true);
            cleanMethod.invoke(cleaner);
        } catch (Exception e) {
            // Perhaps a non-sun-derived JVM - contributions welcome
            LOG.warn("Couldn't realloc bytebuffer", e);
        }
    }
    return ByteBuffer.allocateDirect(newSize);
}
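
A minimal usage sketch of the reuse-or-reallocate pattern above (the sizes are arbitrary illustration values):

ByteBuffer buf = null;
buf = realloc(buf, 64 * 1024);  // first call: allocates a new 64 KiB direct buffer
buf = realloc(buf, 64 * 1024);  // same size: the existing buffer is cleared and reused
buf = realloc(buf, 128 * 1024); // size changed: the old buffer is freed, a new one allocated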

From source file: io.github.dsheirer.record.wave.WaveWriter.java

/**
 * Writes the metadata to the end of the file if there is sufficient space without exceeding the
 * max file size.
 */
public void writeMetadata(WaveMetadata metadata) throws IOException {
    ByteBuffer listChunk = metadata.getLISTChunk();

    if (mFileChannel.size() + listChunk.capacity() >= mMaxSize) {
        throw new IOException("Cannot write LIST metadata chunk - insufficient file space remaining");
    }

    closeDataChunk();

    listChunk.position(0);

    while (listChunk.hasRemaining()) {
        mFileChannel.write(listChunk);
    }

    updateTotalSize();

    ByteBuffer id3Chunk = metadata.getID3Chunk();

    if (mFileChannel.size() + id3Chunk.capacity() >= mMaxSize) {
        throw new IOException("Cannot write ID3 metadata chunk - insufficient file space remaining");
    }

    id3Chunk.position(0);

    while (id3Chunk.hasRemaining()) {
        mFileChannel.write(id3Chunk);
    }

    updateTotalSize();
}