List of usage examples for java.nio.ByteBuffer.allocateDirect
public static ByteBuffer allocateDirect(int capacity)
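A direct buffer lives outside the garbage-collected heap, so channels and JNI code can read and write it without an intermediate copy. Before the real-world examples below, here is a minimal sketch of the basic write/flip/read cycle (class name and values are illustrative only):

import java.nio.ByteBuffer;

public class AllocateDirectDemo {
    public static void main(String[] args) {
        // Allocate 16 bytes of off-heap memory; position=0, limit=capacity=16.
        ByteBuffer buf = ByteBuffer.allocateDirect(16);

        buf.putInt(42);          // write: position advances to 4
        buf.putLong(123L);       // position advances to 12

        buf.flip();              // limit=12, position=0: switch to reading

        int i = buf.getInt();    // 42
        long l = buf.getLong();  // 123
        System.out.println(i + " " + l);

        buf.clear();             // ready for the next write cycle
    }
}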
From source file:org.apache.hadoop.io.compress.snappy.SnappyCompressor.java
/**
 * Creates a new compressor.
 *
 * @param directBufferSize size of the direct buffer to be used.
 */
public SnappyCompressor(int directBufferSize) {
    this.directBufferSize = directBufferSize;

    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf.position(directBufferSize);
}
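Note the last line: the constructor sets the output buffer's position to its limit, which marks it as already drained; remaining() reports 0 until the native compressor refills it. A self-contained sketch of that convention (the class and method names here are invented for illustration, not the actual Hadoop API):

import java.nio.ByteBuffer;

class DrainedBufferDemo {
    private final ByteBuffer compressedDirectBuf;

    DrainedBufferDemo(int directBufferSize) {
        compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
        // position == limit: the buffer reports zero remaining bytes, so the
        // first read attempt falls through to the native compressor.
        compressedDirectBuf.position(directBufferSize);
    }

    int readCompressed(byte[] dst, int off, int len) {
        int n = Math.min(compressedDirectBuf.remaining(), len); // 0 after construction
        if (n > 0) {
            compressedDirectBuf.get(dst, off, n); // drain buffered output first
        }
        return n; // 0 signals "nothing buffered; invoke the compressor"
    }
}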
From source file:org.apache.hadoop.hbase.io.hfile.slab.Slab.java
private void allocateAndSlice(int size, int sliceSize) {
    ByteBuffer newSlab = ByteBuffer.allocateDirect(size);
    slabs.add(newSlab);
    for (int j = 0; j < newSlab.capacity(); j += sliceSize) {
        newSlab.limit(j + sliceSize).position(j);
        ByteBuffer aSlice = newSlab.slice();
        buffers.add(aSlice);
        heapSize += ClassSize.estimateBase(aSlice.getClass(), false);
    }
}
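Slicing works for slab allocation because a slice is a window onto the parent's off-heap memory, not a copy. A minimal sketch demonstrating the sharing (sizes are illustrative):

import java.nio.ByteBuffer;

public class SliceDemo {
    public static void main(String[] args) {
        ByteBuffer slab = ByteBuffer.allocateDirect(8192);
        slab.limit(4096).position(0);
        ByteBuffer firstSlice = slab.slice(); // covers bytes [0, 4096) of the slab

        firstSlice.put(0, (byte) 7);          // write through the slice...
        System.out.println(slab.get(0));      // ...and it is visible in the slab: 7
    }
}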
From source file:jext2.DataInode.java
/**
 * Read inode data.
 *
 * @param size       size of the data to be read
 * @param fileOffset start offset in the data area
 * @return a buffer of the requested size containing the data
 * @throws FileTooLarge
 * @throws IoError
 */
public ByteBuffer readData(int size, long fileOffset) throws JExt2Exception, FileTooLarge {
    /* Returning null may break things somewhere..
     * A zero-length buffer breaks something in jlowfuse's C code */
    if (getSize() == 0)
        return ByteBuffer.allocateDirect(1);

    /*
     * size may be larger than inode.size; it doesn't make sense to return
     * 4k of zeros
     */
    if (size > getSize())
        size = (int) getSize();

    ByteBuffer buf = ByteBuffer.allocateDirect(size);

    int blocksize = superblock.getBlocksize();
    long i = 0;
    long firstBlock = fileOffset / blocksize;
    long offset = fileOffset % blocksize;

    /*
     * just as size may be larger than the inode's data, the number of blocks
     * may also be
     */
    long approxBlocks = (size / blocksize) + 1;
    long maxBlocks = this.getBlocks() / (superblock.getBlocksize() / 512);
    if (approxBlocks > maxBlocks)
        approxBlocks = maxBlocks;

    while (i < approxBlocks) {
        long start = firstBlock + i;
        long stop = firstBlock + approxBlocks;

        LinkedList<Long> b = accessData().getBlocks(start, stop);
        int blocksRead;

        /*
         * Note on sparse file support:
         * getBlocks returns null if there is no data block for this
         * logical address, so just advance the position by one block.
         */
        if (b == null) { /* hole */
            blocksRead = 1;

            int unboundedLimit = buf.position() + blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New position, limit " + limit
                    + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            buf.position(limit);

            assert buf.limit() == buf.position();
        } else { /* blocks */
            blocksRead = b.size();

            long pos = b.getFirst() * blocksize + offset;
            int unboundedLimit = buf.position() + blocksRead * blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New limit " + limit
                    + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            blockAccess.readToBufferUnsynchronized(pos, buf);
        }

        i += blocksRead;
        offset = 0;

        /* This should be removed soon. IllegalMonitorStateException happens
         * occasionally for unknown reasons. */
        try {
            accessData().getHierarchyLock().readLock().unlock();
        } catch (IllegalMonitorStateException e) {
            Logger log = Filesystem.getLogger();
            log.warning("IllegalMonitorStateException encountered in readData, inode=" + this);
            log.warning(String.format(
                    "context for exception: blocks=%s i=%d approxBlocks=%d off=%d buf=%s readlock=%s lock.readlock.holds=%s",
                    b, i, approxBlocks, fileOffset, buf, accessData().getHierarchyLock(),
                    accessData().getHierarchyLock().getReadHoldCount()));
        }

        if (buf.capacity() == buf.limit())
            break;
    }

    assert buf.position() == buf.limit() : "Buffer wasn't filled completely";
    assert buf.limit() == size : "Read buffer size does not match request size";

    if (buf.limit() > getSize())
        buf.limit((int) getSize());

    buf.rewind();
    return buf;
}
From source file:com.flexive.shared.FxFileUtils.java
/**
 * Copy data from source to destination NIO channel.
 *
 * @param source      source channel
 * @param destination destination channel
 * @return total number of bytes copied
 * @throws java.io.IOException on errors
 */
public static long copyNIOChannel(ReadableByteChannel source, WritableByteChannel destination)
        throws IOException {
    ByteBuffer xferBuffer = ByteBuffer.allocateDirect(4096);
    long count = 0, read, written;
    while (true) {
        read = source.read(xferBuffer);
        if (read < 0) {
            // EOF: flush any bytes still buffered from an earlier partial write.
            xferBuffer.flip();
            while (xferBuffer.hasRemaining())
                count += destination.write(xferBuffer);
            return count;
        }
        xferBuffer.flip();
        written = destination.write(xferBuffer);
        if (written > 0) {
            count += written;
            if (xferBuffer.hasRemaining())
                xferBuffer.compact(); // keep the unwritten tail, read more data
            else
                xferBuffer.clear();
        } else {
            // Destination temporarily refused data: retry until the buffer drains.
            while (xferBuffer.hasRemaining()) {
                try {
                    Thread.sleep(5);
                } catch (InterruptedException e) {
                    LOG.warn(e);
                }
                written = destination.write(xferBuffer);
                if (written > 0)
                    count += written;
            }
            xferBuffer.clear();
        }
    }
}
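A hypothetical caller, assuming com.flexive.shared.FxFileUtils is on the classpath; the file names are illustrative only:

import com.flexive.shared.FxFileUtils;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class CopyDemo {
    public static void main(String[] args) throws IOException {
        try (FileChannel in = FileChannel.open(Paths.get("in.dat"), StandardOpenOption.READ);
             FileChannel out = FileChannel.open(Paths.get("out.dat"),
                     StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            long copied = FxFileUtils.copyNIOChannel(in, out);
            System.out.println("copied " + copied + " bytes");
        }
    }
}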
From source file:com.rinke.solutions.pinball.io.UsbConnector.java
@Override
protected byte[] receive(ConnectionHandle h, int len) {
    byte[] data = new byte[len];
    IntBuffer transfered = IntBuffer.allocate(1);
    // libusb requires a direct buffer for the transfer.
    ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
    // Use device handle here
    UsbHandle usb = (UsbHandle) h;

    int res = LibUsb.bulkTransfer(usb.getDeviceHandle(), (byte) 0x81, buffer, transfered, 4000);
    if (res != LibUsb.SUCCESS)
        throw new LibUsbException("Bulk transfer failed", res);

    int read = transfered.get();
    if (read != data.length) {
        log.error("unexpected length returned on bulk: {}", read);
    }
    // Copy the received bytes out of the direct buffer into the result array.
    buffer.get(data, 0, Math.min(read, data.length));
    return data;
}
From source file:com.ery.ertc.estorm.util.ByteBufferArray.java
/**
 * We allocate a number of byte buffers as the capacity. To avoid running past the
 * array bounds for the last byte (see {@link ByteBufferArray#multiple}), we allocate
 * one additional buffer with capacity 0.
 *
 * @param capacity         total size of the byte buffer array
 * @param directByteBuffer true if we allocate direct buffers
 */
public ByteBufferArray(long capacity, boolean directByteBuffer) {
    this.bufferSize = DEFAULT_BUFFER_SIZE;
    if (this.bufferSize > (capacity / 16))
        this.bufferSize = (int) roundUp(capacity / 16, 32768);
    this.bufferCount = (int) (roundUp(capacity, bufferSize) / bufferSize);
    LOG.info("Allocating buffers total=" + StringUtils.byteDesc(capacity) + ", sizePerBuffer="
            + StringUtils.byteDesc(bufferSize) + ", count=" + bufferCount);
    buffers = new ByteBuffer[bufferCount + 1];
    locks = new Lock[bufferCount + 1];
    for (int i = 0; i <= bufferCount; i++) {
        locks[i] = new ReentrantLock();
        if (i < bufferCount) {
            buffers[i] = directByteBuffer ? ByteBuffer.allocateDirect(bufferSize)
                    : ByteBuffer.allocate(bufferSize);
        } else {
            buffers[i] = ByteBuffer.allocate(0);
        }
    }
}
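The roundUp helper is not shown in this snippet; a plausible implementation, assumed here, rounds n up to the next multiple of the given unit:

// Assumed helper (not part of the snippet above): round n up to a
// multiple of 'unit', matching how the constructor appears to use it.
private static long roundUp(long n, long unit) {
    return ((n + unit - 1) / unit) * unit;
}

Under the further assumption that DEFAULT_BUFFER_SIZE is 4 MB, a 1 GB capacity keeps bufferSize at 4 MB (since 4 MB <= 1 GB / 16) and yields roundUp(1 GB, 4 MB) / 4 MB = 256 direct buffers, plus the zero-capacity guard buffer.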
From source file:io.horizondb.io.files.DirectFileDataInput.java
/**
 * Creates a new <code>DirectFileDataInput</code> to read data from the specified file.
 *
 * @param path       the file path.
 * @param bufferSize the size of the buffer being used.
 */
public DirectFileDataInput(Path path, int bufferSize) throws IOException {
    notNull(path, "path parameter must not be null");
    isTrue(bufferSize > 0, "the buffer size must be greater than zero");

    this.channel = (FileChannel) Files.newByteChannel(path, StandardOpenOption.READ);
    this.buffer = ByteBuffer.allocateDirect(bufferSize);
    this.slice = Buffers.wrap(this.buffer);

    fillBuffer();
}
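fillBuffer() is not shown in this snippet; a plausible sketch, assuming it simply refills the direct buffer from the channel and flips it for reading:

// Assumed implementation, for illustration only:
private void fillBuffer() throws IOException {
    buffer.clear();       // position = 0, limit = capacity
    channel.read(buffer); // the channel fills the direct buffer natively
    buffer.flip();        // limit = bytes read, position = 0
}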
From source file:org.apache.hadoop.io.crypto.aes.AESDecryptor.java
/**
 * Creates a new decryptor.
 *
 * @param directBufferSize size of the direct buffer to be used.
 * @throws CryptoException
 */
public AESDecryptor(CryptoContext cryptoContext, int directBufferSize) throws CryptoException {
    this.cryptoContext = cryptoContext;
    this.directBufferSize = directBufferSize;

    if (cryptoContext != null)
        this.key = cryptoContext.getKey();

    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    // One AES block smaller, presumably to leave room for the cipher's padding.
    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize - AESConstants.AES_BLOCK_SIZE);
    uncompressedDirectBuf.position(directBufferSize - AESConstants.AES_BLOCK_SIZE);
}
From source file:org.apache.hadoop.io.crypto.aes.AESEncryptor.java
/**
 * Creates a new encryptor.
 *
 * @param directBufferSize size of the direct buffer to be used.
 */
public AESEncryptor(CryptoContext cryptoContext, int directBufferSize) {
    this.cryptoContext = cryptoContext;
    this.directBufferSize = directBufferSize;

    if (cryptoContext != null)
        this.key = cryptoContext.getKey();

    // One AES block smaller, presumably to leave room for the cipher's padding.
    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize - AESConstants.AES_BLOCK_SIZE);
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf.position(directBufferSize);
}
From source file:org.apache.hadoop.io.compress.lz4.Lz4Compressor.java
/**
 * Creates a new compressor.
 *
 * @param directBufferSize size of the direct buffer to be used.
 * @param useLz4HC use the high-compression-ratio version of lz4,
 *                 which trades CPU for compression ratio.
 */
public Lz4Compressor(int directBufferSize, boolean useLz4HC) {
    this.useLz4HC = useLz4HC;
    this.directBufferSize = directBufferSize;

    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf.position(directBufferSize);
}