List of usage examples for java.nio.ByteBuffer.limit()
public final int limit()
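Before the project snippets, here is a minimal standalone sketch (not taken from any of the sources below) of what limit() reports during a typical write-then-read cycle, and the array()/limit() drain idiom that most of the examples rely on:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        // Freshly allocated: limit() == capacity()
        System.out.println(buf.limit());              // 16

        buf.put("hello".getBytes(StandardCharsets.UTF_8));
        buf.flip();
        // After flip(): limit() == number of bytes written, position() == 0
        System.out.println(buf.limit());              // 5

        // The common "drain to array" idiom used throughout the examples below
        byte[] out = new byte[buf.limit()];
        buf.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8)); // hello
    }
}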
From source file:org.commoncrawl.util.TextBytes.java
/**
 * Set to contain the contents of a string.
 */
public void set(String string) {
    try {
        ByteBuffer bb = encode(string, true);
        set(bb.array(), 0, bb.limit());
        cachedUTF8 = string;
        // zbytes = bb.array();
        // length = bb.limit();
        // offset = 0;
    } catch (CharacterCodingException e) {
        throw new RuntimeException("Should not have happened " + e.toString());
    }
}
From source file:com.spidertracks.datanucleus.convert.ByteConverterContext.java
/**
 * Allocate a byte buffer and convert the bytes with the given converter.
 * Performs a mark and a reset on the internal buffer before invoking the
 * write
 *
 * @param converter
 * @param value
 * @return
 */
private ByteBuffer convertToBytes(ByteConverter converter, Object value) {
    ByteBuffer buff = converter.writeBytes(value, null, this);
    if (buff != null) {
        buff.limit();
        buff.reset();
    }
    // System.out.println(String.format("Conversion Object -> Bytes >> value: %s ; hex: %s", value,
    //         new String(org.apache.commons.codec.binary.Hex.encodeHex(buff.array()))));
    return buff;
}
From source file:ome.io.bioformats.BfPixelsWrapper.java
/**
 * cgb - stolen from ImportLibrary - slightly modified
 *
 * Examines a byte array to see if it needs to be byte swapped and modifies
 * the byte array directly.
 * @param bytes The byte array to check and modify if required.
 * @return the <i>byteArray</i> either swapped or not for convenience.
 * @throws IOException if there is an error read from the file.
 * @throws FormatException if there is an error during metadata parsing.
 */
public byte[] swapIfRequired(byte[] bytes) throws FormatException, IOException {
    // We've got nothing to do if the samples are only 8-bits wide.
    if (pixelSize == 1)
        return bytes;
    boolean isLittleEndian = reader.isLittleEndian();
    ByteBuffer buffer = ByteBuffer.wrap(bytes);
    int length;
    if (isLittleEndian) {
        if (pixelSize == 2) { // short/ushort
            ShortBuffer buf = buffer.asShortBuffer();
            length = buffer.limit() / 2;
            for (int i = 0; i < length; i++) {
                buf.put(i, DataTools.swap(buf.get(i)));
            }
        } else if (pixelSize == 4) { // int/uint/float
            IntBuffer buf = buffer.asIntBuffer();
            length = buffer.limit() / 4;
            for (int i = 0; i < length; i++) {
                buf.put(i, DataTools.swap(buf.get(i)));
            }
        } else if (pixelSize == 8) { // long/double
            LongBuffer buf = buffer.asLongBuffer();
            length = buffer.limit() / 8;
            for (int i = 0; i < length; i++) {
                buf.put(i, DataTools.swap(buf.get(i)));
            }
        } else {
            throw new FormatException(String.format("Unsupported sample bit width: %d", pixelSize));
        }
    }
    // We've got a big-endian file with a big-endian byte array.
    bytes = buffer.array();
    return bytes;
}
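A minimal standalone sketch of the idiom used above, where limit() (a byte count) is divided by the element width to get the number of elements visible through a typed view buffer. The byte values and Short.reverseBytes are illustrative stand-ins, not the OME/DataTools code:

import java.nio.ByteBuffer;
import java.nio.ShortBuffer;

public class SwapSketch {
    public static void main(String[] args) {
        byte[] bytes = { 0x01, 0x02, 0x03, 0x04 };
        ByteBuffer buffer = ByteBuffer.wrap(bytes);
        ShortBuffer shorts = buffer.asShortBuffer();
        int length = buffer.limit() / 2; // limit() is in bytes; each short is 2 bytes
        for (int i = 0; i < length; i++) {
            shorts.put(i, Short.reverseBytes(shorts.get(i))); // swap each 16-bit sample in place
        }
        // The backing array has been modified through the view: 02 01 04 03
        for (byte b : buffer.array()) {
            System.out.printf("%02x ", b & 0xFF);
        }
    }
}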
From source file:org.apache.hadoop.hdfs.hoss.db.FileStreamStore.java
/**
 * Write from buf to file
 *
 * @param offset
 *            of block
 * @param buf
 *            ByteBuffer to write
 * @return long offset where buffer begin was write or -1 if error
 */
public synchronized long write(final ByteBuffer buf) {
    if (!validState)
        throw new InvalidStateException();
    final int packet_size = (HEADER_LEN + buf.limit() + FOOTER_LEN); // short + int + data + byte
    final boolean useDirectIO = (packet_size > (1 << bits));
    try {
        if (useDirectIO) {
            LOG.warn("WARN: usingDirectIO packet size is greater (" + packet_size + ") than file buffer ("
                    + bufOutput.capacity() + ")");
        }
        // Align output
        if (alignBlocks && !useDirectIO) {
            final int diffOffset = nextBlockBoundary(offsetOutputUncommited);
            if (packet_size > diffOffset) {
                alignBuffer(diffOffset);
                offsetOutputUncommited += diffOffset;
            }
        }
        // Remember current offset
        final long offset = offsetOutputUncommited;
        // Write pending buffered data to disk
        if (bufOutput.remaining() < packet_size) {
            flushBuffer();
        }
        // Write new data to buffer
        bufOutput.put((byte) ((MAGIC >> 8) & 0xFF)); // Header - Magic (short, 2 bytes, msb-first)
        bufOutput.put((byte) (MAGIC & 0xFF));        // Header - Magic (short, 2 bytes, lsb-last)
        bufOutput.putInt(buf.limit());               // Header - Data Size (int, 4 bytes)
        if (useDirectIO) {
            bufOutput.flip();
            // Write Header + Data + Footer
            fcOutput.write(new ByteBuffer[] { bufOutput, buf, ByteBuffer.wrap(new byte[] { MAGIC_FOOT }) });
            bufOutput.clear();
            offsetOutputUncommited = offsetOutputCommited = fcOutput.position();
            if (syncOnFlush) {
                fcOutput.force(false);
                if (callback != null)
                    callback.synched(offsetOutputCommited);
            }
        } else {
            bufOutput.put(buf);        // Data Body
            bufOutput.put(MAGIC_FOOT); // Footer
            // Increment offset of buffered data (header + user-data)
            offsetOutputUncommited += packet_size;
            if (flushOnWrite)
                flushBuffer();
        }
        return offset;
    } catch (Exception e) {
        LOG.error("Exception in write()", e);
    }
    return -1L;
}
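Note that this method uses buf.limit() both for the recorded data size and for packet_size, while the actual put/write calls transfer buf.remaining() bytes; the two agree only when the caller hands in a flipped buffer whose position is 0.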
From source file:org.apache.cxf.transport.http.asyncclient.SharedOutputBuffer.java
public int write(ByteBuffer b) throws IOException {
    if (b == null) {
        return 0;
    }
    this.lock.lock();
    try {
        if (this.shutdown || this.endOfStream) {
            throw new IllegalStateException("Buffer already closed for writing");
        }
        setInputMode();
        if (!this.buffer.hasRemaining()) {
            flushContent();
            setInputMode();
        }
        int c = b.limit() - b.position();
        largeWrapper = b;
        while (largeWrapper.hasRemaining()) {
            flushContent();
        }
        largeWrapper = null;
        return c;
    } finally {
        this.lock.unlock();
    }
}
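Here b.limit() - b.position() is exactly what b.remaining() returns: the number of bytes still to be consumed, which the method reports as the count written once the loop has drained the wrapped buffer.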
From source file:alluxio.shell.command.CpCommand.java
/**
 * Copies a file or directory specified by srcPath from the local filesystem to dstPath in the
 * Alluxio filesystem space.
 *
 * @param srcPath the {@link AlluxioURI} of the source file in the local filesystem
 * @param dstPath the {@link AlluxioURI} of the destination
 * @throws AlluxioException when Alluxio exception occurs
 * @throws IOException when non-Alluxio exception occurs
 */
private void copyPath(AlluxioURI srcPath, AlluxioURI dstPath) throws AlluxioException, IOException {
    File src = new File(srcPath.getPath());
    if (!src.isDirectory()) {
        // If the dstPath is a directory, then it should be updated to be the path of the file where
        // src will be copied to.
        if (mFileSystem.exists(dstPath) && mFileSystem.getStatus(dstPath).isFolder()) {
            dstPath = dstPath.join(src.getName());
        }
        FileOutStream os = null;
        try (Closer closer = Closer.create()) {
            os = closer.register(mFileSystem.createFile(dstPath));
            FileInputStream in = closer.register(new FileInputStream(src));
            FileChannel channel = closer.register(in.getChannel());
            ByteBuffer buf = ByteBuffer.allocate(8 * Constants.MB);
            while (channel.read(buf) != -1) {
                buf.flip();
                os.write(buf.array(), 0, buf.limit());
            }
        } catch (Exception e) {
            // Close the out stream and delete the file, so we don't have an incomplete file lying
            // around.
            if (os != null) {
                os.cancel();
                if (mFileSystem.exists(dstPath)) {
                    mFileSystem.delete(dstPath);
                }
            }
            throw e;
        }
    } else {
        mFileSystem.createDirectory(dstPath);
        List<String> errorMessages = new ArrayList<>();
        File[] fileList = src.listFiles();
        if (fileList == null) {
            String errMsg = String.format("Failed to list files for directory %s", src);
            errorMessages.add(errMsg);
            fileList = new File[0];
        }
        int misFiles = 0;
        for (File srcFile : fileList) {
            AlluxioURI newURI = new AlluxioURI(dstPath, new AlluxioURI(srcFile.getName()));
            try {
                copyPath(new AlluxioURI(srcPath.getScheme(), srcPath.getAuthority(), srcFile.getPath()), newURI);
            } catch (IOException e) {
                errorMessages.add(e.getMessage());
                if (!mFileSystem.exists(newURI)) {
                    misFiles++;
                }
            }
        }
        if (errorMessages.size() != 0) {
            if (misFiles == fileList.length) {
                // If the directory doesn't exist and no files were created, then delete the directory
                if (mFileSystem.exists(dstPath)) {
                    mFileSystem.delete(dstPath);
                }
            }
            throw new IOException(Joiner.on('\n').join(errorMessages));
        }
    }
}
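The read/flip/drain loop above is a common way to pump a FileChannel into a plain OutputStream. A minimal standalone sketch using only JDK classes (the file names are placeholders) might look like this:

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class CopySketch {
    public static void main(String[] args) throws IOException {
        // "src.bin" and "dst.bin" are placeholder file names.
        try (FileInputStream in = new FileInputStream("src.bin");
             FileChannel channel = in.getChannel();
             OutputStream out = new FileOutputStream("dst.bin")) {
            ByteBuffer buf = ByteBuffer.allocate(8 * 1024);
            while (channel.read(buf) != -1) {
                buf.flip();                              // limit() now marks the end of the bytes just read
                out.write(buf.array(), 0, buf.limit());  // drain exactly limit() bytes from the backing array
                buf.clear();                             // reset position/limit for the next read
            }
        }
    }
}

Unlike the snippet above, this sketch clears the buffer after each write so the full capacity is available again; without clear(), later reads are capped at the limit set by the first flip().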
From source file:org.apache.hadoop.hdfs.hoss.db.FileBlockStore.java
/**
 * Write from buf to file
 *
 * @param index
 *            of block
 * @param buf
 *            ByteBuffer to write
 * @return true if write is OK
 */
public boolean set(final int index, final ByteBuffer buf) {
    if (!validState)
        throw new InvalidStateException();
    if (LOG.isDebugEnabled())
        LOG.debug("set(" + index + "," + buf + ")");
    try {
        if (buf.limit() > blockSize) {
            LOG.error("ERROR: buffer.capacity=" + buf.limit() + " > blocksize=" + blockSize);
        }
        if (useMmap) {
            final MappedByteBuffer mbb = getMmapForIndex(index, true);
            if (mbb != null) {
                mbb.put(buf);
                return true;
            }
            // Callback to RAF
        }
        fileChannel.position(index * blockSize).write(buf);
        return true;
    } catch (Exception e) {
        LOG.error("Exception in set(" + index + ")", e);
    }
    return false;
}
From source file:alluxio.cli.fs.command.CpCommand.java
/**
 * Copies a file or directory specified by srcPath from the local filesystem to dstPath in the
 * Alluxio filesystem space.
 *
 * @param srcPath the {@link AlluxioURI} of the source file in the local filesystem
 * @param dstPath the {@link AlluxioURI} of the destination
 */
private void copyPath(AlluxioURI srcPath, AlluxioURI dstPath) throws AlluxioException, IOException {
    File src = new File(srcPath.getPath());
    if (!src.isDirectory()) {
        // If the dstPath is a directory, then it should be updated to be the path of the file where
        // src will be copied to.
        if (mFileSystem.exists(dstPath) && mFileSystem.getStatus(dstPath).isFolder()) {
            dstPath = dstPath.join(src.getName());
        }
        FileOutStream os = null;
        try (Closer closer = Closer.create()) {
            FileWriteLocationPolicy locationPolicy;
            locationPolicy = CommonUtils.createNewClassInstance(
                    Configuration.<FileWriteLocationPolicy>getClass(
                            PropertyKey.USER_FILE_COPY_FROM_LOCAL_WRITE_LOCATION_POLICY),
                    new Class[] {}, new Object[] {});
            os = closer.register(mFileSystem.createFile(dstPath,
                    CreateFileOptions.defaults().setLocationPolicy(locationPolicy)));
            FileInputStream in = closer.register(new FileInputStream(src));
            FileChannel channel = closer.register(in.getChannel());
            ByteBuffer buf = ByteBuffer.allocate(8 * Constants.MB);
            while (channel.read(buf) != -1) {
                buf.flip();
                os.write(buf.array(), 0, buf.limit());
            }
        } catch (Exception e) {
            // Close the out stream and delete the file, so we don't have an incomplete file lying
            // around.
            if (os != null) {
                os.cancel();
                if (mFileSystem.exists(dstPath)) {
                    mFileSystem.delete(dstPath);
                }
            }
            throw e;
        }
    } else {
        mFileSystem.createDirectory(dstPath);
        List<String> errorMessages = new ArrayList<>();
        File[] fileList = src.listFiles();
        if (fileList == null) {
            String errMsg = String.format("Failed to list files for directory %s", src);
            errorMessages.add(errMsg);
            fileList = new File[0];
        }
        int misFiles = 0;
        for (File srcFile : fileList) {
            AlluxioURI newURI = new AlluxioURI(dstPath, new AlluxioURI(srcFile.getName()));
            try {
                copyPath(new AlluxioURI(srcPath.getScheme(), srcPath.getAuthority(), srcFile.getPath()), newURI);
            } catch (IOException e) {
                errorMessages.add(e.getMessage());
                if (!mFileSystem.exists(newURI)) {
                    misFiles++;
                }
            }
        }
        if (errorMessages.size() != 0) {
            if (misFiles == fileList.length) {
                // If the directory doesn't exist and no files were created, then delete the directory
                if (mFileSystem.exists(dstPath)) {
                    mFileSystem.delete(dstPath);
                }
            }
            throw new IOException(Joiner.on('\n').join(errorMessages));
        }
    }
}
From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java
private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, ByteBuffer buf) {
    assertEquals(
            msgPrefix + ": expected " + Bytes.toStringBinary(arr) + ", actual " + Bytes.toStringBinary(buf), 0,
            Bytes.compareTo(arr, 0, arr.length, buf.array(), buf.arrayOffset(), buf.limit()));
}
From source file:dap4.dap4.Dap4Print.java
protected String valueString(Object value, DapType basetype) throws DataException {
    if (value == null)
        return "null";
    AtomicType atype = basetype.getAtomicType();
    boolean unsigned = atype.isUnsigned();
    switch (atype) {
    case Int8:
    case UInt8:
        long lvalue = ((Byte) value).longValue();
        if (unsigned)
            lvalue &= 0xFFL;
        return String.format("%d", lvalue);
    case Int16:
    case UInt16:
        lvalue = ((Short) value).longValue();
        if (unsigned)
            lvalue &= 0xFFFFL;
        return String.format("%d", lvalue);
    case Int32:
    case UInt32:
        lvalue = ((Integer) value).longValue();
        if (unsigned)
            lvalue &= 0xFFFFFFFFL;
        return String.format("%d", lvalue);
    case Int64:
    case UInt64:
        lvalue = ((Long) value).longValue();
        if (unsigned) {
            BigInteger b = BigInteger.valueOf(lvalue);
            b = b.and(DapUtil.BIG_UMASK64);
            return b.toString();
        } else
            return String.format("%d", lvalue);
    case Float32:
        return String.format("%f", ((Float) value).floatValue());
    case Float64:
        return String.format("%f", ((Double) value).doubleValue());
    case Char:
        return "'" + ((Character) value).toString() + "'";
    case String:
    case URL:
        return "\"" + ((String) value) + "\"";
    case Opaque:
        ByteBuffer opaque = (ByteBuffer) value;
        String s = "0x";
        for (int i = 0; i < opaque.limit(); i++) {
            byte b = opaque.get(i);
            char c = hexchar((b >> 4) & 0xF);
            s += c;
            c = hexchar((b) & 0xF);
            s += c;
        }
        return s;
    case Enum:
        return valueString(value, ((DapEnum) basetype).getBaseType());
    default:
        break;
    }
    throw new DataException("Unknown type: " + basetype);
}