List of usage examples for java.nio ByteBuffer limit
public final int limit()
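Before the project examples, a minimal standalone sketch of how limit() interacts with position() and flip(); the buffer size and contents here are illustrative only.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);       // limit() == capacity() == 64
        buf.put("hello".getBytes(StandardCharsets.UTF_8));

        buf.flip();                                      // limit() becomes the old position (5), position() becomes 0
        System.out.println(buf.limit());                 // 5
        System.out.println(buf.remaining());             // 5, i.e. limit() - position()

        byte[] out = new byte[buf.remaining()];
        buf.get(out);                                    // reads exactly up to limit()
        System.out.println(new String(out, StandardCharsets.UTF_8)); // hello
    }
}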
From source file:org.apache.hadoop.hive.serde2.thrift.TBinarySortableProtocol.java
@Override
public void writeBinary(ByteBuffer bin) throws TException {
    if (bin == null) {
        writeRawBytes(nullByte, 0, 1);
        return;
    }
    int length = bin.limit() - bin.position() - bin.arrayOffset();
    if (bin.hasArray()) {
        writeBinary(bin.array(), bin.arrayOffset() + bin.position(), length);
    } else {
        byte[] copy = new byte[length];
        bin.get(copy);
        writeBinary(copy);
    }
}
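For comparison, remaining() computes limit() - position() directly; the extra arrayOffset() term above is specific to that Hive snippet. A minimal sketch of the more common pattern, where consume() is a hypothetical stand-in for a downstream writer such as writeBinary:

import java.nio.ByteBuffer;

public final class RemainingSketch {
    // Hypothetical downstream consumer; stands in for a real writer.
    static void consume(byte[] data, int offset, int length) {
        System.out.println(length + " bytes starting at offset " + offset);
    }

    static void drain(ByteBuffer bin) {
        int length = bin.remaining();                  // same as bin.limit() - bin.position()
        if (bin.hasArray()) {
            // Heap buffer: hand the backing array over without copying.
            consume(bin.array(), bin.arrayOffset() + bin.position(), length);
        } else {
            // Direct or read-only buffer: copy the readable region out.
            byte[] copy = new byte[length];
            bin.get(copy);
            consume(copy, 0, length);
        }
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put(new byte[] { 1, 2, 3, 4 });
        buf.flip();
        drain(buf);                                    // prints "4 bytes starting at offset 0"
    }
}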
From source file:org.sglover.checksum.ChecksumServiceImpl.java
@Override
public NodeChecksums getChecksums(final Node node, final InputStream in) {
    final String nodeId = node.getNodeId();
    final Long nodeVersion = node.getNodeVersion();
    final Long nodeInternalId = node.getNodeInternalId();
    final String versionLabel = node.getVersionLabel();

    int x = 0;

    NodeChecksums documentChecksums = new NodeChecksums(nodeId, nodeInternalId, nodeVersion, versionLabel,
            blockSize);

    try (ReadableByteChannel fc = getChannel(in)) {
        ByteBuffer data = ByteBuffer.allocate(blockSize * 20);

        int bytesRead = -1;
        int blockNum = 1; // starts at 1

        do {
            bytesRead = fc.read(data);
            if (bytesRead > 0) {
                x += bytesRead;

                data.flip();

                long numBlocks = data.limit() / blockSize + (data.limit() % blockSize > 0 ? 1 : 0);

                // spin through the data and create checksums for each block
                for (int i = 0; i < numBlocks; i++) {
                    int start = i * blockSize;
                    int end = start + blockSize - 1;
                    if (end >= data.limit()) {
                        end = data.limit() - 1;
                    }

                    // calculate the adler32 checksum
                    Adler32 adlerInfo = new Adler32(data, start, end, hasher);

                    // calculate the full md5 checksum
                    String md5sum = hasher.md5(data, start, end);

                    Checksum checksum = new Checksum(blockNum, start, end, adlerInfo.getHash(),
                            adlerInfo.getAdler32(), md5sum);
                    if (blockNum < 2) {
                        System.out.println(checksum);
                    }
                    documentChecksums.addChecksum(checksum);

                    blockNum++;
                }

                data.clear();
            }
        } while (bytesRead > 0);
    } catch (NoSuchAlgorithmException | IOException e) {
        throw new RuntimeException(e);
    }

    return documentChecksums;
}
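The loop above relies on flip() setting limit() to the number of bytes just read from the channel. A stripped-down sketch of that read/flip/clear pattern, with a hypothetical payload and block size:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ReadLoopSketch {
    public static void main(String[] args) throws IOException {
        byte[] input = new byte[10_000];               // hypothetical payload
        int blockSize = 4096;                          // hypothetical block size

        try (ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(input))) {
            ByteBuffer data = ByteBuffer.allocate(blockSize);
            int bytesRead;
            while ((bytesRead = ch.read(data)) > 0) {
                data.flip();                           // limit() is now the count of bytes just read
                System.out.println("read " + data.limit() + " bytes");
                // ... process data[0 .. limit()-1] here ...
                data.clear();                          // reset position/limit for the next read
            }
        }
    }
}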
From source file:org.apache.hadoop.hbase.io.hfile.HFileReaderV1.java
@Override
public DataInput getGeneralBloomFilterMetadata() throws IOException {
    // Shouldn't cache Bloom filter blocks, otherwise server would abort when
    // splitting, see HBASE-6479
    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
    if (buf == null)
        return null;
    ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(), buf.arrayOffset(), buf.limit());
    return new DataInputStream(bais);
}
From source file:org.dbmfs.DatabaseFilesystem.java
public int write(String path, Object fh, boolean isWritepage, ByteBuffer buf, long offset) throws FuseException {
    log.info("write path:" + path + " offset:" + offset + " isWritepage:" + isWritepage + " buf.limit:"
            + buf.limit());
    if (readOnlyMount)
        throw new FuseException("Read Only").initErrno(FuseException.EACCES);

    try {
        if (fh == null)
            return Errno.EBADE;

        path = DbmfsUtil.convertRealPath(path.trim());

        synchronized (syncFileAccess[((path.hashCode() << 1) >>> 1) % syncFileAccess.length]) {
            if (bufferedSaveData.containsKey(fh)) {
                Map bufferedData = bufferedSaveData.get(fh);
                ByteArrayOutputStream bufferedByteData = (ByteArrayOutputStream) bufferedData
                        .get(bufferedDataBodyKey);
                long bOffset = ((Long) bufferedData.get(bufferedDataOffset)).longValue();

                if ((bOffset + bufferedByteData.size()) == offset) {
                    byte[] nowWriteBytes = new byte[buf.limit()];
                    buf.get(nowWriteBytes);
                    bufferedByteData.write(nowWriteBytes);
                    return 0;
                }
            } else {
                Map bufferedData = new HashMap();
                bufferedData.put("path", path);
                bufferedData.put("fh", fh);
                bufferedData.put("isWritepage", isWritepage);

                ByteArrayOutputStream bufferedByteData = new ByteArrayOutputStream(1024 * 1024 * 2);
                byte[] nowWriteBytes = new byte[buf.limit()];
                buf.get(nowWriteBytes);
                bufferedByteData.write(nowWriteBytes);

                bufferedData.put(bufferedDataBodyKey, bufferedByteData);
                bufferedData.put(bufferedDataOffset, offset);
                this.bufferedSaveData.put(fh, bufferedData);
                return 0;
            }
        }
    } catch (Exception e) {
        throw new FuseException(e);
    }
    return 0;
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
/**
 * Compress a set of columns.
 *
 * The header contains a compressed array of data types.
 * The body contains compressed columns and their metadata.
 * The footer contains a compressed array of chunk sizes. The final four bytes of the footer
 * encode the byte size of that compressed array.
 *
 * @param colSet
 *
 * @return ByteBuffer representing the compressed set.
 */
@Override
public ByteBuffer compress(ColumnBuffer[] colSet) {

    // Many compression libraries allow you to avoid allocation of intermediate arrays.
    // To use these API, we need to preallocate the output container.

    // Reserve space for the header.
    int[] dataType = new int[colSet.length];
    int maxCompressedSize = Snappy.maxCompressedLength(4 * dataType.length);

    // Reserve space for the compressed nulls BitSet for each column.
    maxCompressedSize += colSet.length * Snappy.maxCompressedLength((colSet.length / 8) + 1);

    // Track the length of `List<Integer> compressedSize` which will be declared later.
    int uncompressedFooterLength = 1 + 2 * colSet.length;

    for (int colNum = 0; colNum < colSet.length; ++colNum) {
        // Reserve space for the compressed columns.
        dataType[colNum] = colSet[colNum].getType().toTType().getValue();
        switch (TTypeId.findByValue(dataType[colNum])) {
        case BOOLEAN_TYPE:
            maxCompressedSize += Integer.SIZE / Byte.SIZE; // This is for the encoded length.
            maxCompressedSize += Snappy.maxCompressedLength((colSet.length / 8) + 1);
            break;
        case TINYINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length);
            break;
        case SMALLINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Short.SIZE / Byte.SIZE);
            break;
        case INT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            break;
        case BIGINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Long.SIZE / Byte.SIZE);
            break;
        case DOUBLE_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Double.SIZE / Byte.SIZE);
            break;
        case BINARY_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (ByteBuffer nextBuffer : colSet[colNum].toTColumn().getBinaryVal().getValues()) {
                maxCompressedSize += Snappy.maxCompressedLength(nextBuffer.limit());
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        case STRING_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (String nextString : colSet[colNum].toTColumn().getStringVal().getValues()) {
                maxCompressedSize += Snappy
                        .maxCompressedLength(nextString.getBytes(StandardCharsets.UTF_8).length);
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        default:
            throw new IllegalStateException("Unrecognized column type");
        }
    }

    // Reserve space for the footer.
    maxCompressedSize += Snappy.maxCompressedLength(uncompressedFooterLength * Integer.SIZE / Byte.SIZE);

    // Allocate the output container.
    ByteBuffer output = ByteBuffer.allocate(maxCompressedSize);

    // Allocate the footer. This goes in the footer because we don't know the chunk sizes until after
    // the columns have been compressed and written.
    ArrayList<Integer> compressedSize = new ArrayList<Integer>(uncompressedFooterLength);

    // Write to the output buffer.
    try {
        // Write the header.
        compressedSize.add(writePrimitives(dataType, output));

        // Write the compressed columns and metadata.
        for (int colNum = 0; colNum < colSet.length; colNum++) {
            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                TBoolColumn column = colSet[colNum].toTColumn().getBoolVal();

                List<Boolean> bools = column.getValues();
                BitSet bsBools = new BitSet(bools.size());
                for (int rowNum = 0; rowNum < bools.size(); rowNum++) {
                    bsBools.set(rowNum, bools.get(rowNum));
                }

                compressedSize.add(writePrimitives(column.getNulls(), output));

                // BitSet won't write trailing zeroes so we encode the length
                output.putInt(column.getValuesSize());

                compressedSize.add(writePrimitives(bsBools.toByteArray(), output));
                break;
            }
            case TINYINT_TYPE: {
                TByteColumn column = colSet[colNum].toTColumn().getByteVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedBytes(column.getValues(), output));
                break;
            }
            case SMALLINT_TYPE: {
                TI16Column column = colSet[colNum].toTColumn().getI16Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedShorts(column.getValues(), output));
                break;
            }
            case INT_TYPE: {
                TI32Column column = colSet[colNum].toTColumn().getI32Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedIntegers(column.getValues(), output));
                break;
            }
            case BIGINT_TYPE: {
                TI64Column column = colSet[colNum].toTColumn().getI64Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedLongs(column.getValues(), output));
                break;
            }
            case DOUBLE_TYPE: {
                TDoubleColumn column = colSet[colNum].toTColumn().getDoubleVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedDoubles(column.getValues(), output));
                break;
            }
            case BINARY_TYPE: {
                TBinaryColumn column = colSet[colNum].toTColumn().getBinaryVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).limit();
                    totalSize += column.getValues().get(rowNum).limit();
                }

                // Flatten the data for Snappy for a better compression ratio.
                ByteBuffer flattenedData = ByteBuffer.allocate(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.put(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the compressed, flattened data.
                compressedSize.add(writePrimitives(flattenedData.array(), output));
                break;
            }
            case STRING_TYPE: {
                TStringColumn column = colSet[colNum].toTColumn().getStringVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).length();
                    totalSize += column.getValues().get(rowNum).length();
                }

                // Flatten the data for Snappy for a better compression ratio.
                StringBuilder flattenedData = new StringBuilder(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.append(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the flattened data.
                compressedSize.add(
                        writePrimitives(flattenedData.toString().getBytes(StandardCharsets.UTF_8), output));
                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type");
            }
        }

        // Write the footer.
        output.putInt(writeBoxedIntegers(compressedSize, output));

    } catch (IOException e) {
        e.printStackTrace();
    }
    output.flip();
    return output;
}
From source file:com.tinspx.util.io.ChannelSourceTest.java
@Test
public void testByteBufferSource() throws IOException {
    int off = 443, len = 17167;
    ByteBuffer buf, direct;
    direct = ByteBuffer.allocateDirect(INPUT.length);
    assertTrue(direct.isDirect());
    direct.put(INPUT);
    byte[] sub = Arrays.copyOfRange(INPUT, off, off + len);

    // full input
    buf = ByteBuffer.wrap(INPUT);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    buf = ByteBuffer.wrap(INPUT).asReadOnlyBuffer();
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    direct.clear();
    buf = direct;
    ByteSourceTests.testByteSource(ChannelSource.of(buf), INPUT);
    assertEquals(0, buf.position());
    assertEquals(INPUT.length, buf.limit());

    // sub range of input
    buf = ByteBuffer.wrap(INPUT);
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());

    buf = ByteBuffer.wrap(INPUT).asReadOnlyBuffer();
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());

    direct.clear();
    buf = direct;
    buf.clear().position(off).limit(off + len);
    ByteSourceTests.testByteSource(ChannelSource.of(buf), sub);
    assertEquals(off, buf.position());
    assertEquals(off + len, buf.limit());
}
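Setting position() and limit() is the standard way to expose a sub-range of a buffer, as the test above does for its read-only, heap, and direct variants. A minimal sketch of that windowing pattern (the array contents are illustrative):

import java.nio.ByteBuffer;
import java.util.Arrays;

public class SubRangeSketch {
    public static void main(String[] args) {
        byte[] input = new byte[100];
        Arrays.fill(input, (byte) 7);

        ByteBuffer buf = ByteBuffer.wrap(input);
        buf.position(10).limit(30);                    // expose bytes [10, 30)

        System.out.println(buf.remaining());           // 20

        ByteBuffer slice = buf.slice();                // independent view of just that window
        System.out.println(slice.limit());             // 20 (the slice's limit is the window length)
    }
}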
From source file:com.liferay.portal.util.FileImpl.java
public boolean isSameContent(File file, String s) {
    ByteBuffer byteBuffer = CharsetEncoderUtil.encode(StringPool.UTF8, s);

    return isSameContent(file, byteBuffer.array(), byteBuffer.limit());
}
From source file:com.inductiveautomation.xopc.drivers.modbus2.requests.WriteMultipleRegistersRequest.java
public WriteMultipleRegistersRequest(List<WriteItem> items, ChannelWriter channelWriter,
        ModbusTransport transport, boolean zeroBased, byte unitId, int timeout, Logger log, boolean swapWords,
        boolean rightJustifyStrings, boolean reverseStringByteOrder,
        CommunicationCallback communicationCallback) {
    super(items, channelWriter, transport, zeroBased, unitId, timeout, log, communicationCallback);

    this.swapWords = swapWords;
    this.rightJustifyStrings = rightJustifyStrings;
    this.reverseStringByteOrder = reverseStringByteOrder;

    RequestOffsets offsets = new RequestOffsets.Calculator(items).calculate();

    // Multiply by two since each unit of length is one 16-bit register.
    ByteBuffer buffer = ByteBuffer.allocate(offsets.getLength() * 2);

    Iterator<? extends WriteItem> iter = items.iterator();
    while (iter.hasNext()) {
        WriteItem item = iter.next();
        ModbusAddress address = (ModbusAddress) item.getAddressObject();
        int offset = (address.getStartAddress() - offsets.getStartAddress()) * 2;

        buffer.position(offset);
        byte[] bs = getValue(item, address);
        buffer.put(bs);
    }

    short startAddress = (short) offsets.getStartAddress();
    short quantity = (short) offsets.getLength();
    byte byteCount = (byte) buffer.limit();
    byte[] values = buffer.array();

    if (zeroBased) {
        startAddress--;
    }

    request = new WriteMultipleRegisters.Request(startAddress, quantity, byteCount, values);
}
From source file:org.apache.hc.client5.http.impl.auth.CredSspScheme.java
private String encodeBase64(final ByteBuffer buffer) {
    final int limit = buffer.limit();
    final byte[] bytes = new byte[limit];
    buffer.get(bytes);
    return new String(Base64.encodeBase64(bytes), StandardCharsets.US_ASCII);
}
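Sizing the array with limit() assumes the buffer's position() is 0; if the buffer may have been partially consumed, remaining() is the safer measure. A minimal variant under that assumption, using java.util.Base64 rather than the commons-codec class above:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class EncodeSketch {
    static String encodeBase64(ByteBuffer buffer) {
        byte[] bytes = new byte[buffer.remaining()];   // limit() - position(), not just limit()
        buffer.get(bytes);
        return new String(Base64.getEncoder().encode(bytes), StandardCharsets.US_ASCII);
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("example".getBytes(StandardCharsets.US_ASCII));
        System.out.println(encodeBase64(buf));         // ZXhhbXBsZQ==
    }
}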
From source file:org.apache.cassandra.hadoop.pig.CassandraStorage.java
@Override
public Tuple getNext() throws IOException {
    try {
        // load the next pair
        if (!reader.nextKeyValue())
            return null;

        CfDef cfDef = getCfDef();
        ByteBuffer key = (ByteBuffer) reader.getCurrentKey();
        SortedMap<ByteBuffer, IColumn> cf = (SortedMap<ByteBuffer, IColumn>) reader.getCurrentValue();
        assert key != null && cf != null;

        // and wrap it in a tuple
        Tuple tuple = TupleFactory.getInstance().newTuple(2);
        ArrayList<Tuple> columns = new ArrayList<Tuple>();
        tuple.set(0, new DataByteArray(key.array(), key.position() + key.arrayOffset(),
                key.limit() + key.arrayOffset()));
        for (Map.Entry<ByteBuffer, IColumn> entry : cf.entrySet()) {
            columns.add(columnToTuple(entry.getKey(), entry.getValue(), cfDef));
        }

        tuple.set(1, new DefaultDataBag(columns));
        return tuple;
    } catch (InterruptedException e) {
        throw new IOException(e.getMessage());
    }
}