List of usage examples for the java.nio.ByteBuffer.limit() method
public final int limit()
From source file:edu.umass.cs.utils.Util.java
/** * Transfer from src to dst without throwing exception if src.remaining() > * dst.remaining() but copying dst.remaining() bytes from src instead. *//* w w w . j av a2 s . c o m*/ public static ByteBuffer put(ByteBuffer dst, ByteBuffer src) { if (src.remaining() < dst.remaining()) return dst.put(src); int oldLimit = src.limit(); src.limit(src.position() + dst.remaining()); dst.put(src); src.limit(oldLimit); return dst; // byte[] buf = new byte[dst.remaining()]; // src.get(buf); // return dst.put(buf); }
From source file:com.glaf.core.util.ByteBufferUtils.java
/**
 * Writes the remaining bytes of {@code buffer} (position to limit) to
 * {@code out} without changing the buffer's position.
 *
 * @param buffer the source buffer; its position is left untouched
 * @param out    the sink to write to
 * @throws IOException if the underlying write fails
 */
public static void write(ByteBuffer buffer, DataOutput out) throws IOException {
    if (buffer.hasArray()) {
        // Array-backed buffer: a single bulk write of the remaining region.
        int start = buffer.arrayOffset() + buffer.position();
        out.write(buffer.array(), start, buffer.remaining());
        return;
    }
    // Non-array buffer: absolute gets keep the position intact.
    for (int idx = buffer.position(), end = buffer.limit(); idx < end; idx++) {
        out.writeByte(buffer.get(idx));
    }
}
From source file:com.aerohive.nms.engine.admin.task.licensemgr.license.processor2.PacketUtil.java
public static byte[] join(Header header, byte[] content) { ByteBuffer buf = ByteBuffer.allocate(8192); byte[] outBytes; if (content.length == 0) { outBytes = new byte[0]; } else {/*from w w w .ja v a 2 s .c o m*/ if (header.isSecretFlag()) { // encrypt data outBytes = encryptData(content); } else { outBytes = new byte[content.length]; System.arraycopy(content, 0, outBytes, 0, content.length); } } buf.put(header.getType()); buf.putInt(outBytes.length); buf.put(header.getProtocolVersion()); buf.put(header.isSecretFlag() ? CommConst.Secret_Flag_Yes : CommConst.Secret_Flag_No); buf.put(outBytes); buf.flip(); byte[] dst = new byte[buf.limit()]; buf.get(dst); return dst; }
From source file:com.glaf.core.util.ByteBufferUtils.java
/**
 * Copies {@code length} bytes from {@code src} (starting at {@code srcPos})
 * into {@code dst} (starting at {@code dstPos}). Acts like
 * {@link System#arraycopy} but for ByteBuffers; neither buffer's position
 * is modified.
 *
 * @param src    the source ByteBuffer
 * @param srcPos absolute starting index in the source buffer
 * @param dst    the destination ByteBuffer
 * @param dstPos absolute starting index in the destination buffer
 * @param length the number of bytes to copy
 */
public static void arrayCopy(ByteBuffer src, int srcPos, ByteBuffer dst, int dstPos, int length) {
    if (src.hasArray() && dst.hasArray()) {
        // Both heap-backed: delegate to the intrinsic bulk copy (which
        // performs its own bounds checking).
        System.arraycopy(src.array(), src.arrayOffset() + srcPos,
                dst.array(), dst.arrayOffset() + dstPos, length);
        return;
    }
    if (src.limit() - srcPos < length || dst.limit() - dstPos < length) {
        throw new IndexOutOfBoundsException();
    }
    // Absolute get/put so neither buffer's position changes.
    for (int copied = 0; copied < length; copied++) {
        dst.put(dstPos + copied, src.get(srcPos + copied));
    }
}
From source file:edu.umn.cs.spatialHadoop.nasa.HDFRecordReader.java
/**
 * Recovers all missing entries using a two-dimensional interpolation technique.
 *
 * The dataset is a square grid of 16-bit values stored row-major in
 * {@code values} (two bytes per entry). For each land point that carries
 * {@code fillValue}, the nearest non-fill neighbors above, below, left and
 * right are located and passed to {@code interpolatePoint}; water points
 * are left untouched.
 *
 * @param values    The dataset that need to be recovered
 * @param fillValue The marker that marks missing values
 * @param waterMask A bit-mask with <code>true</code> values in water areas
 *                  and <code>false</code> values for land areas.
 */
public static void recoverXYShorts(ByteBuffer values, short fillValue, BitArray waterMask) {
    // Resolution of the dataset which is the size of each of its two dimensions
    // e.g., 1200x1200, 2400x2400, or 4800x4800
    // NOTE(review): assumes values holds exactly resolution^2 shorts — confirm with caller.
    int resolution = (int) Math.sqrt(values.limit() / 2);
    // This array stores all the runs of true (non-fill) values. The size is
    // always even where the two values point to the first and last positions
    // of the run, respectively
    ShortArray[] trueRuns = findTrueRuns(values, fillValue);
    // Now, scan the dataset column by column to recover missing values
    for (short col = 0; col < resolution; col++) {
        // Find runs of fillValues and recover all of them
        short row1 = 0;
        while (row1 < resolution) {
            // Skip as many true values as we can
            while (row1 < resolution && values.getShort(2 * (row1 * resolution + col)) != fillValue)
                row1++;
            // Now, row1 points to the first fillValue
            if (row1 == resolution) {
                // All entries in the column have true values. No processing needed
                // (continue acts like break here: the while condition fails next check)
                continue;
            }
            short row2 = (short) (row1 + 1);
            // Skip as many fillValues as we can
            while (row2 < resolution && values.getShort(2 * (row2 * resolution + col)) == fillValue)
                row2++;
            // Now, row2 points to a true value
            // Offsets of the four true values to the (top, bottom, left, right);
            // -1 marks a direction with no usable neighbor
            short[] offsetsToInterpolate = { -1, -1, -1, -1 };
            short[] valuesToInterpolate = new short[4];
            if (row1 > 0) {
                // A true value exists directly above the fill run
                offsetsToInterpolate[0] = (short) (row1 - 1);
                valuesToInterpolate[0] = values.getShort(2 * (offsetsToInterpolate[0] * resolution + col));
            }
            if (row2 < resolution) {
                // A true value exists directly below the fill run
                offsetsToInterpolate[1] = row2;
                valuesToInterpolate[1] = values.getShort(2 * (offsetsToInterpolate[1] * resolution + col));
            }
            for (int row = row1; row < row2; row++) {
                if (values.getShort(2 * (row * resolution + col)) == fillValue
                        && !waterMask.get((row * resolution + col))) {
                    // The point at (row, col) is on land and has a fill (empty) value
                    // Find the position of the run in this row to find points to the left and right
                    int position = -trueRuns[row].binarySearch(col) - 1;
                    if (position > 0) {
                        // There's a true value to the left
                        offsetsToInterpolate[2] = trueRuns[row].get(position - 1);
                        valuesToInterpolate[2] = values
                                .getShort(2 * (row * resolution + offsetsToInterpolate[2]));
                    } else {
                        offsetsToInterpolate[2] = -1;
                    }
                    if (position < trueRuns[row].size()) {
                        // There's a true value to the right
                        offsetsToInterpolate[3] = trueRuns[row].get(position);
                        valuesToInterpolate[3] = values
                                .getShort(2 * (row * resolution + offsetsToInterpolate[3]));
                    } else {
                        offsetsToInterpolate[3] = -1;
                    }
                    short interpolatedValue = interpolatePoint(row, col, offsetsToInterpolate,
                            valuesToInterpolate, fillValue);
                    values.putShort(2 * (row * resolution + col), interpolatedValue);
                }
            }
            // Skip the current empty run and go to the next one
            row1 = row2;
        }
    }
}
From source file:com.glaf.core.util.ByteBufferUtils.java
/** * Compare two ByteBuffer at specified offsets for length. Compares the non * equal bytes as unsigned.//from ww w.j a va 2 s . c o m * * @param bytes1 * First byte buffer to compare. * @param offset1 * Position to start the comparison at in the first array. * @param bytes2 * Second byte buffer to compare. * @param offset2 * Position to start the comparison at in the second array. * @param length * How many bytes to compare? * @return -1 if byte1 is less than byte2, 1 if byte2 is less than byte1 or * 0 if equal. */ public static int compareSubArrays(ByteBuffer bytes1, int offset1, ByteBuffer bytes2, int offset2, int length) { if (null == bytes1) { if (null == bytes2) return 0; else return -1; } if (null == bytes2) return 1; assert bytes1.limit() >= offset1 + length : "The first byte array isn't long enough for the specified offset and length."; assert bytes2.limit() >= offset2 + length : "The second byte array isn't long enough for the specified offset and length."; for (int i = 0; i < length; i++) { byte byte1 = bytes1.get(offset1 + i); byte byte2 = bytes2.get(offset2 + i); if (byte1 == byte2) continue; // compare non-equal bytes as unsigned return (byte1 & 0xFF) < (byte2 & 0xFF) ? -1 : 1; } return 0; }
From source file:com.blm.orc.ReaderImpl.java
/**
 * Ensure this is an ORC file to prevent users from trying to read text
 * files or RC files as ORC files.
 *
 * @param in the file being read
 * @param path the filename for error messages
 * @param psLen the postscript length
 * @param buffer the tail of the file
 * @throws IOException if the tail lacks the ORC magic and the file header
 *                     lacks it too
 */
static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException {
    int len = OrcFile.MAGIC.length();
    // The postscript must be able to hold at least the magic plus one byte.
    if (psLen < len + 1) {
        throw new IOException("Malformed ORC file " + path + ". Invalid postscript length " + psLen);
    }
    // Absolute index of the magic within the backing array: the magic sits
    // just before the final postscript-length byte of the tail.
    // NOTE(review): assumes buffer is array-backed and that position+limit
    // mark the end of the tail — confirm against the caller.
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
    byte[] array = buffer.array();
    // now look for the magic string at the end of the postscript.
    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
        // If it isn't there, this may be the 0.11.0 version of ORC.
        // Read the first MAGIC.length() bytes of the file to check for the header
        in.seek(0);
        byte[] header = new byte[len];
        in.readFully(header, 0, len);
        // if it isn't there, this isn't an ORC file
        if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) {
            throw new IOException("Malformed ORC file " + path + ". Invalid postscript.");
        }
    }
}
From source file:com.glaf.core.util.ByteBufferUtils.java
/** * from to /*from www . j ava 2s . c o m*/ * * @param fromBuffer * Buffer ? flush * @param toBuffer * Buffer ? fill * @return number of bytes moved */ public static int put(ByteBuffer fromBuffer, ByteBuffer toBuffer) { int put; int remaining = fromBuffer.remaining(); if (remaining > 0) { // if (remaining <= toBuffer.remaining()) { toBuffer.put(fromBuffer); put = remaining; // from fromBuffer.position(fromBuffer.limit()); } // heap buffer else if (fromBuffer.hasArray()) { put = toBuffer.remaining(); // ?? toBuffer.put(fromBuffer.array(), fromBuffer.arrayOffset() + fromBuffer.position(), put); fromBuffer.position(fromBuffer.position() + put); } // direct buffer else { // ?? put = toBuffer.remaining(); ByteBuffer slice = fromBuffer.slice(); slice.limit(put); toBuffer.put(slice); fromBuffer.position(fromBuffer.position() + put); } } else { put = 0; } return put; }
From source file:org.apache.hadoop.hive.ql.io.orc.ReaderImpl.java
/** * Ensure this is an ORC file to prevent users from trying to read text * files or RC files as ORC files./* w w w.j a va 2 s .c om*/ * @param in the file being read * @param path the filename for error messages * @param psLen the postscript length * @param buffer the tail of the file * @throws IOException */ static void ensureOrcFooter(FSDataInputStream in, Path path, int psLen, ByteBuffer buffer) throws IOException { int len = OrcFile.MAGIC.length(); if (psLen < len + 1) { throw new FileFormatException("Malformed ORC file " + path + ". Invalid postscript length " + psLen); } int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len; byte[] array = buffer.array(); // now look for the magic string at the end of the postscript. if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) { // If it isn't there, this may be the 0.11.0 version of ORC. // Read the first 3 bytes of the file to check for the header in.seek(0); byte[] header = new byte[len]; in.readFully(header, 0, len); // if it isn't there, this isn't an ORC file if (!Text.decode(header, 0, len).equals(OrcFile.MAGIC)) { throw new FileFormatException("Malformed ORC file " + path + ". Invalid postscript."); } } }
From source file:org.apache.kylin.engine.mr.common.CuboidStatsUtil.java
/**
 * Writes per-cuboid sampling statistics to a Hadoop SequenceFile under
 * {@code outputPath}, keyed by cuboid id ({@code LongWritable} keys,
 * {@code BytesWritable} values).
 *
 * Reserved keys: -1 holds the mapper overlap ratio and 0 holds the
 * sampling percentage; every other key is a cuboid id whose value is the
 * counter's serialized registers.
 *
 * @param conf               Hadoop configuration used to create the writer
 * @param outputPath         directory that receives the statistics file
 * @param cuboidHLLMap       HyperLogLog counter per cuboid id
 * @param samplingPercentage percentage of rows that were sampled
 * @param mapperOverlapRatio overlap ratio measured across mappers
 * @throws IOException if the sequence file cannot be written
 */
public static void writeCuboidStatistics(Configuration conf, Path outputPath, //
        Map<Long, HyperLogLogPlusCounter> cuboidHLLMap, int samplingPercentage, double mapperOverlapRatio)
        throws IOException {
    Path seqFilePath = new Path(outputPath, BatchConstants.CFG_STATISTICS_CUBOID_ESTIMATION_FILENAME);
    // Sort cuboid ids so the file is written in a deterministic order.
    List<Long> allCuboids = new ArrayList<Long>();
    allCuboids.addAll(cuboidHLLMap.keySet());
    Collections.sort(allCuboids);
    // Reusable scratch buffer for serializing each counter's registers.
    ByteBuffer valueBuf = ByteBuffer.allocate(BufferedMeasureEncoder.DEFAULT_BUFFER_SIZE);
    SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(seqFilePath),
            SequenceFile.Writer.keyClass(LongWritable.class), SequenceFile.Writer.valueClass(BytesWritable.class));
    try {
        // mapper overlap ratio at key -1
        writer.append(new LongWritable(-1), new BytesWritable(Bytes.toBytes(mapperOverlapRatio)));
        // sampling percentage at key 0
        writer.append(new LongWritable(0L), new BytesWritable(Bytes.toBytes(samplingPercentage)));
        for (long i : allCuboids) {
            valueBuf.clear();
            cuboidHLLMap.get(i).writeRegisters(valueBuf);
            valueBuf.flip();
            // After the flip, only valueBuf.limit() bytes are valid data.
            writer.append(new LongWritable(i), new BytesWritable(valueBuf.array(), valueBuf.limit()));
        }
    } finally {
        IOUtils.closeQuietly(writer);
    }
}