List of usage examples for java.nio.ByteBuffer.arrayOffset()
public final int arrayOffset()
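arrayOffset() returns the index within the backing array at which the buffer's first element is stored, so code that indexes array() directly must add it (usually together with position()) to reach the bytes the buffer actually exposes. The following is a minimal, self-contained sketch of that pattern; the class and method names (ArrayOffsetDemo, remainingBytes) are illustrative and do not come from the examples below.

import java.nio.ByteBuffer;
import java.util.Arrays;

public class ArrayOffsetDemo {

    // Copy the readable bytes of a heap buffer without changing its position.
    static byte[] remainingBytes(ByteBuffer buf) {
        // array() and arrayOffset() are only legal on buffers backed by an accessible array.
        if (!buf.hasArray()) {
            throw new IllegalArgumentException("buffer has no accessible backing array");
        }
        int start = buf.arrayOffset() + buf.position(); // absolute index of the first readable byte
        return Arrays.copyOfRange(buf.array(), start, start + buf.remaining());
    }

    public static void main(String[] args) {
        byte[] backing = { 0, 1, 2, 3, 4, 5, 6, 7 };
        // A slice over indices 3..7 reports arrayOffset() == 3 and position() == 0,
        // so indexing array() by position() alone would read the wrong bytes.
        ByteBuffer slice = ByteBuffer.wrap(backing, 3, 4).slice();
        System.out.println(Arrays.toString(remainingBytes(slice))); // [3, 4, 5, 6]
    }
}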
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(short[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Short.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:cn.iie.haiep.hbase.value.Bytes.java
/**
 * Returns a new byte array, copied from the passed ByteBuffer.
 *
 * @param bb A ByteBuffer
 * @return the byte array
 */
public static byte[] toBytes(ByteBuffer bb) {
    int length = bb.limit();
    byte[] result = new byte[length];
    System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length);
    return result;
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(double[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Double.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(int[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Integer.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java
private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr, ByteBuffer buf) {
    assertEquals(
            msgPrefix + ": expected " + Bytes.toStringBinary(arr) + ", actual " + Bytes.toStringBinary(buf),
            0, Bytes.compareTo(arr, 0, arr.length, buf.array(), buf.arrayOffset(), buf.limit()));
}
From source file:org.apache.hadoop.hbase.io.hfile.HFileReaderV1.java
@Override
public DataInput getGeneralBloomFilterMetadata() throws IOException {
    // Shouldn't cache Bloom filter blocks, otherwise server would abort when
    // splitting, see HBASE-6479
    ByteBuffer buf = getMetaBlock(HFileWriterV1.BLOOM_FILTER_META_KEY, false);
    if (buf == null)
        return null;
    ByteArrayInputStream bais = new ByteArrayInputStream(buf.array(), buf.arrayOffset(), buf.limit());
    return new DataInputStream(bais);
}
From source file:org.apache.hadoop.hive.serde2.thrift.TBinarySortableProtocol.java
@Override
public void writeBinary(ByteBuffer bin) throws TException {
    if (bin == null) {
        writeRawBytes(nullByte, 0, 1);
        return;
    }

    int length = bin.limit() - bin.position() - bin.arrayOffset();
    if (bin.hasArray()) {
        writeBinary(bin.array(), bin.arrayOffset() + bin.position(), length);
    } else {
        byte[] copy = new byte[length];
        bin.get(copy);
        writeBinary(copy);
    }
}
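array() and arrayOffset() may only be called on buffers backed by an accessible array; on direct or read-only buffers they throw, which is why the example above guards with hasArray() and falls back to a bulk get(). A minimal, self-contained sketch of the same guard follows; the class and helper names (SafeBytesDemo, remaining) are illustrative and not taken from the Hive source.

import java.nio.ByteBuffer;

public class SafeBytesDemo {

    // Illustrative helper: copy the readable bytes whether or not the buffer
    // exposes its backing array.
    static byte[] remaining(ByteBuffer buf) {
        int length = buf.remaining();
        byte[] out = new byte[length];
        if (buf.hasArray()) {
            // Heap buffer: read straight from the backing array, honoring arrayOffset().
            System.arraycopy(buf.array(), buf.arrayOffset() + buf.position(), out, 0, length);
        } else {
            // Direct or read-only buffer: array()/arrayOffset() would throw here,
            // so copy through the buffer API without disturbing the original position.
            buf.duplicate().get(out);
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(remaining(ByteBuffer.wrap(new byte[] { 1, 2, 3 })).length); // 3
        System.out.println(remaining(ByteBuffer.allocateDirect(4)).length);            // 4
    }
}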
From source file:org.apache.kylin.engine.spark.SparkCubing.java
private void writeToHFile2(final JavaPairRDD<byte[], byte[]> javaPairRDD, final String[] dataTypes,
        final int measureSize, final MeasureAggregators aggs, final byte[][] splitKeys,
        final Configuration conf, final String hFileLocation) {
    javaPairRDD.repartitionAndSortWithinPartitions(new Partitioner() {
        @Override
        public int numPartitions() {
            return splitKeys.length + 1;
        }

        @Override
        public int getPartition(Object key) {
            Preconditions.checkArgument(key instanceof byte[]);
            for (int i = 0, n = splitKeys.length; i < n; ++i) {
                if (UnsignedBytes.lexicographicalComparator().compare((byte[]) key, splitKeys[i]) < 0) {
                    return i;
                }
            }
            return splitKeys.length;
        }
    }, UnsignedBytes.lexicographicalComparator())
            .mapPartitions(new FlatMapFunction<Iterator<Tuple2<byte[], byte[]>>, Tuple2<byte[], byte[]>>() {
                @Override
                public Iterable<Tuple2<byte[], byte[]>> call(
                        final Iterator<Tuple2<byte[], byte[]>> tuple2Iterator) throws Exception {
                    return new Iterable<Tuple2<byte[], byte[]>>() {
                        final BufferedMeasureCodec codec = new BufferedMeasureCodec(dataTypes);
                        final Object[] input = new Object[measureSize];
                        final Object[] result = new Object[measureSize];

                        @Override
                        public Iterator<Tuple2<byte[], byte[]>> iterator() {
                            return IteratorUtils.merge(tuple2Iterator, UnsignedBytes.lexicographicalComparator(),
                                    new Function<Iterable<byte[]>, byte[]>() {
                                        @Override
                                        public byte[] call(Iterable<byte[]> v1) throws Exception {
                                            final LinkedList<byte[]> list = Lists.newLinkedList(v1);
                                            if (list.size() == 1) {
                                                return list.get(0);
                                            }
                                            aggs.reset();
                                            for (byte[] v : list) {
                                                codec.decode(ByteBuffer.wrap(v), input);
                                                aggs.aggregate(input);
                                            }
                                            aggs.collectStates(result);
                                            ByteBuffer buffer = codec.encode(result);
                                            byte[] bytes = new byte[buffer.position()];
                                            System.arraycopy(buffer.array(), buffer.arrayOffset(), bytes, 0,
                                                    buffer.position());
                                            return bytes;
                                        }
                                    });
                        }
                    };
                }
            }, true).mapToPair(new PairFunction<Tuple2<byte[], byte[]>, ImmutableBytesWritable, KeyValue>() {
                @Override
                public Tuple2<ImmutableBytesWritable, KeyValue> call(Tuple2<byte[], byte[]> tuple2)
                        throws Exception {
                    ImmutableBytesWritable key = new ImmutableBytesWritable(tuple2._1());
                    KeyValue value = new KeyValue(tuple2._1(), "F1".getBytes(), "M".getBytes(), tuple2._2());
                    return new Tuple2(key, value);
                }
            }).saveAsNewAPIHadoopFile(hFileLocation, ImmutableBytesWritable.class, KeyValue.class,
                    HFileOutputFormat.class, conf);
}
From source file:org.apache.cassandra.hadoop.pig.CassandraStorage.java
@Override
public Tuple getNext() throws IOException {
    try {
        // load the next pair
        if (!reader.nextKeyValue())
            return null;

        CfDef cfDef = getCfDef();
        ByteBuffer key = (ByteBuffer) reader.getCurrentKey();
        SortedMap<ByteBuffer, IColumn> cf = (SortedMap<ByteBuffer, IColumn>) reader.getCurrentValue();
        assert key != null && cf != null;

        // and wrap it in a tuple
        Tuple tuple = TupleFactory.getInstance().newTuple(2);
        ArrayList<Tuple> columns = new ArrayList<Tuple>();
        tuple.set(0, new DataByteArray(key.array(), key.position() + key.arrayOffset(),
                key.limit() + key.arrayOffset()));
        for (Map.Entry<ByteBuffer, IColumn> entry : cf.entrySet()) {
            columns.add(columnToTuple(entry.getKey(), entry.getValue(), cfDef));
        }
        tuple.set(1, new DefaultDataBag(columns));
        return tuple;
    } catch (InterruptedException e) {
        throw new IOException(e.getMessage());
    }
}
From source file:org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2.java
private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey) throws IOException {
    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withCompression(compressAlgo).build();
    HFileWriterV2 writer = (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = randomValue(rand);
        KeyValue keyValue = new KeyValue(keyBytes, null, null, valueBytes);
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    // A "manual" version of a new-format HFile reader. This unit test was
    // written before the V2 reader was fully implemented.

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(2, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());

    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(false)
            .withIncludesTags(false).withCompression(compressAlgo).build();

    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());

    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());

    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        while (buf.hasRemaining()) {
            int keyLen = buf.getInt();
            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);

            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.
    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}