List of usage examples for com.google.common.primitives.Longs.BYTES

public static final int BYTES: the number of bytes required to represent a primitive long value (8).
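Before the project examples below, a minimal standalone sketch of what the constant provides and its most common use, sizing buffers that hold longs. The class name is illustrative only.

import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;

public class LongsBytesDemo {
  public static void main(String[] args) {
    // Longs.BYTES is the number of bytes in a primitive long: 8.
    System.out.println(Longs.BYTES);    // prints 8

    // Typical use: sizing a ByteBuffer (or byte[]) that stores a long.
    ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES);
    buf.putLong(42L);
    buf.flip();
    System.out.println(buf.getLong());  // prints 42
  }
}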
From source file: io.druid.query.aggregation.first.DoubleFirstAggregatorFactory.java
@Override
public AggregatorFactory getCombiningFactory() {
  return new DoubleFirstAggregatorFactory(name, name) {
    @Override
    public Aggregator factorize(ColumnSelectorFactory metricFactory) {
      final BaseObjectColumnValueSelector selector = metricFactory.makeColumnValueSelector(name);
      return new DoubleFirstAggregator(name, null, null) {
        @Override
        public void aggregate() {
          SerializablePair<Long, Double> pair = (SerializablePair<Long, Double>) selector.getObject();
          if (pair.lhs < firstTime) {
            firstTime = pair.lhs;
            firstValue = pair.rhs;
          }
        }
      };
    }

    @Override
    public BufferAggregator factorizeBuffered(ColumnSelectorFactory metricFactory) {
      final BaseObjectColumnValueSelector selector = metricFactory.makeColumnValueSelector(name);
      return new DoubleFirstBufferAggregator(null, null) {
        @Override
        public void aggregate(ByteBuffer buf, int position) {
          SerializablePair<Long, Double> pair = (SerializablePair<Long, Double>) selector.getObject();
          long firstTime = buf.getLong(position);
          if (pair.lhs < firstTime) {
            buf.putLong(position, pair.lhs);
            buf.putDouble(position + Longs.BYTES, pair.rhs);
          }
        }

        @Override
        public void inspectRuntimeShape(RuntimeShapeInspector inspector) {
          inspector.visit("selector", selector);
        }
      };
    }
  };
}
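The buffer aggregator above packs a long timestamp at the slot's position and a double value immediately after it, which is why the value is written at position + Longs.BYTES. A minimal, self-contained sketch of that layout using only the JDK and Guava; the class and variable names are illustrative, not Druid's.

import com.google.common.primitives.Doubles;
import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;

public class TimestampValueLayout {
  public static void main(String[] args) {
    // One slot holds a long timestamp followed by a double value.
    int slotSize = Longs.BYTES + Doubles.BYTES;   // 8 + 8 = 16 bytes
    ByteBuffer buf = ByteBuffer.allocate(slotSize);
    int position = 0;

    // Write the pair: timestamp at `position`, value right after it.
    buf.putLong(position, 1500000000000L);
    buf.putDouble(position + Longs.BYTES, 3.14);

    // Read it back with the same offsets.
    long firstTime = buf.getLong(position);
    double firstValue = buf.getDouble(position + Longs.BYTES);
    System.out.println(firstTime + " -> " + firstValue);
  }
}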
From source file: com.metamx.druid.index.v1.CompressedLongsIndexedSupplier.java
public static CompressedLongsIndexedSupplier fromLongBuffer(LongBuffer buffer, final ByteOrder byteOrder) {
  return fromLongBuffer(buffer, 0xFFFF / Longs.BYTES, byteOrder);
}
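The default chunk factor here is 0xFFFF / Longs.BYTES, i.e. 65535 / 8 = 8191 longs, which keeps each chunk just under the 64 KB limit that the overload shown further below enforces. A quick standalone check of that arithmetic (illustrative only):

import com.google.common.primitives.Longs;

public class ChunkFactorCheck {
  public static void main(String[] args) {
    int chunkFactor = 0xFFFF / Longs.BYTES;       // 65535 / 8 = 8191 longs per chunk
    int chunkBytes = chunkFactor * Longs.BYTES;   // 8191 * 8 = 65528 bytes
    System.out.println(chunkFactor + " longs = " + chunkBytes + " bytes (<= " + 0xFFFF + ")");
  }
}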
From source file: org.commoncrawl.util.ByteArrayUtils.java
/**
 * Lexicographically compare two arrays.
 *
 * @param buffer1 left operand
 * @param buffer2 right operand
 * @param offset1 Where to start comparing in the left buffer
 * @param offset2 Where to start comparing in the right buffer
 * @param length1 How much to compare from the left buffer
 * @param length2 How much to compare from the right buffer
 * @return 0 if equal, < 0 if left is less than right, etc.
 */
public static int compareBytes(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
    int length2) {
  // Short circuit equal case
  if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) {
    return 0;
  }
  int minLength = Math.min(length1, length2);
  int minWords = minLength / Longs.BYTES;
  int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
  int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;

  /*
   * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
   * time is no slower than comparing 4 bytes at a time even on 32-bit.
   * On the other hand, it is substantially faster on 64-bit.
   */
  for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
    long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
    long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
    long diff = lw ^ rw;
    if (diff != 0) {
      if (!littleEndian) {
        return lessThanUnsigned(lw, rw) ? -1 : 1;
      }

      // Use binary search
      int n = 0;
      int y;
      int x = (int) diff;
      if (x == 0) {
        x = (int) (diff >>> 32);
        n = 32;
      }
      y = x << 16;
      if (y == 0) {
        n += 16;
      } else {
        x = y;
      }
      y = x << 8;
      if (y == 0) {
        n += 8;
      }
      return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
    }
  }

  // The epilogue to cover the last (minLength % 8) elements.
  for (int i = minWords * Longs.BYTES; i < minLength; i++) {
    int result = UnsignedBytes.compare(buffer1[offset1 + i], buffer2[offset2 + i]);
    if (result != 0) {
      return result;
    }
  }
  return length1 - length2;
}
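The version above depends on sun.misc.Unsafe field access. A portable sketch of the same idea, comparing Longs.BYTES-wide words read big-endian so that unsigned word comparison matches byte-by-byte lexicographic order; the class and method names are hypothetical and the sketch only handles whole arrays rather than offset/length slices.

import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class PortableLexCompare {
  // Hypothetical helper: lexicographically compares two whole byte arrays.
  static int compare(byte[] a, byte[] b) {
    int minLength = Math.min(a.length, b.length);
    int minWords = minLength / Longs.BYTES;
    // Big-endian reads make the numeric order of words equal to byte order.
    ByteBuffer bufA = ByteBuffer.wrap(a).order(ByteOrder.BIG_ENDIAN);
    ByteBuffer bufB = ByteBuffer.wrap(b).order(ByteOrder.BIG_ENDIAN);

    // Compare 8 bytes at a time.
    for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
      int cmp = Long.compareUnsigned(bufA.getLong(i), bufB.getLong(i));
      if (cmp != 0) {
        return cmp;
      }
    }
    // Epilogue: the trailing (minLength % 8) bytes, compared as unsigned bytes.
    for (int i = minWords * Longs.BYTES; i < minLength; i++) {
      int cmp = UnsignedBytes.compare(a[i], b[i]);
      if (cmp != 0) {
        return cmp;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    System.out.println(compare(new byte[] {1, 2, 3}, new byte[] {1, 2, 4}));  // negative
  }
}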
From source file: io.druid.query.aggregation.variance.VarianceAggregatorCollector.java
public ByteBuffer toByteBuffer() {
  return ByteBuffer.allocate(Longs.BYTES + Doubles.BYTES + Doubles.BYTES)
      .putLong(count)
      .putDouble(sum)
      .putDouble(nvariance);
}
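The collector serializes itself as one long followed by two doubles, 24 bytes in total. A minimal sketch of writing and reading such a buffer with matching widths and order; the sample values and local names are illustrative, not Druid's deserialization code.

import com.google.common.primitives.Doubles;
import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;

public class VarianceBufferRoundTrip {
  public static void main(String[] args) {
    // Encode: layout is [count: long][sum: double][nvariance: double].
    ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES + Doubles.BYTES + Doubles.BYTES)
        .putLong(10L)
        .putDouble(55.0)
        .putDouble(82.5);
    buf.flip();

    // Decode in the same order, using the same widths.
    long count = buf.getLong();
    double sum = buf.getDouble();
    double nvariance = buf.getDouble();
    System.out.println(count + ", " + sum + ", " + nvariance);
  }
}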
From source file: com.metamx.druid.index.v1.CompressedLongsIndexedSupplier.java
public static CompressedLongsIndexedSupplier fromLongBuffer(final LongBuffer buffer, final int chunkFactor,
    final ByteOrder byteOrder) {
  Preconditions.checkArgument(chunkFactor * Longs.BYTES <= 0xffff,
      "Chunks must be <= 64k bytes. chunkFactor was[%s]", chunkFactor);

  return new CompressedLongsIndexedSupplier(buffer.remaining(), chunkFactor,
      GenericIndexed.fromIterable(new Iterable<ResourceHolder<LongBuffer>>() {
        @Override
        public Iterator<ResourceHolder<LongBuffer>> iterator() {
          return new Iterator<ResourceHolder<LongBuffer>>() {
            LongBuffer myBuffer = buffer.asReadOnlyBuffer();

            @Override
            public boolean hasNext() {
              return myBuffer.hasRemaining();
            }

            @Override
            public ResourceHolder<LongBuffer> next() {
              LongBuffer retVal = myBuffer.asReadOnlyBuffer();
              if (chunkFactor < myBuffer.remaining()) {
                retVal.limit(retVal.position() + chunkFactor);
              }
              myBuffer.position(myBuffer.position() + retVal.remaining());
              return StupidResourceHolder.create(retVal);
            }

            @Override
            public void remove() {
              throw new UnsupportedOperationException();
            }
          };
        }
      }, CompressedLongBufferObjectStrategy.getBufferForOrder(byteOrder)));
}
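The anonymous iterator above carves a read-only LongBuffer into chunks of at most chunkFactor longs by adjusting the limit of each view and advancing the parent buffer's position. A standalone sketch of that slicing technique using only java.nio and Guava (the class name, chunk factor, and sample data are illustrative):

import com.google.common.primitives.Longs;
import java.nio.LongBuffer;

public class LongBufferChunks {
  public static void main(String[] args) {
    LongBuffer source = LongBuffer.wrap(new long[] {1, 2, 3, 4, 5, 6, 7});
    int chunkFactor = 3;

    // Mirrors the precondition above: the chunk size in bytes must stay <= 0xFFFF.
    if (chunkFactor * Longs.BYTES > 0xFFFF) {
      throw new IllegalArgumentException("chunk too large");
    }

    LongBuffer myBuffer = source.asReadOnlyBuffer();
    while (myBuffer.hasRemaining()) {
      // Take a read-only view, then cap it at chunkFactor longs if more remain.
      LongBuffer chunk = myBuffer.asReadOnlyBuffer();
      if (chunkFactor < myBuffer.remaining()) {
        chunk.limit(chunk.position() + chunkFactor);
      }
      // Advance the parent past everything the chunk covers.
      myBuffer.position(myBuffer.position() + chunk.remaining());

      StringBuilder sb = new StringBuilder("chunk:");
      while (chunk.hasRemaining()) {
        sb.append(' ').append(chunk.get());
      }
      System.out.println(sb);  // chunk: 1 2 3 / chunk: 4 5 6 / chunk: 7
    }
  }
}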
From source file: io.druid.query.metadata.SegmentAnalyzer.java
public ColumnAnalysis analyzeLongColumn(Column column, EnumSet<SegmentMetadataQuery.AnalysisType> analysisTypes) {
  return lengthBasedAnalysis(column, Longs.BYTES, analysisTypes);
}
From source file: co.cask.cdap.internal.io.ReflectionReader.java
protected Object doRead(FROM source, Schema sourceSchema, Schema targetSchema, TypeToken<?> targetTypeToken)
    throws IOException {
  Schema.Type sourceType = sourceSchema.getType();
  Schema.Type targetType = targetSchema.getType();

  switch (sourceType) {
  case NULL:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    return readNull(source);
  case BYTES:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    ByteBuffer buffer = readBytes(source);

    if (targetTypeToken.getRawType().equals(byte[].class)) {
      if (buffer.hasArray()) {
        byte[] array = buffer.array();
        if (buffer.remaining() == array.length) {
          return array;
        }
        byte[] bytes = new byte[buffer.remaining()];
        System.arraycopy(array, buffer.arrayOffset() + buffer.position(), bytes, 0, buffer.remaining());
        return bytes;
      } else {
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        return bytes;
      }
    } else if (targetTypeToken.getRawType().equals(UUID.class) && buffer.remaining() == Longs.BYTES * 2) {
      return new UUID(buffer.getLong(), buffer.getLong());
    }
    return buffer;
  case ENUM:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    return readEnum(source, sourceSchema, targetSchema, targetTypeToken);
  case ARRAY:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    return readArray(source, sourceSchema, targetSchema, targetTypeToken);
  case MAP:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    return readMap(source, sourceSchema, targetSchema, targetTypeToken);
  case RECORD:
    check(sourceType == targetType, "Fails to resolve %s to %s", sourceType, targetType);
    return readRecord(source, sourceSchema, targetSchema, targetTypeToken);
  case UNION:
    return readUnion(source, sourceSchema, targetSchema, targetTypeToken);
  }

  // For simple type other than NULL and BYTES
  if (sourceType.isSimpleType()) {
    return resolveType(source, sourceType, targetType, targetTypeToken);
  }
  throw new IOException(String.format("Fails to resolve %s to %s", sourceSchema, targetSchema));
}
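The BYTES branch above treats a payload of exactly Longs.BYTES * 2 (16) bytes as a UUID by reading its two halves as longs. A minimal round-trip sketch of that encoding using only the JDK and Guava; the class name is illustrative.

import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;
import java.util.UUID;

public class UuidBytesRoundTrip {
  public static void main(String[] args) {
    UUID original = UUID.randomUUID();

    // Encode: a UUID is exactly two longs, i.e. Longs.BYTES * 2 = 16 bytes.
    ByteBuffer buffer = ByteBuffer.allocate(Longs.BYTES * 2);
    buffer.putLong(original.getMostSignificantBits());
    buffer.putLong(original.getLeastSignificantBits());
    buffer.flip();

    // Decode: the same size check and constructor as the reader above.
    if (buffer.remaining() == Longs.BYTES * 2) {
      UUID decoded = new UUID(buffer.getLong(), buffer.getLong());
      System.out.println(original.equals(decoded));  // true
    }
  }
}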
From source file: co.cask.common.internal.io.ReflectionDatumReader.java
private Object readBytes(Decoder decoder, TypeToken<?> targetTypeToken) throws IOException {
  ByteBuffer buffer = decoder.readBytes();

  if (targetTypeToken.getRawType().equals(byte[].class)) {
    if (buffer.hasArray()) {
      byte[] array = buffer.array();
      if (buffer.remaining() == array.length) {
        return array;
      }
      byte[] bytes = new byte[buffer.remaining()];
      System.arraycopy(array, buffer.arrayOffset() + buffer.position(), bytes, 0, buffer.remaining());
      return bytes;
    } else {
      byte[] bytes = new byte[buffer.remaining()];
      buffer.get(bytes);
      return bytes;
    }
  } else if (targetTypeToken.getRawType().equals(UUID.class) && buffer.remaining() == Longs.BYTES * 2) {
    return new UUID(buffer.getLong(), buffer.getLong());
  }
  return buffer;
}
From source file: co.cask.tigon.data.transaction.queue.hbase.HBaseQueueAdmin.java
/**
 * Returns the column qualifier for the consumer state column. The qualifier is formed by
 * {@code <groupId><instanceId>}.
 *
 * @param groupId Group ID of the consumer
 * @param instanceId Instance ID of the consumer
 * @return A new byte[] which is the column qualifier.
 */
public static byte[] getConsumerStateColumn(long groupId, int instanceId) {
  byte[] column = new byte[Longs.BYTES + Ints.BYTES];
  Bytes.putLong(column, 0, groupId);
  Bytes.putInt(column, Longs.BYTES, instanceId);
  return column;
}
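The qualifier built above is a fixed 12-byte layout: an 8-byte group ID followed by a 4-byte instance ID starting at offset Longs.BYTES. A sketch of producing and decoding that layout with plain java.nio instead of the HBase-style Bytes helper (ByteBuffer's default big-endian order matches Bytes.putLong); the encode helper name is hypothetical.

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import java.nio.ByteBuffer;

public class ConsumerStateColumnLayout {
  // Hypothetical equivalent of the method above, using ByteBuffer instead of Bytes.
  static byte[] encode(long groupId, int instanceId) {
    return ByteBuffer.allocate(Longs.BYTES + Ints.BYTES)
        .putLong(groupId)
        .putInt(instanceId)
        .array();
  }

  public static void main(String[] args) {
    byte[] column = encode(42L, 7);
    System.out.println(column.length);  // 12 bytes: 8 (group ID) + 4 (instance ID)

    // Decode with the same offsets: long at 0, int at Longs.BYTES.
    ByteBuffer buf = ByteBuffer.wrap(column);
    long groupId = buf.getLong(0);
    int instanceId = buf.getInt(Longs.BYTES);
    System.out.println(groupId + " / " + instanceId);  // 42 / 7
  }
}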
From source file: co.cask.cdap.data.stream.StreamDataFileIndex.java
private Map.Entry<LongList, LongList> loadIndex(InputStream input) throws IOException {
  byte[] magic = new byte[INDEX_MAGIC_HEADER.length];
  ByteStreams.readFully(input, magic);

  if (!Arrays.equals(magic, INDEX_MAGIC_HEADER)) {
    throw new IOException("Unsupported index file format. Expected magic bytes as 'I' '1'");
  }

  // Decode the properties map. In current version, it is not used.
  StreamUtils.decodeMap(new BinaryDecoder(input));

  // Read in all index (timestamp, position pairs).
  LongList timestamps = new LongArrayList(1000);
  LongList positions = new LongArrayList(1000);

  byte[] buf = new byte[Longs.BYTES * 2];
  while (ByteStreams.read(input, buf, 0, buf.length) == buf.length) {
    timestamps.add(Bytes.toLong(buf, 0));
    positions.add(Bytes.toLong(buf, Longs.BYTES));
  }

  return Maps.immutableEntry(timestamps, positions);
}
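Each index entry read above is a fixed-width record of two longs (timestamp, then position), so the loop consumes the stream in Longs.BYTES * 2 slices until fewer than 16 bytes remain. A sketch of writing and re-reading such entries with the JDK and Guava; the magic header and properties map of the real file format are deliberately left out, and the sample values are illustrative.

import com.google.common.io.ByteStreams;
import com.google.common.primitives.Longs;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

public class TimestampPositionIndex {
  public static void main(String[] args) throws IOException {
    // Write a few (timestamp, position) pairs, each Longs.BYTES * 2 = 16 bytes wide.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    long[][] entries = { {1000L, 0L}, {2000L, 4096L}, {3000L, 8192L} };
    for (long[] entry : entries) {
      out.write(Longs.toByteArray(entry[0]));  // timestamp (big-endian)
      out.write(Longs.toByteArray(entry[1]));  // position (big-endian)
    }

    // Read them back the same way the loop above does: one fixed-width record at a time.
    InputStream in = new ByteArrayInputStream(out.toByteArray());
    byte[] buf = new byte[Longs.BYTES * 2];
    while (ByteStreams.read(in, buf, 0, buf.length) == buf.length) {
      ByteBuffer pair = ByteBuffer.wrap(buf);
      long timestamp = pair.getLong(0);
      long position = pair.getLong(Longs.BYTES);
      System.out.println(timestamp + " @ " + position);
    }
  }
}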