Example usage for java.lang.Long.BYTES

List of usage examples for java.lang.Long.BYTES

Introduction

This page collects example usages of the java.lang.Long.BYTES constant from open-source projects.

Prototype

public static final int BYTES

Document

The number of bytes used to represent a long value in two's complement binary form.
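Long.BYTES is the constant 8 (64 bits / Byte.SIZE). Before the project examples below, here is a minimal, self-contained sketch (not taken from any of those projects) of the common pattern of sizing a ByteBuffer with Long.BYTES to write a long and read it back:

import java.nio.ByteBuffer;

public class LongBytesDemo {
    public static void main(String[] args) {
        long value = 1234567890123L;

        // Allocate exactly enough room for one long (8 bytes).
        byte[] encoded = ByteBuffer.allocate(Long.BYTES).putLong(value).array();

        // Decode it again from the fixed-width representation.
        long decoded = ByteBuffer.wrap(encoded).getLong();

        System.out.println(encoded.length + " bytes, value = " + decoded);
    }
}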

Usage

From source file:org.springframework.kafka.listener.DeadLetterPublishingRecoverer.java

private void enhanceHeaders(RecordHeaders kafkaHeaders, ConsumerRecord<?, ?> record, Exception exception) {
    kafkaHeaders.add(
            new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC, record.topic().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION,
            ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET,
            ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP,
            ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array()));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE,
            record.timestampType().toString().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_FQCN,
            exception.getClass().getName().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_MESSAGE,
            exception.getMessage().getBytes(StandardCharsets.UTF_8)));
    kafkaHeaders.add(new RecordHeader(KafkaHeaders.DLT_EXCEPTION_STACKTRACE,
            getStackTraceAsString(exception).getBytes(StandardCharsets.UTF_8)));
}
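The numeric headers above are written as fixed-width, big-endian values: Integer.BYTES for the partition, Long.BYTES for the offset and timestamp. A minimal sketch of the reverse direction, assuming the header value has already been extracted from the dead-letter record as a byte[] (the helper name readDltOffset is illustrative, not part of Spring Kafka):

import java.nio.ByteBuffer;

// Illustrative helper: decode the DLT_ORIGINAL_OFFSET header written above.
// The producer side used ByteBuffer.allocate(Long.BYTES).putLong(...),
// so the value is exactly Long.BYTES big-endian bytes.
static long readDltOffset(byte[] headerValue) {
    return ByteBuffer.wrap(headerValue).getLong();
}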

From source file:io.druid.segment.data.CompressedVSizeColumnarIntsSerializerTest.java

private void checkV2SerializedSizeAndData(int chunkSize) throws Exception {
    File tmpDirectory = FileUtils.getTempDirectory();
    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);

    GenericIndexedWriter genericIndexed = GenericIndexedWriter.ofCompressedByteBuffers(segmentWriteOutMedium,
            "test", compressionStrategy, Long.BYTES * 10000);
    CompressedVSizeColumnarIntsSerializer writer = new CompressedVSizeColumnarIntsSerializer(
            segmentWriteOutMedium, vals.length > 0 ? Ints.max(vals) : 0, chunkSize, byteOrder,
            compressionStrategy, genericIndexed);
    writer.open();
    for (int val : vals) {
        writer.addValue(val);
    }

    final SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", writer.getSerializedSize());
    writer.writeTo(channel, smoosher);
    channel.close();
    smoosher.close();

    SmooshedFileMapper mapper = Smoosh.map(tmpDirectory);

    CompressedVSizeColumnarIntsSupplier supplierFromByteBuffer = CompressedVSizeColumnarIntsSupplier
            .fromByteBuffer(mapper.mapFile("test"), byteOrder);

    ColumnarInts columnarInts = supplierFromByteBuffer.get();
    for (int i = 0; i < vals.length; ++i) {
        assertEquals(vals[i], columnarInts.get(i));
    }
    CloseQuietly.close(columnarInts);
    mapper.close();
}

From source file:org.apache.druid.segment.data.CompressedVSizeColumnarIntsSerializerTest.java

private void checkV2SerializedSizeAndData(int chunkSize) throws Exception {
    File tmpDirectory = temporaryFolder.newFolder();
    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);

    GenericIndexedWriter genericIndexed = GenericIndexedWriter.ofCompressedByteBuffers(segmentWriteOutMedium,
            "test", compressionStrategy, Long.BYTES * 10000);
    CompressedVSizeColumnarIntsSerializer writer = new CompressedVSizeColumnarIntsSerializer(
            segmentWriteOutMedium, vals.length > 0 ? Ints.max(vals) : 0, chunkSize, byteOrder,
            compressionStrategy, genericIndexed);
    writer.open();
    for (int val : vals) {
        writer.addValue(val);
    }

    final SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", writer.getSerializedSize());
    writer.writeTo(channel, smoosher);
    channel.close();
    smoosher.close();

    SmooshedFileMapper mapper = Smoosh.map(tmpDirectory);

    CompressedVSizeColumnarIntsSupplier supplierFromByteBuffer = CompressedVSizeColumnarIntsSupplier
            .fromByteBuffer(mapper.mapFile("test"), byteOrder);

    ColumnarInts columnarInts = supplierFromByteBuffer.get();
    for (int i = 0; i < vals.length; ++i) {
        assertEquals(vals[i], columnarInts.get(i));
    }
    CloseQuietly.close(columnarInts);
    mapper.close();
}

From source file:io.druid.segment.data.CompressedColumnarIntsSerializerTest.java

private void checkV2SerializedSizeAndData(int chunkFactor) throws Exception {
    File tmpDirectory = Files
            .createTempDirectory(StringUtils.format("CompressedIntsIndexedWriterTest_%d", chunkFactor))
            .toFile();

    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);

    CompressedColumnarIntsSerializer writer = new CompressedColumnarIntsSerializer(segmentWriteOutMedium,
            chunkFactor, byteOrder, compressionStrategy, GenericIndexedWriter.ofCompressedByteBuffers(
                    segmentWriteOutMedium, "test", compressionStrategy, Long.BYTES * 10000));

    writer.open();
    for (int val : vals) {
        writer.addValue(val);
    }
    final SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", writer.getSerializedSize());
    writer.writeTo(channel, smoosher);
    channel.close();
    smoosher.close();

    SmooshedFileMapper mapper = Smoosh.map(tmpDirectory);

    // read from ByteBuffer and check values
    CompressedColumnarIntsSupplier supplierFromByteBuffer = CompressedColumnarIntsSupplier
            .fromByteBuffer(mapper.mapFile("test"), byteOrder);
    ColumnarInts columnarInts = supplierFromByteBuffer.get();
    assertEquals(vals.length, columnarInts.size());
    for (int i = 0; i < vals.length; ++i) {
        assertEquals(vals[i], columnarInts.get(i));
    }
    CloseQuietly.close(columnarInts);
    mapper.close();
}
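All three Druid tests pass Long.BYTES * 10000 as the last argument to GenericIndexedWriter.ofCompressedByteBuffers, sizing the writer for 10,000 long values (80,000 bytes). The same sizing arithmetic in isolation, as a plain-Java sketch (the helper name is illustrative):

// Plain-Java illustration of the "Long.BYTES * 10000" sizing used above:
// reserve space for a fixed number of 8-byte values.
static int sizeForLongs(int valueCount) {
    return Long.BYTES * valueCount;   // 10_000 values -> 80_000 bytes
}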

From source file:de.micromata.genome.logging.spi.ifiles.IndexHeader.java

/**
 * Finds candidate index rows whose timestamps fall between start and end.
 *
 * @param start when to start
 * @param end when to end
 * @param mem the buffer in the memory
 * @param filesize the size of the file
 * @return list of (start offset, end offset) pairs
 */
public List<Pair<Integer, Integer>> getCandiates(Timestamp start, Timestamp end, MappedByteBuffer mem,
        int filesize) {
    List<Pair<Integer, Integer>> ret = new ArrayList<>();

    int pos = filesize - ROW_LENGTH;
    while (pos >= 0) {
        long logt = mem.getLong(pos);
        if (start != null && start.getTime() > logt) {
            // entry is older than the requested start; step to the previous row
            pos -= ROW_LENGTH;
            continue;
        }
        if (end != null && end.getTime() < logt) {
            break;
        }
        int offset = mem.getInt(pos + Long.BYTES);
        int endOfset = -1;
        if (pos + ROW_LENGTH + Integer.BYTES < filesize) {
            endOfset = mem.getInt(pos + Long.BYTES + ROW_LENGTH);
        }
        if (offset == endOfset) {
            // oops
            System.out.println("Oops");
        }
        ret.add(Pair.make(offset, endOfset));
        pos -= ROW_LENGTH;
    }
    return ret;
}

From source file:de.micromata.genome.logging.spi.ifiles.IndexDirectory.java

public List<String> getLogFileCandiates(Timestamp start, Timestamp end) {
    TreeMap<Long, String> files = new TreeMap<>();

    //    int offset = HEADER_SIZE;
    int max = getWritePos() - ROW_SIZE;
    int offset = max;

    while (offset >= HEADER_SIZE) {
        long st = indexByteBuffer.getLong(offset);
        if (start != null) {
            if (st < start.getTime()) {
                offset -= ROW_SIZE;
                continue;
            }
        }
        if (end != null) {
            long et = indexByteBuffer.getLong(offset + Long.BYTES);
            if (et > end.getTime()) {
                offset -= ROW_SIZE;
                continue;
            }
        }
        indexByteBuffer.position(offset + Long.BYTES + Long.BYTES);
        byte[] nameBuffer = new byte[LOG_FILE_NAME_SIZE];
        indexByteBuffer.get(nameBuffer);
        String trimmed = new String(nameBuffer).trim();
        files.put(st, trimmed);
        offset -= ROW_SIZE;
    }
    List<String> ret = new ArrayList<>(files.values());
    return ret;
}
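Both index readers above walk fixed-width rows backwards from the end of a buffer and use Long.BYTES to locate the fields inside each row. A minimal sketch of the corresponding write side for the IndexDirectory layout implied by the reads ([long start][long end][LOG_FILE_NAME_SIZE name bytes]); the method name and padding choice are assumptions, not taken from the project:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Assumed row layout, mirroring the reader above:
// [long startTime][long endTime][logFileNameSize bytes of padded file name]
static byte[] encodeIndexRow(long startTime, long endTime, String logFileName, int logFileNameSize) {
    ByteBuffer row = ByteBuffer.allocate(Long.BYTES + Long.BYTES + logFileNameSize);
    row.putLong(startTime);
    row.putLong(endTime);
    // Zero-pad (or truncate) the name to the fixed width; the reader trims the padding.
    row.put(Arrays.copyOf(logFileName.getBytes(StandardCharsets.UTF_8), logFileNameSize));
    return row.array();
}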

From source file:org.apache.bookkeeper.statelib.impl.kv.RocksdbKVStore.java

@Override
public synchronized void checkpoint() {
    log.info("Checkpoint local state store {} at revision {}", name, getLastRevision());
    byte[] checkpointAtRevisionBytes = new byte[Long.BYTES];
    System.arraycopy(lastRevisionBytes, 0, checkpointAtRevisionBytes, 0, checkpointAtRevisionBytes.length);
    checkpointScheduler.submit(() -> {
        try {
            // TODO: move create checkpoint to the checkpoint method
            checkpointer.checkpointAtTxid(checkpointAtRevisionBytes);
        } catch (StateStoreException e) {
            log.error("Failed to checkpoint state store {} at revision {}", name,
                    Bytes.toLong(checkpointAtRevisionBytes, 0), e);
        }
    });
}
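Here lastRevisionBytes holds the store's last revision as a Long.BYTES-wide array, and Bytes.toLong performs the reverse conversion when logging. A plain-Java equivalent of that round trip, for illustration only (the method names are not from the project):

import java.nio.ByteBuffer;

// Illustrative long <-> byte[Long.BYTES] round trip.
static byte[] revisionToBytes(long revision) {
    return ByteBuffer.allocate(Long.BYTES).putLong(revision).array();
}

static long bytesToRevision(byte[] revisionBytes) {
    return ByteBuffer.wrap(revisionBytes, 0, Long.BYTES).getLong();
}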

From source file:io.pravega.controller.store.stream.tables.HistoryRecord.java

private void toBytePartial(byte[] b) {
    // length
    BitConverter.writeInt(b, lengthOffset(), length);
    // epoch
    BitConverter.writeInt(b, epochOffset(), epoch);
    // segments
    for (int i = 0; i < segments.size(); i++) {
        BitConverter.writeInt(b, segmentOffset() + i * Integer.BYTES, segments.get(i));
    }
    // start offset
    BitConverter.writeInt(b, offsetOffset(segments.size()), length - Long.BYTES - Integer.BYTES);
}

From source file:io.pravega.controller.store.stream.tables.HistoryRecord.java

private void remainingByteArray(byte[] b, int start) {
    BitConverter.writeLong(b, start, scaleTime);
    BitConverter.writeInt(b, start + Long.BYTES, length);
}

From source file:io.pravega.controller.store.stream.tables.HistoryRecord.java

private static int tailLengthOffset(int segmentCount) {
    return scaleTimeOffset(segmentCount) + Long.BYTES;
}