Example usage for io.netty.buffer PooledByteBufAllocator DEFAULT

Introduction

This page collects usage examples for the io.netty.buffer field PooledByteBufAllocator.DEFAULT.

Prototype

public static final PooledByteBufAllocator DEFAULT

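DEFAULT is the shared allocator instance used throughout the examples below. As a minimal sketch of the typical allocate/write/release cycle (the capacity and written value here are arbitrary):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public class PooledAllocatorExample {
    public static void main(String[] args) {
        // Allocate a pooled buffer; DEFAULT is shared process-wide.
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(Long.BYTES);
        try {
            buf.writeLong(42L);
            System.out.println("readable bytes: " + buf.readableBytes());
        } finally {
            buf.release(); // return the memory to the pool
        }
    }
}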

Usage

From source file: org.apache.bookkeeper.mledger.offload.jcloud.impl.OffloadIndexBlockImpl.java

License: Apache License

/**
 * Get the content of the index block as InputStream.
 * Read out in format:
 *   | index_magic_header | index_block_len | data_object_len | data_header_len |
 *   | index_entry_count  | segment_metadata_len | segment metadata | index entries... |
 */
@Override
public OffloadIndexBlock.IndexInputStream toStream() throws IOException {
    int indexEntryCount = this.indexEntries.size();
    byte[] ledgerMetadataByte = buildLedgerMetadataFormat(this.segmentMetadata);
    int segmentMetadataLength = ledgerMetadataByte.length;

    int indexBlockLength = 4 /* magic header */
            + 4 /* index block length */
            + 8 /* data object length */
            + 8 /* data header length */
            + 4 /* index entry count */
            + 4 /* segment metadata length */
            + segmentMetadataLength
            + indexEntryCount * (8 + 4 + 8); /* messageEntryId + blockPartId + blockOffset */

    ByteBuf out = PooledByteBufAllocator.DEFAULT.buffer(indexBlockLength, indexBlockLength);

    out.writeInt(INDEX_MAGIC_WORD).writeInt(indexBlockLength).writeLong(dataObjectLength)
            .writeLong(dataHeaderLength).writeInt(indexEntryCount).writeInt(segmentMetadataLength);
    // write metadata
    out.writeBytes(ledgerMetadataByte);

    // write entries
    this.indexEntries.entrySet().forEach(entry -> out.writeLong(entry.getValue().getEntryId())
            .writeInt(entry.getValue().getPartId()).writeLong(entry.getValue().getOffset()));

    return new OffloadIndexBlock.IndexInputStream(new ByteBufInputStream(out, true), indexBlockLength);
}
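
For reference, a hypothetical reader for the layout documented above. The field order and widths mirror the writeInt/writeLong calls in toStream() (ByteBuf and DataInputStream are both big-endian by default), but the method name is an illustrative assumption, not part of OffloadIndexBlockImpl:

// Requires java.io.DataInputStream, java.io.IOException, java.io.InputStream.
static void readIndexBlock(InputStream in) throws IOException {
    DataInputStream dis = new DataInputStream(in);
    int magic = dis.readInt();                 // index_magic_header
    int indexBlockLength = dis.readInt();      // index_block_len
    long dataObjectLength = dis.readLong();    // data_object_len
    long dataHeaderLength = dis.readLong();    // data_header_len
    int indexEntryCount = dis.readInt();       // index_entry_count
    int segmentMetadataLength = dis.readInt(); // segment_metadata_len
    byte[] segmentMetadata = new byte[segmentMetadataLength];
    dis.readFully(segmentMetadata);            // segment metadata
    for (int i = 0; i < indexEntryCount; i++) {
        long entryId = dis.readLong();         // messageEntryId
        int partId = dis.readInt();            // blockPartId
        long offset = dis.readLong();          // blockOffset
        // consume or store the entry here
    }
}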

From source file: org.apache.bookkeeper.statelib.impl.kv.KVUtils.java

License: Apache License

static ByteBuf serialize(ByteBuf valBuf, long revision) {
    int serializedSize = valBuf.readableBytes() + Long.BYTES;
    ByteBuf buffer = PooledByteBufAllocator.DEFAULT.heapBuffer(serializedSize);
    buffer.writeLong(revision);
    buffer.writeBytes(valBuf);
    return buffer;
}

From source file: org.apache.bookkeeper.statelib.impl.kv.KVUtils.java

License: Apache License

static ByteBuf serialize(byte[] value, long revision) {
    int serializedSize = value.length + Long.BYTES;
    ByteBuf buffer = PooledByteBufAllocator.DEFAULT.heapBuffer(serializedSize);
    buffer.writeLong(revision);
    buffer.writeBytes(value);
    return buffer;
}
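
For reference, a hypothetical inverse of the two serialize() helpers above, assuming the same layout (an 8-byte revision prefix followed by the value bytes). Neither method exists in KVUtils:

// Hypothetical counterparts; the method names are illustrative.
static long readRevision(ByteBuf serialized) {
    // Advances the reader index past the 8-byte revision prefix.
    return serialized.readLong();
}

static ByteBuf readValue(ByteBuf serialized) {
    // The remaining readable bytes are the original value. The slice
    // shares the parent's memory; retain() it if it must outlive the parent.
    return serialized.readSlice(serialized.readableBytes());
}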

From source file: org.apache.bookkeeper.statelib.impl.kv.KVUtils.java

License: Apache License

static ByteBuf newCommandBuf(Command cmd) throws IOException {
    ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(cmd.getSerializedSize());
    try {
        cmd.writeTo(new ByteBufOutputStream(buf));
    } catch (IOException e) {
        buf.release();
        throw e;
    }
    return buf;
}
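
KVUtils does not show the reverse direction; a hypothetical counterpart, assuming Command is a protobuf-generated message (such classes expose a static parseFrom(InputStream)):

// Hypothetical sketch: parse a Command back out of a buffer produced by
// newCommandBuf(). The 'true' flag releases the buffer when the stream closes.
static Command parseCommandBuf(ByteBuf buf) throws IOException {
    try (ByteBufInputStream in = new ByteBufInputStream(buf, true)) {
        return Command.parseFrom(in);
    }
}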

From source file: org.apache.bookkeeper.statelib.impl.mvcc.MVCCStoreImpl.java

License: Apache License

private IncrementResult<K, V> increment(long revision, WriteBatch batch, IncrementOp<K, V> op) {
    // parameters
    final K key = op.key();
    final long amount = op.amount();

    // raw key
    final byte[] rawKey = keyCoder.encode(key);

    MVCCRecord record;
    try {
        record = getKeyRecord(key, rawKey);
    } catch (StateStoreRuntimeException e) {
        throw e;
    }

    // result
    final IncrementResultImpl<K, V> result = resultFactory.newIncrementResult(revision);
    try {
        long oldAmount = 0L;
        if (null != record) {
            // validate the update revision before applying the update to the record
            if (record.compareModRev(revision) >= 0) {
                result.code(Code.SMALLER_REVISION);
                return result;
            }
            if (ValueType.NUMBER != record.getValueType()) {
                result.code(Code.ILLEGAL_OP);
                return result;
            }
            record.setVersion(record.getVersion() + 1);
            oldAmount = record.getValue().getLong(0);
        } else {
            record = MVCCRecord.newRecord();
            record.setCreateRev(revision);
            record.setVersion(0L);
            record.setValue(PooledByteBufAllocator.DEFAULT.buffer(Long.BYTES), ValueType.NUMBER);
        }
        long newAmount = oldAmount + amount;
        record.getValue().writerIndex(0);
        record.getValue().writeLong(newAmount);
        record.setModRev(revision);

        // write the mvcc record back
        batch.put(dataCfHandle, rawKey, recordCoder.encode(record));

        // finalize the result
        result.code(Code.OK);
        if (op.option().getTotal()) {
            result.totalAmount(newAmount);
        }
        return result;
    } catch (RocksDBException rde) {
        result.close();
        throw new StateStoreRuntimeException(rde);
    } catch (StateStoreRuntimeException e) {
        result.close();
        throw e;
    } finally {
        if (null != record) {
            record.recycle();
        }
    }
}

From source file: org.apache.bookkeeper.stream.storage.impl.routing.RoutingHeaderProxyInterceptor.java

License: Apache License

private <ReqT, RespT> ReqT interceptMessage(MethodDescriptor<ReqT, RespT> method, ReqT message) {
    InputStream is = method.getRequestMarshaller().stream(message);
    int bytes;
    try {
        bytes = is.available();
    } catch (IOException e) {
        log.warn("Encountered exceptions in getting available bytes of message", e);
        throw new RuntimeException("Encountered exception in intercepting message", e);
    }
    ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer();
    try {
        buffer.writeBytes(is, bytes);
    } catch (IOException e) {
        log.warn("Encountered exceptions in transferring bytes to the buffer", e);
        buffer.release();
        throw new RuntimeException("Encountered exceptions in transferring bytes to the buffer", e);
    }
    return method.getRequestMarshaller().parse(new ByteBufInputStream(buffer, true));
}

From source file: org.apache.bookkeeper.tools.cli.commands.bookie.ConvertToInterleavedStorageCommand.java

License: Apache License

private boolean handle(ServerConfiguration bkConf) throws Exception {
    LOG.info("=== Converting DbLedgerStorage ===");
    ServerConfiguration conf = new ServerConfiguration(bkConf);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(bkConf, bkConf.getLedgerDirs(),
            new DiskChecker(bkConf.getDiskUsageThreshold(), bkConf.getDiskUsageWarnThreshold()));
    LedgerDirsManager ledgerIndexManager = new LedgerDirsManager(bkConf, bkConf.getLedgerDirs(),
            new DiskChecker(bkConf.getDiskUsageThreshold(), bkConf.getDiskUsageWarnThreshold()));

    DbLedgerStorage dbStorage = new DbLedgerStorage();
    InterleavedLedgerStorage interleavedStorage = new InterleavedLedgerStorage();

    CheckpointSource checkpointSource = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return Checkpoint.MAX;
        }

        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) {
        }
    };
    Checkpointer checkpointer = new Checkpointer() {
        @Override
        public void startCheckpoint(CheckpointSource.Checkpoint checkpoint) {
            // No-op
        }

        @Override
        public void start() {
            // no-op
        }
    };

    dbStorage.initialize(conf, null, ledgerDirsManager, ledgerIndexManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, PooledByteBufAllocator.DEFAULT);
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerIndexManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, PooledByteBufAllocator.DEFAULT);
    LedgerCache interleavedLedgerCache = interleavedStorage.getLedgerCache();

    int convertedLedgers = 0;
    for (long ledgerId : dbStorage.getActiveLedgersInRange(0, Long.MAX_VALUE)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Converting ledger {}", ledgerIdFormatter.formatLedgerId(ledgerId));
        }

        interleavedStorage.setMasterKey(ledgerId, dbStorage.readMasterKey(ledgerId));
        if (dbStorage.isFenced(ledgerId)) {
            interleavedStorage.setFenced(ledgerId);
        }

        long lastEntryInLedger = dbStorage.getLastEntryInLedger(ledgerId);
        for (long entryId = 0; entryId <= lastEntryInLedger; entryId++) {
            try {
                long location = dbStorage.getLocation(ledgerId, entryId);
                if (location != 0L) {
                    interleavedLedgerCache.putEntryOffset(ledgerId, entryId, location);
                }
            } catch (Bookie.NoEntryException e) {
                // Ignore entry
            }
        }

        if (++convertedLedgers % 1000 == 0) {
            LOG.info("Converted {} ledgers", convertedLedgers);
        }
    }

    dbStorage.shutdown();

    interleavedLedgerCache.flushLedger(true);
    interleavedStorage.flush();
    interleavedStorage.shutdown();

    String baseDir = ledgerDirsManager.getAllLedgerDirs().get(0).toString();

    // Rename databases and keep backup
    Files.move(FileSystems.getDefault().getPath(baseDir, "ledgers"),
            FileSystems.getDefault().getPath(baseDir, "ledgers.backup"));

    Files.move(FileSystems.getDefault().getPath(baseDir, "locations"),
            FileSystems.getDefault().getPath(baseDir, "locations.backup"));

    LOG.info("---- Done Converting {} ledgers ----", convertedLedgers);
    return true;
}

From source file: org.apache.bookkeeper.tools.cli.commands.bookie.ConvertToInterleavedStorageCommandTest.java

License: Apache License

@Test
public void testConvertToInterleavedStorageCommand() {
    ConvertToInterleavedStorageCommand cmd = new ConvertToInterleavedStorageCommand();
    Assert.assertTrue(cmd.apply(bkFlags, new String[] { "" }));

    try {
        verifyNew(ServerConfiguration.class).withArguments(eq(conf));
        verifyNew(LedgerDirsManager.class, times(2)).withArguments(eq(conf), any(File[].class),
                any(DiskChecker.class));
        verifyNew(DbLedgerStorage.class, times(1)).withNoArguments();
        verifyNew(InterleavedLedgerStorage.class, times(1)).withNoArguments();

        verify(dbStorage, times(1)).initialize(eq(conf), eq(null), any(LedgerDirsManager.class),
                any(LedgerDirsManager.class), eq(null), any(CheckpointSource.class), any(Checkpointer.class),
                eq(NullStatsLogger.INSTANCE), eq(PooledByteBufAllocator.DEFAULT));
        verify(interleavedLedgerStorage, times(1)).initialize(eq(conf), eq(null), any(LedgerDirsManager.class),
                any(LedgerDirsManager.class), eq(null), any(CheckpointSource.class), any(Checkpointer.class),
                eq(NullStatsLogger.INSTANCE), eq(PooledByteBufAllocator.DEFAULT));
        verify(dbStorage, times(1)).getActiveLedgersInRange(anyLong(), anyLong());
        verify(dbStorage, times(10)).readMasterKey(anyLong());
        verify(interleavedLedgerStorage, times(10)).setMasterKey(anyLong(), any());
        verify(dbStorage, times(10)).getLastEntryInLedger(anyLong());
        verify(dbStorage, times(10)).getLocation(anyLong(), anyLong());
        verify(dbStorage, times(1)).shutdown();
        verify(interleavedLedgerCache, times(1)).flushLedger(true);
        verify(interleavedLedgerStorage, times(1)).flush();
        verify(interleavedLedgerStorage, times(1)).shutdown();
    } catch (Exception e) {
        throw new UncheckedExecutionException(e.getMessage(), e);
    }
}

From source file: org.apache.bookkeeper.tools.perf.table.IncrementTask.java

License: Apache License

void incKey(long i) {
    ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(flags.keySize);
    getKey(keyBuf, i, keyRange);
    keyBuf.writerIndex(keyBuf.readerIndex() + keyBuf.writableBytes());

    final long startTime = System.nanoTime();
    table.increment(keyBuf, 100).whenComplete((result, cause) -> {
        if (null != semaphore) {
            semaphore.release();
        }
        if (null != cause) {
            log.error("Error at increment key/amount", cause);
        } else {
            long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - startTime);
            writeOpStats.recordOp(latencyMicros);
        }
        keyBuf.release();
    });
}

From source file: org.apache.bookkeeper.tools.perf.table.WriteTask.java

License: Apache License

void writeKey(long i, byte[] valueBytes) {
    final ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(flags.keySize);
    getKey(keyBuf, i, keyRange);
    keyBuf.writerIndex(keyBuf.readerIndex() + keyBuf.writableBytes());
    final ByteBuf valBuf = Unpooled.wrappedBuffer(valueBytes);

    final long startTime = System.nanoTime();
    table.put(keyBuf, valBuf).whenComplete((result, cause) -> {
        if (null != semaphore) {
            semaphore.release();
        }
        if (null != cause) {
            log.error("Error at put key/value", cause);
        } else {
            long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - startTime);
            writeOpStats.recordOp(latencyMicros);
        }
        keyBuf.release();
        valBuf.release();
    });
}