Example usage for io.netty.buffer PooledByteBufAllocator DEFAULT

Introduction

This page collects example usages of the io.netty.buffer.PooledByteBufAllocator.DEFAULT field from open source projects.

Prototype

public static final PooledByteBufAllocator DEFAULT

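Before the usage examples, here is a minimal, self-contained sketch (class name and values are illustrative) of the basic pattern behind them all: allocate a buffer from the shared pooled allocator, use it, and release it back to the pool.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public class PooledDefaultExample {
    public static void main(String[] args) {
        // Allocate a 64-byte buffer from the shared, statically configured pooled allocator.
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(64);
        try {
            buf.writeLong(42L);
            System.out.println("readable bytes: " + buf.readableBytes());
            System.out.println("value: " + buf.readLong());
        } finally {
            // Pooled buffers are reference counted; release() returns the memory to the pool.
            buf.release();
        }
    }
}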

Usage

From source file:org.apache.bookkeeper.benchmark.BenchBookie.java

License:Apache License

/**
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args)
        throws InterruptedException, ParseException, IOException, BKException, KeeperException {
    Options options = new Options();
    options.addOption("host", true, "Hostname or IP of bookie to benchmark");
    options.addOption("port", true, "Port of bookie to benchmark (default 3181)");
    options.addOption("zookeeper", true, "Zookeeper ensemble, (default \"localhost:2181\")");
    options.addOption("size", true, "Size of message to send, in bytes (default 1024)");
    options.addOption("warmupCount", true, "Number of messages in warmup phase (default 999)");
    options.addOption("latencyCount", true, "Number of messages in latency phase (default 5000)");
    options.addOption("throughputCount", true, "Number of messages in throughput phase (default 50000)");
    options.addOption("help", false, "This message");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("help") || !cmd.hasOption("host")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("BenchBookie <options>", options);
        System.exit(-1);
    }

    String addr = cmd.getOptionValue("host");
    int port = Integer.parseInt(cmd.getOptionValue("port", "3181"));
    int size = Integer.parseInt(cmd.getOptionValue("size", "1024"));
    String servers = cmd.getOptionValue("zookeeper", "localhost:2181");
    int warmUpCount = Integer.parseInt(cmd.getOptionValue("warmupCount", "999"));
    int latencyCount = Integer.parseInt(cmd.getOptionValue("latencyCount", "5000"));
    int throughputCount = Integer.parseInt(cmd.getOptionValue("throughputCount", "50000"));

    EventLoopGroup eventLoop;
    if (SystemUtils.IS_OS_LINUX) {
        try {
            eventLoop = new EpollEventLoopGroup();
        } catch (Throwable t) {
            LOG.warn("Could not use Netty Epoll event loop for benchmark {}", t.getMessage());
            eventLoop = new NioEventLoopGroup();
        }
    } else {
        eventLoop = new NioEventLoopGroup();
    }

    OrderedExecutor executor = OrderedExecutor.newBuilder().name("BenchBookieClientScheduler").numThreads(1)
            .build();
    ScheduledExecutorService scheduler = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("BookKeeperClientScheduler"));

    ClientConfiguration conf = new ClientConfiguration();
    BookieClient bc = new BookieClientImpl(conf, eventLoop, PooledByteBufAllocator.DEFAULT, executor, scheduler,
            NullStatsLogger.INSTANCE);
    LatencyCallback lc = new LatencyCallback();

    ThroughputCallback tc = new ThroughputCallback();

    long ledger = getValidLedgerId(servers);
    for (long entry = 0; entry < warmUpCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
    }
    LOG.info("Waiting for warmup");
    tc.waitFor(warmUpCount);

    ledger = getValidLedgerId(servers);
    LOG.info("Benchmarking latency");
    long startTime = System.nanoTime();
    for (long entry = 0; entry < latencyCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        lc.resetComplete();
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                lc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
        lc.waitForComplete();
    }
    long endTime = System.nanoTime();
    LOG.info("Latency: " + (((double) (endTime - startTime)) / ((double) latencyCount)) / 1000000.0);

    ledger = getValidLedgerId(servers);
    LOG.info("Benchmarking throughput");
    startTime = System.currentTimeMillis();
    tc = new ThroughputCallback();
    for (long entry = 0; entry < throughputCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
    }
    tc.waitFor(throughputCount);
    endTime = System.currentTimeMillis();
    LOG.info("Throughput: " + ((long) throughputCount) * 1000 / (endTime - startTime));

    bc.close();
    scheduler.shutdown();
    eventLoop.shutdownGracefully();
    executor.shutdown();
}

From source file:org.apache.bookkeeper.bookie.Bookie.java

License:Apache License

public Bookie(ServerConfiguration conf) throws IOException, InterruptedException, BookieException {
    this(conf, NullStatsLogger.INSTANCE, PooledByteBufAllocator.DEFAULT);
}

From source file:org.apache.bookkeeper.bookie.EntryLogger.java

License:Apache License

/**
 * Create an EntryLogger that stores its log files in the given directories.
 */
public EntryLogger(ServerConfiguration conf, LedgerDirsManager ledgerDirsManager) throws IOException {
    this(conf, ledgerDirsManager, null, NullStatsLogger.INSTANCE, PooledByteBufAllocator.DEFAULT);
}

From source file:org.apache.bookkeeper.common.allocator.impl.ByteBufAllocatorImpl.java

License:Apache License

ByteBufAllocatorImpl(ByteBufAllocator pooledAllocator, ByteBufAllocator unpooledAllocator,
        PoolingPolicy poolingPolicy, int poolingConcurrency, OutOfMemoryPolicy outOfMemoryPolicy,
        Consumer<OutOfMemoryError> outOfMemoryListener, LeakDetectionPolicy leakDetectionPolicy) {
    super(poolingPolicy == PoolingPolicy.PooledDirect /* preferDirect */);

    this.poolingPolicy = poolingPolicy;
    this.outOfMemoryPolicy = outOfMemoryPolicy;
    if (outOfMemoryListener == null) {
        this.outOfMemoryListener = (v) -> {
            log.error("Unable to allocate memory", v);
        };
    } else {
        this.outOfMemoryListener = outOfMemoryListener;
    }

    if (poolingPolicy == PoolingPolicy.PooledDirect) {
        if (pooledAllocator == null) {
            if (poolingConcurrency == PooledByteBufAllocator.defaultNumDirectArena()) {
                // If all the parameters are the same as in the default Netty pool,
                // just reuse the static instance as the underlying allocator.
                this.pooledAllocator = PooledByteBufAllocator.DEFAULT;
            } else {
                this.pooledAllocator = new PooledByteBufAllocator(true /* preferDirect */,
                        poolingConcurrency /* nHeapArena */, poolingConcurrency /* nDirectArena */,
                        PooledByteBufAllocator.defaultPageSize(), PooledByteBufAllocator.defaultMaxOrder(),
                        PooledByteBufAllocator.defaultTinyCacheSize(),
                        PooledByteBufAllocator.defaultSmallCacheSize(),
                        PooledByteBufAllocator.defaultNormalCacheSize(),
                        PooledByteBufAllocator.defaultUseCacheForAllThreads());
            }
        } else {
            this.pooledAllocator = pooledAllocator;
        }
    } else {
        this.pooledAllocator = null;
    }

    this.unpooledAllocator = (unpooledAllocator != null) ? unpooledAllocator : UnpooledByteBufAllocator.DEFAULT;

    // The setting is static in Netty, so it will actually affect all
    // allocators
    switch (leakDetectionPolicy) {
    case Disabled:
        if (log.isDebugEnabled()) {
            log.debug("Disable Netty allocator leak detector");
        }
        ResourceLeakDetector.setLevel(Level.DISABLED);
        break;

    case Simple:
        log.info("Setting Netty allocator leak detector to Simple");
        ResourceLeakDetector.setLevel(Level.SIMPLE);
        break;

    case Advanced:
        log.info("Setting Netty allocator leak detector to Advanced");
        ResourceLeakDetector.setLevel(Level.ADVANCED);
        break;

    case Paranoid:
        log.info("Setting Netty allocator leak detector to Paranoid");
        ResourceLeakDetector.setLevel(Level.PARANOID);
        break;
    }
}

From source file:org.apache.bookkeeper.common.router.StringUtf8HashRouter.java

License:Apache License

@Override
public ByteBuf getRoutingKeyData(String key) {
    int keyLen = key.length();
    ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.buffer(keyLen);
    keyBuf.writeCharSequence(key, UTF_8);
    return keyBuf;
}
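
The buffer returned above comes from the pooled allocator, so the caller is responsible for releasing it once the key bytes are consumed. A hedged sketch of that caller-side pattern (the class and method names below are illustrative, not part of the BookKeeper API):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

import static java.nio.charset.StandardCharsets.UTF_8;

public class RoutingKeyCallerSketch {
    // Mirrors getRoutingKeyData above: encode a string key into a pooled buffer.
    static ByteBuf routingKeyData(String key) {
        ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.buffer(key.length());
        keyBuf.writeCharSequence(key, UTF_8);
        return keyBuf;
    }

    public static void main(String[] args) {
        ByteBuf keyData = routingKeyData("my-routing-key");
        try {
            byte[] raw = new byte[keyData.readableBytes()];
            keyData.readBytes(raw);
            System.out.println("key bytes: " + raw.length);
        } finally {
            // The buffer was allocated from the pool, so the consumer must release it.
            keyData.release();
        }
    }
}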

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

License:Apache License

public ByteBuf getMessageWithMetadata(byte[] data) throws IOException {
    MessageMetadata messageData = MessageMetadata.newBuilder().setPublishTime(System.currentTimeMillis())
            .setProducerName("prod-name").setSequenceId(0).build();
    ByteBuf payload = Unpooled.wrappedBuffer(data, 0, data.length);

    int msgMetadataSize = messageData.getSerializedSize();
    int headersSize = 4 + msgMetadataSize;
    ByteBuf headers = PooledByteBufAllocator.DEFAULT.buffer(headersSize, headersSize);
    ByteBufCodedOutputStream outStream = ByteBufCodedOutputStream.get(headers);
    headers.writeInt(msgMetadataSize);
    messageData.writeTo(outStream);
    outStream.recycle();
    return DoubleByteBuf.get(headers, payload);
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreBackedInputStreamImpl.java

License:Apache License

public BlobStoreBackedInputStreamImpl(BlobStore blobStore, String bucket, String key, VersionCheck versionCheck,
        long objectLen, int bufferSize) {
    this.blobStore = blobStore;
    this.bucket = bucket;
    this.key = key;
    this.versionCheck = versionCheck;
    this.buffer = PooledByteBufAllocator.DEFAULT.buffer(bufferSize, bufferSize);
    this.objectLen = objectLen;
    this.bufferSize = bufferSize;
    this.cursor = 0;
    this.bufferOffsetStart = this.bufferOffsetEnd = -1;
}
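
A small sketch of the allocation idiom used above, assuming the two-argument call is deliberate: passing the same value as initial and maximum capacity yields a fixed-size read buffer, so an accidental overfill fails fast instead of silently growing the pooled allocation.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public class FixedCapacityBufferSketch {
    public static void main(String[] args) {
        int bufferSize = 1024;
        // initialCapacity == maxCapacity: the buffer is pinned to bufferSize bytes.
        ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer(bufferSize, bufferSize);
        System.out.println("capacity=" + buffer.capacity() + ", maxCapacity=" + buffer.maxCapacity());
        try {
            buffer.writeBytes(new byte[bufferSize + 1]); // exceeds maxCapacity
        } catch (IndexOutOfBoundsException expected) {
            System.out.println("write past maxCapacity rejected: " + expected.getMessage());
        } finally {
            buffer.release();
        }
    }
}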

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreBackedReadHandleImpl.java

License:Apache License

@Override
public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
    log.debug("Ledger {}: reading {} - {}", getId(), firstEntry, lastEntry);
    CompletableFuture<LedgerEntries> promise = new CompletableFuture<>();
    executor.submit(() -> {
        if (firstEntry > lastEntry || firstEntry < 0 || lastEntry > getLastAddConfirmed()) {
            promise.completeExceptionally(new BKException.BKIncorrectParameterException());
            return;
        }
        long entriesToRead = (lastEntry - firstEntry) + 1;
        List<LedgerEntry> entries = new ArrayList<LedgerEntry>();
        long nextExpectedId = firstEntry;
        try {
            OffloadIndexEntry entry = index.getIndexEntryForEntry(firstEntry);
            inputStream.seek(entry.getDataOffset());

            while (entriesToRead > 0) {
                int length = dataStream.readInt();
                if (length < 0) { // hit padding or new block
                    inputStream.seekForward(index.getIndexEntryForEntry(nextExpectedId).getDataOffset());
                    length = dataStream.readInt();
                }
                long entryId = dataStream.readLong();

                if (entryId == nextExpectedId) {
                    ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(length, length);
                    entries.add(LedgerEntryImpl.create(ledgerId, entryId, length, buf));
                    int toWrite = length;
                    while (toWrite > 0) {
                        toWrite -= buf.writeBytes(dataStream, toWrite);
                    }
                    entriesToRead--;
                    nextExpectedId++;
                } else if (entryId > lastEntry) {
                    log.info("Expected to read {}, but read {}, which is greater than last entry {}",
                            nextExpectedId, entryId, lastEntry);
                    throw new BKException.BKUnexpectedConditionException();
                } else {
                    inputStream.skip(length);
                }
            }

            promise.complete(LedgerEntriesImpl.create(entries));
        } catch (Throwable t) {
            promise.completeExceptionally(t);
            entries.forEach(LedgerEntry::close);
        }
    });
    return promise;
}
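
The inner loop above fills the entry buffer with ByteBuf.writeBytes(InputStream, int), which may transfer fewer bytes than requested on each call. Below is a standalone sketch of that fill-until-complete pattern against a plain InputStream (the class and helper names are illustrative, and an explicit end-of-stream check is added for the sketch):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public class ReadFullyIntoPooledBuffer {
    // Reads exactly `length` bytes from `in` into a pooled buffer, looping because
    // ByteBuf.writeBytes(InputStream, int) may transfer fewer bytes per call.
    static ByteBuf readFully(InputStream in, int length) throws IOException {
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(length, length);
        try {
            int remaining = length;
            while (remaining > 0) {
                int transferred = buf.writeBytes(in, remaining);
                if (transferred < 0) {
                    throw new EOFException("stream ended with " + remaining + " bytes still expected");
                }
                remaining -= transferred;
            }
            return buf;
        } catch (IOException e) {
            buf.release();
            throw e;
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] data = {1, 2, 3, 4, 5, 6, 7, 8};
        ByteBuf buf = readFully(new ByteArrayInputStream(data), data.length);
        System.out.println("read " + buf.readableBytes() + " bytes");
        buf.release();
    }
}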

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlockAwareSegmentInputStreamImpl.java

License:Apache License

private List<ByteBuf> readNextEntriesFromLedger(long start, long maxNumberEntries) throws IOException {
    long end = Math.min(start + maxNumberEntries - 1, ledger.getLastAddConfirmed());
    try (LedgerEntries ledgerEntriesOnce = ledger.readAsync(start, end).get()) {
        log.debug("read ledger entries. start: {}, end: {}", start, end);

        List<ByteBuf> entries = Lists.newLinkedList();

        Iterator<LedgerEntry> iterator = ledgerEntriesOnce.iterator();
        while (iterator.hasNext()) {
            LedgerEntry entry = iterator.next();
            ByteBuf buf = entry.getEntryBuffer().retain();
            int entryLength = buf.readableBytes();
            long entryId = entry.getEntryId();

            CompositeByteBuf entryBuf = PooledByteBufAllocator.DEFAULT.compositeBuffer(2);
            ByteBuf entryHeaderBuf = PooledByteBufAllocator.DEFAULT.buffer(ENTRY_HEADER_SIZE,
                    ENTRY_HEADER_SIZE);

            entryHeaderBuf.writeInt(entryLength).writeLong(entryId);
            entryBuf.addComponents(true, entryHeaderBuf, buf);

            entries.add(entryBuf);
        }
        return entries;
    } catch (InterruptedException | ExecutionException e) {
        log.error("Exception when get CompletableFuture<LedgerEntries>. ", e);
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new IOException(e);
    }
}
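
The example above frames each entry as a pooled header plus the retained entry payload inside a CompositeByteBuf, which avoids copying the payload. A minimal sketch of the same framing and its release semantics (entry data and sizes are placeholders): after addComponents(true, ...) the composite owns both components, so one release() frees them together.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;

import java.nio.charset.StandardCharsets;

public class EntryFramingSketch {
    public static void main(String[] args) {
        ByteBuf payload = Unpooled.wrappedBuffer("entry-data".getBytes(StandardCharsets.UTF_8));
        long entryId = 42L;

        // Frame the payload as [length:int][entryId:long][payload] without copying the payload.
        ByteBuf header = PooledByteBufAllocator.DEFAULT.buffer(12, 12);
        header.writeInt(payload.readableBytes()).writeLong(entryId);

        CompositeByteBuf framed = PooledByteBufAllocator.DEFAULT.compositeBuffer(2);
        // addComponents(true, ...) advances the writer index so the data is readable,
        // and transfers ownership of the components to the composite buffer.
        framed.addComponents(true, header, payload);

        System.out.println("framed bytes: " + framed.readableBytes());
        // One release frees both the pooled header and the wrapped payload.
        framed.release();
    }
}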

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.DataBlockHeaderImpl.java

License:Apache License

/**
 * Get the content of the data block header as InputStream.
 * Read out in format:
 *   [ magic_word -- int ][ header_len -- long ][ block_len -- long ][ first_entry_id -- long ][ padding zeros ]
 */
@Override
public InputStream toStream() {
    ByteBuf out = PooledByteBufAllocator.DEFAULT.buffer(HEADER_MAX_SIZE, HEADER_MAX_SIZE);
    out.writeInt(MAGIC_WORD).writeLong(headerLength).writeLong(blockLength).writeLong(firstEntryId)
            .writeBytes(PADDING);

    // true means the input stream will release the ByteBuf on close
    return new ByteBufInputStream(out, true);
}
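
A brief sketch of consuming such a stream, relying only on documented Netty behaviour: because the ByteBufInputStream is created with releaseOnClose set to true, closing the stream releases the pooled buffer, so no separate release() call is needed (the header values below are placeholders, not the real MAGIC_WORD layout):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.PooledByteBufAllocator;

import java.io.DataInputStream;
import java.io.IOException;

public class HeaderStreamSketch {
    public static void main(String[] args) throws IOException {
        // Build a small header in a pooled buffer (the field values are placeholders).
        ByteBuf header = PooledByteBufAllocator.DEFAULT.buffer(16, 16);
        header.writeInt(0xCAFEBABE).writeLong(123L);

        // releaseOnClose = true ties the pooled buffer's lifetime to the stream:
        // closing the stream returns the buffer to the pool.
        try (DataInputStream in = new DataInputStream(new ByteBufInputStream(header, true))) {
            System.out.printf("magic=%08x firstEntryId=%d%n", in.readInt(), in.readLong());
        }
        // header.refCnt() is 0 here; no explicit release() call is needed.
    }
}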