Example usage for io.netty.buffer ByteBuf capacity

Introduction

On this page you can find example usages of io.netty.buffer.ByteBuf.capacity().

Prototype

public abstract int capacity();

Document

Returns the number of bytes (octets) this buffer can contain.
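
Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of how capacity() relates to the reader and writer indices; the buffer size of 16 is illustrative:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class CapacityDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16); // initial capacity of 16 bytes

        System.out.println(buf.capacity());      // 16: total bytes the buffer can currently hold
        System.out.println(buf.readableBytes()); // 0: nothing has been written yet

        buf.writeLong(42L); // advances the writer index by 8 bytes

        System.out.println(buf.capacity());      // still 16: capacity is independent of the indices
        System.out.println(buf.readableBytes()); // 8: writerIndex - readerIndex

        buf.release(); // release the buffer once it is no longer needed
    }
}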

Usage

From source file: org.apache.activemq.artemis.message.CoreMessageTest.java

License: Apache License

private CoreMessage decodeMessage() {

    ByteBuf newBuffer = Unpooled.buffer(BYTE_ENCODE.capacity());
    newBuffer.writeBytes(BYTE_ENCODE, 0, BYTE_ENCODE.writerIndex());

    CoreMessage coreMessage = internalDecode(newBuffer);

    int encodeSize = coreMessage.getEncodeSize();

    Assert.assertEquals(newBuffer.capacity(), encodeSize);

    Assert.assertEquals(ADDRESS, coreMessage.getAddressSimpleString());

    Assert.assertEquals(PROP1_VALUE.toString(), coreMessage.getStringProperty(PROP1_NAME));

    ByteBuf destinedBuffer = Unpooled.buffer(BYTE_ENCODE.array().length);
    coreMessage.sendBuffer(destinedBuffer, 0);

    byte[] destinedArray = destinedBuffer.array();
    byte[] sourceArray = BYTE_ENCODE.array();

    CoreMessage newDecoded = internalDecode(Unpooled.wrappedBuffer(destinedArray));

    Assert.assertEquals(encodeSize, newDecoded.getEncodeSize());

    Assert.assertArrayEquals(sourceArray, destinedArray);

    return coreMessage;
}
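
In this test, capacity() serves two roles: it sizes the working copy of the encoded message, and the test then asserts that the decoded message's getEncodeSize() matches the copy's capacity, i.e. that the encoding accounts for every byte of the buffer.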

From source file: org.apache.activemq.artemis.protocol.amqp.util.NettyWritableTest.java

License: Apache License

@Test
public void testLimit() {
    ByteBuf buffer = Unpooled.buffer(1024);
    NettyWritable writable = new NettyWritable(buffer);

    assertEquals(buffer.capacity(), writable.limit());
}
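
The assertion holds by construction: NettyWritable wraps a Netty ByteBuf (adapting it to Proton's WritableBuffer interface) and reports the backing buffer's capacity() as its limit().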

From source file: org.apache.activemq.artemis.tests.integration.transports.netty.ActiveMQFrameDecoder2Test.java

License: Apache License

@Test
public void testOrdinaryFragmentation() throws Exception {
    final EmbeddedChannel decoder = new EmbeddedChannel(new ActiveMQFrameDecoder2());
    final byte[] data = new byte[ActiveMQFrameDecoder2Test.MSG_LEN];
    ActiveMQFrameDecoder2Test.rand.nextBytes(data);

    ByteBuf src = Unpooled.buffer(ActiveMQFrameDecoder2Test.MSG_CNT * (ActiveMQFrameDecoder2Test.MSG_LEN + 4));
    while (src.writerIndex() < src.capacity()) {
        src.writeInt(ActiveMQFrameDecoder2Test.MSG_LEN);
        src.writeBytes(data);
    }

    List<ByteBuf> packets = new ArrayList<ByteBuf>();
    while (src.isReadable()) {
        int length = Math.min(
                ActiveMQFrameDecoder2Test.rand.nextInt(ActiveMQFrameDecoder2Test.FRAGMENT_MAX_LEN),
                src.readableBytes());
        packets.add(src.readBytes(length));
    }

    int cnt = 0;
    for (ByteBuf p : packets) {
        decoder.writeInbound(p);
        for (;;) {
            ByteBuf frame = (ByteBuf) decoder.readInbound();
            if (frame == null) {
                break;
            }
            Assert.assertEquals(4, frame.readerIndex());
            Assert.assertEquals(ActiveMQFrameDecoder2Test.MSG_LEN, frame.readableBytes());
            Assert.assertEquals(Unpooled.wrappedBuffer(data), frame);
            cnt++;
            frame.release();
        }
    }
    Assert.assertEquals(ActiveMQFrameDecoder2Test.MSG_CNT, cnt);
}
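
Here capacity() is the fill bound: the source buffer is allocated at exactly MSG_CNT * (MSG_LEN + 4) bytes, so writing length-prefixed messages until writerIndex() reaches capacity() packs exactly MSG_CNT frames for the decoder to reassemble.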

From source file: org.apache.bookkeeper.benchmark.BenchBookie.java

License: Apache License

/**
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args)
        throws InterruptedException, ParseException, IOException, BKException, KeeperException {
    Options options = new Options();
    options.addOption("host", true, "Hostname or IP of bookie to benchmark");
    options.addOption("port", true, "Port of bookie to benchmark (default 3181)");
    options.addOption("zookeeper", true, "Zookeeper ensemble, (default \"localhost:2181\")");
    options.addOption("size", true, "Size of message to send, in bytes (default 1024)");
    options.addOption("warmupCount", true, "Number of messages in warmup phase (default 999)");
    options.addOption("latencyCount", true, "Number of messages in latency phase (default 5000)");
    options.addOption("throughputCount", true, "Number of messages in throughput phase (default 50000)");
    options.addOption("help", false, "This message");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("help") || !cmd.hasOption("host")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("BenchBookie <options>", options);
        System.exit(-1);
    }

    String addr = cmd.getOptionValue("host");
    int port = Integer.parseInt(cmd.getOptionValue("port", "3181"));
    int size = Integer.parseInt(cmd.getOptionValue("size", "1024"));
    String servers = cmd.getOptionValue("zookeeper", "localhost:2181");
    int warmUpCount = Integer.parseInt(cmd.getOptionValue("warmupCount", "999"));
    int latencyCount = Integer.parseInt(cmd.getOptionValue("latencyCount", "5000"));
    int throughputCount = Integer.parseInt(cmd.getOptionValue("throughputCount", "50000"));

    EventLoopGroup eventLoop;
    if (SystemUtils.IS_OS_LINUX) {
        try {
            eventLoop = new EpollEventLoopGroup();
        } catch (Throwable t) {
            LOG.warn("Could not use Netty Epoll event loop for benchmark {}", t.getMessage());
            eventLoop = new NioEventLoopGroup();
        }
    } else {
        eventLoop = new NioEventLoopGroup();
    }

    OrderedExecutor executor = OrderedExecutor.newBuilder().name("BenchBookieClientScheduler").numThreads(1)
            .build();
    ScheduledExecutorService scheduler = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("BookKeeperClientScheduler"));

    ClientConfiguration conf = new ClientConfiguration();
    BookieClient bc = new BookieClientImpl(conf, eventLoop, PooledByteBufAllocator.DEFAULT, executor, scheduler,
            NullStatsLogger.INSTANCE);
    LatencyCallback lc = new LatencyCallback();

    ThroughputCallback tc = new ThroughputCallback();

    long ledger = getValidLedgerId(servers);
    for (long entry = 0; entry < warmUpCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
    }
    LOG.info("Waiting for warmup");
    tc.waitFor(warmUpCount);

    ledger = getValidLedgerId(servers);
    LOG.info("Benchmarking latency");
    long startTime = System.nanoTime();
    for (long entry = 0; entry < latencyCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        lc.resetComplete();
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                lc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
        lc.waitForComplete();
    }
    long endTime = System.nanoTime();
    LOG.info("Latency: " + (((double) (endTime - startTime)) / ((double) latencyCount)) / 1000000.0);

    ledger = getValidLedgerId(servers);
    LOG.info("Benchmarking throughput");
    startTime = System.currentTimeMillis();
    tc = new ThroughputCallback();
    for (long entry = 0; entry < throughputCount; entry++) {
        ByteBuf toSend = Unpooled.buffer(size);
        toSend.resetReaderIndex();
        toSend.resetWriterIndex();
        toSend.writeLong(ledger);
        toSend.writeLong(entry);
        toSend.writerIndex(toSend.capacity());
        bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
                tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
    }
    tc.waitFor(throughputCount);
    endTime = System.currentTimeMillis();
    LOG.info("Throughput: " + ((long) throughputCount) * 1000 / (endTime - startTime));

    bc.close();
    scheduler.shutdown();
    eventLoop.shutdownGracefully();
    executor.shutdown();
}
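
In each benchmark phase, toSend.writerIndex(toSend.capacity()) pushes the writer index to the end of the buffer so that all size bytes, not just the 16 bytes of ledger and entry ids actually written, are sent as the entry payload.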

From source file: org.apache.bookkeeper.bookie.Bookie.java

License: Apache License

private ByteBuf createExplicitLACEntry(long ledgerId, ByteBuf explicitLac) {
    ByteBuf bb = allocator.directBuffer(8 + 8 + 4 + explicitLac.capacity());
    bb.writeLong(ledgerId);
    bb.writeLong(METAENTRY_ID_LEDGER_EXPLICITLAC);
    bb.writeInt(explicitLac.capacity());
    bb.writeBytes(explicitLac);
    return bb;
}
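
The 8 + 8 + 4 in the allocation reserves room for the ledger id (a long), the METAENTRY_ID_LEDGER_EXPLICITLAC marker (a long), and the length prefix (an int), with explicitLac.capacity() supplying the size of the LAC payload that follows.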

From source file: org.apache.bookkeeper.bookie.EntryLogger.java

License: Apache License

private EntryLogEntry getFCForEntryInternal(long ledgerId, long entryId, long entryLogId, long pos)
        throws EntryLookupException, IOException {
    ByteBuf sizeBuff = sizeBuffer.get();
    sizeBuff.clear();
    pos -= 4; // we want to get the entrySize as well as the ledgerId and entryId
    BufferedReadChannel fc;
    try {
        fc = getChannelForLogId(entryLogId);
    } catch (FileNotFoundException e) {
        throw new EntryLookupException.MissingLogFileException(ledgerId, entryId, entryLogId, pos);
    }

    try {
        if (readFromLogChannel(entryLogId, fc, sizeBuff, pos) != sizeBuff.capacity()) {
            throw new EntryLookupException.MissingEntryException(ledgerId, entryId, entryLogId, pos);
        }
    } catch (BufferedChannelBase.BufferedChannelClosedException | AsynchronousCloseException e) {
        throw new EntryLookupException.MissingLogFileException(ledgerId, entryId, entryLogId, pos);
    }
    pos += 4;
    int entrySize = sizeBuff.readInt();

    // entrySize does not include the ledgerId
    if (entrySize > maxSaneEntrySize) {
        LOG.warn("Sanity check failed for entry size of " + entrySize + " at location " + pos + " in "
                + entryLogId);
    }
    if (entrySize < MIN_SANE_ENTRY_SIZE) {
        LOG.error("Read invalid entry length {}", entrySize);
        throw new EntryLookupException.InvalidEntryLengthException(ledgerId, entryId, entryLogId, pos);
    }

    long thisLedgerId = sizeBuff.getLong(4);
    long thisEntryId = sizeBuff.getLong(12);
    if (thisLedgerId != ledgerId || thisEntryId != entryId) {
        throw new EntryLookupException.WrongEntryException(thisEntryId, thisLedgerId, ledgerId, entryId,
                entryLogId, pos);
    }
    return new EntryLogEntry(entrySize, fc);
}
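
Here sizeBuff.capacity() doubles as the expected read length: the thread-local buffer is evidently sized to hold the 4-byte entry size plus the 8-byte ledger id and 8-byte entry id, which is why getLong(4) and getLong(12) recover the two ids after the size has been read.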

From source file: org.apache.bookkeeper.bookie.EntryLogger.java

License: Apache License

/**
 * Scan entry log.
 *
 * @param entryLogId Entry Log Id
 * @param scanner Entry Log Scanner
 * @throws IOException
 */
public void scanEntryLog(long entryLogId, EntryLogScanner scanner) throws IOException {
    // Buffer into which to read the entrySize (4 bytes) and the ledgerId (8 bytes)
    ByteBuf headerBuffer = Unpooled.buffer(4 + 8);
    BufferedReadChannel bc;
    // Get the BufferedChannel for the current entry log file
    try {
        bc = getChannelForLogId(entryLogId);
    } catch (IOException e) {
        LOG.warn("Failed to get channel to scan entry log: " + entryLogId + ".log");
        throw e;
    }
    // Start the read position in the current entry log file to be after
    // the header where all of the ledger entries are.
    long pos = LOGFILE_HEADER_SIZE;

    // Start with a reasonably sized buffer size
    ByteBuf data = allocator.directBuffer(1024 * 1024);

    try {

        // Read through the entry log file and extract the ledger IDs.
        while (true) {
            // Check if we've finished reading the entry log file.
            if (pos >= bc.size()) {
                break;
            }
            if (readFromLogChannel(entryLogId, bc, headerBuffer, pos) != headerBuffer.capacity()) {
                LOG.warn("Short read for entry size from entrylog {}", entryLogId);
                return;
            }
            long offset = pos;
            pos += 4;
            int entrySize = headerBuffer.readInt();
            long ledgerId = headerBuffer.readLong();
            headerBuffer.clear();

            if (ledgerId == INVALID_LID || !scanner.accept(ledgerId)) {
                // skip this entry
                pos += entrySize;
                continue;
            }
            // read the entry

            data.clear();
            data.capacity(entrySize);
            int rc = readFromLogChannel(entryLogId, bc, data, pos);
            if (rc != entrySize) {
                LOG.warn("Short read for ledger entry from entryLog {}@{} ({} != {})", entryLogId, pos, rc,
                        entrySize);
                return;
            }
            // process the entry
            scanner.process(ledgerId, offset, data);

            // Advance position to the next entry
            pos += entrySize;
        }
    } finally {
        data.release();
    }
}
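
Note that data.capacity(entrySize) in the loop above is the one-argument overload ByteBuf.capacity(int), which adjusts the buffer's capacity to the given value (expanding or truncating as needed); the zero-argument capacity() documented on this page only reads the current value.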

From source file: org.apache.bookkeeper.bookie.FileInfo.java

License: Apache License

public void setExplicitLac(ByteBuf lac) {
    long explicitLacValue;
    synchronized (this) {
        if (explicitLac == null) {
            explicitLac = ByteBuffer.allocate(lac.capacity());
        }
        lac.readBytes(explicitLac);
        explicitLac.rewind();

        // skip the ledger id
        explicitLac.getLong();
        explicitLacValue = explicitLac.getLong();
        explicitLac.rewind();
        if (LOG.isDebugEnabled()) {
            LOG.debug("fileInfo:SetLac: {}", explicitLac);
        }
        needFlushHeader = true;
    }
    setLastAddConfirmed(explicitLacValue);
}
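
Allocating the NIO buffer with lac.capacity() implicitly assumes the lac buffer's reader index is at 0, so that its full contents are readable: lac.readBytes(explicitLac) transfers exactly explicitLac.remaining() bytes and would throw if fewer than that were readable.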

From source file: org.apache.bookkeeper.bookie.Journal.java

License: Apache License

/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>
 * Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches journal file size
 * limitation.
 * </p>
 * <p>
 * During journal rolling, it first closes the journal being written, generates a
 * new journal file using the current timestamp, and continues the persistence logic.
 * Those journals will be garbage collected in SyncThread.
 * </p>
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LOG.info("Starting journal on {}", journalDirectory);

    if (conf.isBusyWaitEnabled()) {
        try {
            CpuAffinity.acquireCore();
        } catch (Exception e) {
            LOG.warn("Unable to acquire CPU core for Journal thread: {}", e.getMessage(), e);
        }
    }

    RecyclableArrayList<QueueEntry> toFlush = entryListRecycler.newInstance();
    int numEntriesToFlush = 0;
    ByteBuf lenBuff = Unpooled.buffer(4);
    ByteBuf paddingBuff = Unpooled.buffer(2 * conf.getJournalAlignmentSize());
    paddingBuff.writeZero(paddingBuff.capacity());

    BufferedChannel bc = null;
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = Stopwatch.createUnstarted();
    Stopwatch journalFlushWatcher = Stopwatch.createUnstarted();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which uses System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;

        long dequeueStartTime = 0L;

        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {

                logId = logId + 1;

                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize,
                        journalWriteBufferSize, journalAlignmentSize, removePagesFromCache,
                        journalFormatVersionToWrite, getBufferedChannelBuilder());

                journalStats.getJournalCreationStats().registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                bc = logFile.getBufferedChannel();

                lastFlushPosition = bc.position();
            }

            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalStats.getJournalProcessTimeStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(dequeueStartTime), TimeUnit.NANOSECONDS);
                }

                if (numEntriesToFlush == 0) {
                    qe = queue.take();
                    dequeueStartTime = MathUtils.nowInNano();
                    journalStats.getJournalQueueStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                } else {
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    qe = queue.poll(pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                    dequeueStartTime = MathUtils.nowInNano();

                    if (qe != null) {
                        journalStats.getJournalQueueStats().registerSuccessfulEvent(
                                MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                    }

                    boolean shouldFlush = false;
                    // We should issue a forceWrite if any of the three conditions below holds
                    // 1. If the oldest pending entry has been pending for longer than the max wait time
                    if (maxGroupWaitInNanos > 0 && !groupWhenTimeout
                            && (MathUtils.elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                        groupWhenTimeout = true;
                    } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout && qe != null
                            && MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos) {
                        // When the group timeout fires, it is better to look ahead, as many
                        // entries may already have timed out due to a previous slow write
                        // (a filesystem write slowed down by a force write). Group those
                        // entries in the queue that a) have already timed out, while
                        // b) limiting the number of entries grouped.
                        groupWhenTimeout = false;
                        shouldFlush = true;
                        journalStats.getFlushMaxWaitCounter().inc();
                    } else if (qe != null
                            && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                                    || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                        // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                        shouldFlush = true;
                        journalStats.getFlushMaxOutstandingBytesCounter().inc();
                    } else if (qe == null) {
                        // We should get here only if flushWhenQueueEmpty is true, else we would wait
                        // for a timeout that would put us past the maxWait threshold
                        // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                        // publish at a time - common case in tests.
                        shouldFlush = true;
                        journalStats.getFlushEmptyQueueCounter().inc();
                    }

                    // toFlush is non null and not empty so should be safe to access getFirst
                    if (shouldFlush) {
                        if (journalFormatVersionToWrite >= JournalChannel.V5) {
                            writePaddingBytes(logFile, paddingBuff, journalAlignmentSize);
                        }
                        journalFlushWatcher.reset().start();
                        bc.flush();

                        for (int i = 0; i < toFlush.size(); i++) {
                            QueueEntry entry = toFlush.get(i);
                            if (entry != null && (!syncData || entry.ackBeforeSync)) {
                                toFlush.set(i, null);
                                numEntriesToFlush--;
                                cbThreadPool.execute(entry);
                            }
                        }

                        lastFlushPosition = bc.position();
                        journalStats.getJournalFlushStats().registerSuccessfulEvent(
                                journalFlushWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                        // Trace the lifetime of entries through persistence
                        if (LOG.isDebugEnabled()) {
                            for (QueueEntry e : toFlush) {
                                if (e != null) {
                                    LOG.debug("Written and queuing for flush Ledger: {}  Entry: {}", e.ledgerId,
                                            e.entryId);
                                }
                            }
                        }

                        journalStats.getForceWriteBatchEntriesStats()
                                .registerSuccessfulValue(numEntriesToFlush);
                        journalStats.getForceWriteBatchBytesStats().registerSuccessfulValue(batchSize);

                        boolean shouldRolloverJournal = (lastFlushPosition > maxJournalSize);
                        if (syncData) {
                            // Trigger data sync to disk in the "Force-Write" thread.
                            // Callback will be triggered after data is committed to disk
                            forceWriteRequests.put(createForceWriteRequest(logFile, logId, lastFlushPosition,
                                    toFlush, shouldRolloverJournal, false));
                            toFlush = entryListRecycler.newInstance();
                            numEntriesToFlush = 0;
                        } else {
                            // Data is already written to the file (though it might still be in the OS page cache)
                            lastLogMark.setCurLogMark(logId, lastFlushPosition);
                            toFlush.clear();
                            numEntriesToFlush = 0;
                            if (shouldRolloverJournal) {
                                forceWriteRequests.put(createForceWriteRequest(logFile, logId,
                                        lastFlushPosition, EMPTY_ARRAY_LIST, shouldRolloverJournal, false));
                            }
                        }

                        batchSize = 0L;
                        // check whether journal file is over file limit
                        if (shouldRolloverJournal) {
                            // if the journal file is rolled over, the journal file will be closed after last
                            // entry is force written to disk.
                            logFile = null;
                            continue;
                        }
                    }
                }
            }

            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }

            if (qe == null) { // no more queue entry
                continue;
            }
            if ((qe.entryId == Bookie.METAENTRY_ID_LEDGER_EXPLICITLAC)
                    && (journalFormatVersionToWrite < JournalChannel.V6)) {
                /*
                 * this means we are using new code which supports
                 * persisting explicitLac, but "journalFormatVersionToWrite"
                 * is set to some older value (< V6). In this case we
                 * shouldn't write this special entry
                 * (METAENTRY_ID_LEDGER_EXPLICITLAC) to Journal.
                 */
                qe.entry.release();
            } else if (qe.entryId != Bookie.METAENTRY_ID_FORCE_LEDGER) {
                int entrySize = qe.entry.readableBytes();
                journalStats.getJournalWriteBytes().add(entrySize);
                journalStats.getJournalQueueSize().dec();

                batchSize += (4 + entrySize);

                lenBuff.clear();
                lenBuff.writeInt(entrySize);

                // preAlloc based on size
                logFile.preAllocIfNeeded(4 + entrySize);

                bc.write(lenBuff);
                bc.write(qe.entry);
                qe.entry.release();
            }

            toFlush.add(qe);
            numEntriesToFlush++;
            qe = null;
        }
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("Journal exits when shutting down");
    } finally {
        // There could be packets queued for forceWrite on this logFile.
        // That is fine, as this exception is going to take down the
        // bookie anyway. If we execute this as part of a graceful shutdown,
        // close will flush the file system cache making any previous
        // cached writes durable so this is fine as well.
        IOUtils.close(LOG, bc);
    }
    LOG.info("Journal exited loop!");
}
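
Near the top of run(), paddingBuff.writeZero(paddingBuff.capacity()) zero-fills the reusable padding buffer to its full capacity; it is allocated at twice the journal alignment size, presumably so that writePaddingBytes() always has enough zero bytes on hand to pad out a partially filled block.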

From source file: org.apache.bookkeeper.bookie.storage.ldb.TransientLedgerInfo.java

License: Apache License

public void setExplicitLac(ByteBuf lac) {
    long explicitLacValue;
    synchronized (this) {
        if (explicitLac == null) {
            explicitLac = ByteBuffer.allocate(lac.capacity());
        }
        lac.readBytes(explicitLac);
        explicitLac.rewind();

        // skip the ledger id
        explicitLac.getLong();
        explicitLacValue = explicitLac.getLong();
        explicitLac.rewind();

        lastAccessed = System.currentTimeMillis();
    }
    setLastAddConfirmed(explicitLacValue);
}
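
This mirrors FileInfo.setExplicitLac above, with the same implicit assumption that the lac buffer's reader index is at 0 when the destination buffer is sized via capacity().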