List of usage examples for io.netty.buffer ByteBuf writerIndex
public abstract ByteBuf writerIndex(int writerIndex);
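The setter moves the buffer's write cursor to an absolute position without writing any bytes; the span between readerIndex and writerIndex is what the buffer treats as readable. Before the real-world examples below, here is a minimal standalone sketch of that behavior (buffer size and values chosen purely for illustration):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WriterIndexSketch {
    public static void main(String[] args) {
        // Fresh heap buffer with 16 writable bytes; readerIndex = writerIndex = 0.
        ByteBuf buf = Unpooled.buffer(16);

        buf.writeLong(42L);                       // advances writerIndex by 8
        System.out.println(buf.writerIndex());    // 8
        System.out.println(buf.readableBytes());  // 8

        // Jump the write cursor forward; the bytes in between are whatever the
        // backing array already held (zeros for a fresh heap buffer).
        buf.writerIndex(16);
        System.out.println(buf.readableBytes());  // 16

        // writerIndex(int) must stay between readerIndex and capacity,
        // otherwise an IndexOutOfBoundsException is thrown.
        buf.release();
    }
}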
From source file:org.apache.activemq.artemis.core.message.impl.MessageImpl.java
License:Apache License
@Override
public synchronized ActiveMQBuffer getBodyBufferDuplicate() {
   // Must copy buffer before sending it
   ByteBuf byteBuf = ChannelBufferWrapper.unwrap(getBodyBuffer().byteBuf());
   byteBuf = byteBuf.duplicate();
   byteBuf.readerIndex(getBodyBuffer().readerIndex());
   byteBuf.writerIndex(getBodyBuffer().writerIndex());

   return new ResetLimitWrappedActiveMQBuffer(BODY_OFFSET, byteBuf, null);
}
From source file:org.apache.activemq.artemis.core.replication.ReplicationManager.java
License:Apache License
/**
 * Sends large files in reasonably sized chunks to the backup during replication synchronization.
 *
 * @param content        journal type or {@code null} for large-messages and pages
 * @param pageStore      page store name for pages, or {@code null} otherwise
 * @param id             journal file id or (large) message id
 * @param file
 * @param maxBytesToSend maximum number of bytes to read and send from the file
 * @throws Exception
 */
private void sendLargeFile(AbstractJournalStorageManager.JournalContent content,
                           SimpleString pageStore,
                           final long id,
                           SequentialFile file,
                           long maxBytesToSend) throws Exception {
   if (!enabled)
      return;
   if (!file.isOpen()) {
      file.open();
   }
   int size = 32 * 1024;
   int flowControlSize = 10;
   int packetsSent = 0;
   FlushAction action = new FlushAction();

   try {
      try (FileInputStream fis = new FileInputStream(file.getJavaFile());
           FileChannel channel = fis.getChannel()) {

         // We can afford having a single buffer here for this entire loop
         // because sendReplicatePacket will encode the packet as a NettyBuffer
         // through ActiveMQBuffer class leaving this buffer free to be reused on the next copy
         while (true) {
            final ByteBuf buffer = PooledByteBufAllocator.DEFAULT.directBuffer(size, size);
            buffer.clear();
            ByteBuffer byteBuffer = buffer.writerIndex(size).readerIndex(0).nioBuffer();
            final int bytesRead = channel.read(byteBuffer);

            int toSend = bytesRead;
            if (bytesRead > 0) {
               if (bytesRead >= maxBytesToSend) {
                  toSend = (int) maxBytesToSend;
                  maxBytesToSend = 0;
               } else {
                  maxBytesToSend = maxBytesToSend - bytesRead;
               }
            }

            logger.debug("sending " + buffer.writerIndex() + " bytes on file " + file.getFileName());

            // sending -1 or 0 bytes will close the file at the backup

            // We cannot simply send everything of a file through the executor,
            // otherwise we would run out of memory.
            // so we don't use the executor here
            sendReplicatePacket(new ReplicationSyncFileMessage(content, pageStore, id, toSend, buffer), true);
            packetsSent++;

            if (packetsSent % flowControlSize == 0) {
               flushReplicationStream(action);
            }
            if (bytesRead == -1 || bytesRead == 0 || maxBytesToSend == 0)
               break;
         }
      }
      flushReplicationStream(action);
   } finally {
      if (file.isOpen())
         file.close();
   }
}
From source file:org.apache.activemq.artemis.protocol.amqp.util.NettyWritableTest.java
License:Apache License
@Test
public void testHasRemaining() {
   ByteBuf buffer = Unpooled.buffer(100, 100);
   NettyWritable writable = new NettyWritable(buffer);

   assertTrue(writable.hasRemaining());
   writable.put((byte) 0);
   assertTrue(writable.hasRemaining());
   buffer.writerIndex(buffer.maxCapacity());
   assertFalse(writable.hasRemaining());
}
From source file:org.apache.arrow.vector.VectorTrimmer.java
License:Apache License
public static void trim(ByteBuf data, int idx) {
   data.writerIndex(idx);
   if (data instanceof ArrowBuf) {
      // data.capacity(idx);
      data.writerIndex(idx);
   }
}
From source file:org.apache.bookkeeper.benchmark.BenchBookie.java
License:Apache License
/**
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args)
        throws InterruptedException, ParseException, IOException, BKException, KeeperException {
   Options options = new Options();
   options.addOption("host", true, "Hostname or IP of bookie to benchmark");
   options.addOption("port", true, "Port of bookie to benchmark (default 3181)");
   options.addOption("zookeeper", true, "Zookeeper ensemble, (default \"localhost:2181\")");
   options.addOption("size", true, "Size of message to send, in bytes (default 1024)");
   options.addOption("warmupCount", true, "Number of messages in warmup phase (default 999)");
   options.addOption("latencyCount", true, "Number of messages in latency phase (default 5000)");
   options.addOption("throughputCount", true, "Number of messages in throughput phase (default 50000)");
   options.addOption("help", false, "This message");

   CommandLineParser parser = new PosixParser();
   CommandLine cmd = parser.parse(options, args);

   if (cmd.hasOption("help") || !cmd.hasOption("host")) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("BenchBookie <options>", options);
      System.exit(-1);
   }

   String addr = cmd.getOptionValue("host");
   int port = Integer.parseInt(cmd.getOptionValue("port", "3181"));
   int size = Integer.parseInt(cmd.getOptionValue("size", "1024"));
   String servers = cmd.getOptionValue("zookeeper", "localhost:2181");
   int warmUpCount = Integer.parseInt(cmd.getOptionValue("warmupCount", "999"));
   int latencyCount = Integer.parseInt(cmd.getOptionValue("latencyCount", "5000"));
   int throughputCount = Integer.parseInt(cmd.getOptionValue("throughputCount", "50000"));

   EventLoopGroup eventLoop;
   if (SystemUtils.IS_OS_LINUX) {
      try {
         eventLoop = new EpollEventLoopGroup();
      } catch (Throwable t) {
         LOG.warn("Could not use Netty Epoll event loop for benchmark {}", t.getMessage());
         eventLoop = new NioEventLoopGroup();
      }
   } else {
      eventLoop = new NioEventLoopGroup();
   }

   OrderedExecutor executor = OrderedExecutor.newBuilder()
           .name("BenchBookieClientScheduler")
           .numThreads(1)
           .build();
   ScheduledExecutorService scheduler = Executors
           .newSingleThreadScheduledExecutor(new DefaultThreadFactory("BookKeeperClientScheduler"));

   ClientConfiguration conf = new ClientConfiguration();
   BookieClient bc = new BookieClientImpl(conf, eventLoop, PooledByteBufAllocator.DEFAULT, executor,
           scheduler, NullStatsLogger.INSTANCE);
   LatencyCallback lc = new LatencyCallback();
   ThroughputCallback tc = new ThroughputCallback();

   long ledger = getValidLedgerId(servers);
   for (long entry = 0; entry < warmUpCount; entry++) {
      ByteBuf toSend = Unpooled.buffer(size);
      toSend.resetReaderIndex();
      toSend.resetWriterIndex();
      toSend.writeLong(ledger);
      toSend.writeLong(entry);
      toSend.writerIndex(toSend.capacity());
      bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
              tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
   }
   LOG.info("Waiting for warmup");
   tc.waitFor(warmUpCount);

   ledger = getValidLedgerId(servers);
   LOG.info("Benchmarking latency");
   long startTime = System.nanoTime();
   for (long entry = 0; entry < latencyCount; entry++) {
      ByteBuf toSend = Unpooled.buffer(size);
      toSend.resetReaderIndex();
      toSend.resetWriterIndex();
      toSend.writeLong(ledger);
      toSend.writeLong(entry);
      toSend.writerIndex(toSend.capacity());
      lc.resetComplete();
      bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
              lc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
      lc.waitForComplete();
   }
   long endTime = System.nanoTime();
   LOG.info("Latency: " + (((double) (endTime - startTime)) / ((double) latencyCount)) / 1000000.0);

   ledger = getValidLedgerId(servers);
   LOG.info("Benchmarking throughput");
   startTime = System.currentTimeMillis();
   tc = new ThroughputCallback();
   for (long entry = 0; entry < throughputCount; entry++) {
      ByteBuf toSend = Unpooled.buffer(size);
      toSend.resetReaderIndex();
      toSend.resetWriterIndex();
      toSend.writeLong(ledger);
      toSend.writeLong(entry);
      toSend.writerIndex(toSend.capacity());
      bc.addEntry(new BookieSocketAddress(addr, port), ledger, new byte[20], entry, ByteBufList.get(toSend),
              tc, null, BookieProtocol.FLAG_NONE, false, WriteFlag.NONE);
   }
   tc.waitFor(throughputCount);
   endTime = System.currentTimeMillis();
   LOG.info("Throughput: " + ((long) throughputCount) * 1000 / (endTime - startTime));

   bc.close();
   scheduler.shutdown();
   eventLoop.shutdownGracefully();
   executor.shutdown();
}
From source file:org.apache.bookkeeper.bookie.CheckpointOnNewLedgersTest.java
License:Apache License
private static ByteBuf createByteBuf(long ledgerId, long entryId, int entrySize) {
   byte[] data = new byte[entrySize];
   ThreadLocalRandom.current().nextBytes(data);
   ByteBuf buffer = Unpooled.wrappedBuffer(data);
   buffer.writerIndex(0);
   buffer.writeLong(ledgerId);
   buffer.writeLong(entryId);
   buffer.writeLong(entryId - 1); // lac
   buffer.writerIndex(entrySize);
   return buffer;
}
From source file:org.apache.bookkeeper.bookie.EntryLogger.java
License:Apache License
public ByteBuf internalReadEntry(long ledgerId, long entryId, long location)
        throws IOException, Bookie.NoEntryException {
   long entryLogId = logIdForOffset(location);
   long pos = posForOffset(location);

   final EntryLogEntry entry;
   try {
      entry = getFCForEntryInternal(ledgerId, entryId, entryLogId, pos);
   } catch (EntryLookupException.MissingEntryException entryLookupError) {
      throw new Bookie.NoEntryException("Short read from entrylog " + entryLogId, ledgerId, entryId);
   } catch (EntryLookupException e) {
      throw new IOException(e.toString());
   }

   ByteBuf data = allocator.buffer(entry.entrySize, entry.entrySize);
   int rc = readFromLogChannel(entryLogId, entry.fc, data, pos);
   if (rc != entry.entrySize) {
      // Note that throwing NoEntryException here instead of IOException is not
      // without risk. If all bookies in a quorum throw this same exception
      // the client will assume that it has reached the end of the ledger.
      // However, this may not be the case, as a very specific error condition
      // could have occurred, where the length of the entry was corrupted on all
      // replicas. However, the chance of this happening is very very low, so
      // returning NoEntryException is mostly safe.
      data.release();
      throw new Bookie.NoEntryException("Short read for " + ledgerId + "@" + entryId + " in " + entryLogId
              + "@" + pos + "(" + rc + "!=" + entry.entrySize + ")", ledgerId, entryId);
   }
   data.writerIndex(entry.entrySize);

   return data;
}
From source file:org.apache.bookkeeper.bookie.Journal.java
License:Apache License
static void writePaddingBytes(JournalChannel jc, ByteBuf paddingBuffer, int journalAlignSize)
        throws IOException {
   int bytesToAlign = (int) (jc.bc.position() % journalAlignSize);
   if (0 != bytesToAlign) {
      int paddingBytes = journalAlignSize - bytesToAlign;
      if (paddingBytes < 8) {
         paddingBytes = journalAlignSize - (8 - paddingBytes);
      } else {
         paddingBytes -= 8;
      }
      paddingBuffer.clear();
      // padding mask
      paddingBuffer.writeInt(PADDING_MASK);
      // padding len
      paddingBuffer.writeInt(paddingBytes);
      // padding bytes
      paddingBuffer.writerIndex(paddingBuffer.writerIndex() + paddingBytes);
      jc.preAllocIfNeeded(paddingBuffer.readableBytes());
      // write padding bytes
      jc.bc.write(paddingBuffer);
   }
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java
License:Apache License
@Test
public void simple() throws Exception {
   WriteCache cache = new WriteCache(allocator, 10 * 1024);

   ByteBuf entry1 = allocator.buffer(1024);
   ByteBufUtil.writeUtf8(entry1, "entry-1");
   entry1.writerIndex(entry1.capacity());

   assertTrue(cache.isEmpty());
   assertEquals(0, cache.count());
   assertEquals(0, cache.size());

   cache.put(1, 1, entry1);

   assertFalse(cache.isEmpty());
   assertEquals(1, cache.count());
   assertEquals(entry1.readableBytes(), cache.size());

   assertEquals(entry1, cache.get(1, 1));
   assertNull(cache.get(1, 2));
   assertNull(cache.get(2, 1));

   assertEquals(entry1, cache.getLastEntry(1));
   assertEquals(null, cache.getLastEntry(2));

   cache.clear();
   assertTrue(cache.isEmpty());
   assertEquals(0, cache.count());
   assertEquals(0, cache.size());

   entry1.release();
   cache.close();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java
License:Apache License
@Test
public void cacheFull() throws Exception {
   int cacheSize = 10 * 1024;
   int entrySize = 1024;
   int entriesCount = cacheSize / entrySize;

   WriteCache cache = new WriteCache(allocator, cacheSize);

   ByteBuf entry = allocator.buffer(entrySize);
   entry.writerIndex(entry.capacity());

   for (int i = 0; i < entriesCount; i++) {
      assertTrue(cache.put(1, i, entry));
   }

   assertFalse(cache.put(1, 11, entry));

   assertFalse(cache.isEmpty());
   assertEquals(entriesCount, cache.count());
   assertEquals(cacheSize, cache.size());

   AtomicInteger findCount = new AtomicInteger(0);
   cache.forEach((ledgerId, entryId, data) -> {
      findCount.incrementAndGet();
   });
   assertEquals(entriesCount, findCount.get());

   cache.deleteLedger(1);

   findCount.set(0);
   cache.forEach((ledgerId, entryId, data) -> {
      findCount.incrementAndGet();
   });
   assertEquals(0, findCount.get());

   entry.release();
   cache.close();
}