Example usage for io.netty.buffer ByteBuf writeZero

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf writeZero.

Prototype

public abstract ByteBuf writeZero(int length);

Document

Fills this buffer with NUL (0x00) starting at the current writerIndex and increases the writerIndex by the specified length.
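A minimal, self-contained sketch of the basic behavior (the class name, buffer size, and byte values here are arbitrary illustrations, not taken from the projects below): writeZero zero-fills the next length bytes and advances the writerIndex.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WriteZeroExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);

        buf.writeByte(0x7F); // writerIndex: 0 -> 1
        buf.writeZero(3);    // three NUL (0x00) bytes; writerIndex: 1 -> 4

        System.out.println(buf.writerIndex()); // 4
        System.out.println(buf.getByte(2));    // 0

        buf.release(); // ByteBuf is reference-counted
    }
}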

Usage

From source file:net.tomp2p.rpc.SimpleBloomFilter.java

License:Apache License

/**
 * Converts data to a byte buffer. The first two bytes contain the size of
 * this simple bloom filter. Thus, the bloom filter can only be of length
 * 65536.
 * 
 * @param buf
 *            The byte buffer where the bloom filter will be written.
 */
public void toByteBuf(final ByteBuf buf) {
    buf.writeShort(byteArraySize + SIZE_HEADER_ELEMENTS + SIZE_HEADER_LENGTH);
    buf.writeInt(expectedElements);
    byte[] tmp = RPCUtils.toByteArray(bitSet);
    int currentByteArraySize = tmp.length;
    buf.writeBytes(tmp);
    buf.writeZero(byteArraySize - currentByteArraySize);
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

License:Apache License

private JournalChannel writeV5Journal(File journalDir, int numEntries, byte[] masterKey) throws Exception {
    long logId = System.currentTimeMillis();
    JournalChannel jc = new JournalChannel(journalDir, logId);

    BufferedChannel bc = jc.getBufferedChannel();

    ByteBuf paddingBuff = Unpooled.buffer();
    paddingBuff.writeZero(2 * JournalChannel.SECTOR_SIZE);
    byte[] data = new byte[4 * 1024 * 1024];
    Arrays.fill(data, (byte) 'X');
    long lastConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    long length = 0;
    for (int i = 0; i <= numEntries; i++) {
        ByteBuf packet;
        if (i == 0) {
            packet = generateMetaEntry(1, masterKey);
        } else {
            packet = ClientUtil.generatePacket(1, i, lastConfirmed, length, data, 0, i);
        }
        lastConfirmed = i;
        length += i;
        ByteBuf lenBuff = Unpooled.buffer();
        lenBuff.writeInt(packet.readableBytes());
        bc.write(lenBuff);
        bc.write(packet);
        packet.release();
        Journal.writePaddingBytes(jc, paddingBuff, JournalChannel.SECTOR_SIZE);
    }
    // write fence key
    ByteBuf packet = generateFenceEntry(1);
    ByteBuf lenBuf = Unpooled.buffer();
    lenBuf.writeInt(packet.readableBytes());
    bc.write(lenBuf);
    bc.write(packet);
    Journal.writePaddingBytes(jc, paddingBuff, JournalChannel.SECTOR_SIZE);
    bc.flushAndForceWrite(false);
    updateJournalVersion(jc, JournalChannel.V5);
    return jc;
}

From source file:org.apache.bookkeeper.bookie.Journal.java

License:Apache License

/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>
 * Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches journal file size
 * limitation.
 * </p>
 * <p>
 * During journal rolling, it first closes the journal being written, generates a
 * new journal file using the current timestamp, and continues the persistence logic.
 * Those journals will be garbage collected in SyncThread.
 * </p>
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LOG.info("Starting journal on {}", journalDirectory);

    if (conf.isBusyWaitEnabled()) {
        try {
            CpuAffinity.acquireCore();
        } catch (Exception e) {
            LOG.warn("Unable to acquire CPU core for Journal thread: {}", e.getMessage(), e);
        }
    }

    RecyclableArrayList<QueueEntry> toFlush = entryListRecycler.newInstance();
    int numEntriesToFlush = 0;
    ByteBuf lenBuff = Unpooled.buffer(4);
    ByteBuf paddingBuff = Unpooled.buffer(2 * conf.getJournalAlignmentSize());
    paddingBuff.writeZero(paddingBuff.capacity());

    BufferedChannel bc = null;
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = Stopwatch.createUnstarted();
    Stopwatch journalFlushWatcher = Stopwatch.createUnstarted();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which uses System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;

        long dequeueStartTime = 0L;

        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {

                logId = logId + 1;

                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize,
                        journalWriteBufferSize, journalAlignmentSize, removePagesFromCache,
                        journalFormatVersionToWrite, getBufferedChannelBuilder());

                journalStats.getJournalCreationStats().registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                bc = logFile.getBufferedChannel();

                lastFlushPosition = bc.position();
            }

            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalStats.getJournalProcessTimeStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(dequeueStartTime), TimeUnit.NANOSECONDS);
                }

                if (numEntriesToFlush == 0) {
                    qe = queue.take();
                    dequeueStartTime = MathUtils.nowInNano();
                    journalStats.getJournalQueueStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                } else {
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    qe = queue.poll(pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                    dequeueStartTime = MathUtils.nowInNano();

                    if (qe != null) {
                        journalStats.getJournalQueueStats().registerSuccessfulEvent(
                                MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                    }

                    boolean shouldFlush = false;
                    // We should issue a forceWrite if any of the three conditions below holds
                    // 1. If the oldest pending entry has been pending for longer than the max wait time
                    if (maxGroupWaitInNanos > 0 && !groupWhenTimeout
                            && (MathUtils.elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                        groupWhenTimeout = true;
                    } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout && qe != null
                            && MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos) {
                        // when the group wait times out, it is better to look ahead, as there
                        // might be many entries that have already timed out due to a previous
                        // slow write (a write to the filesystem impacted by a force write).
                        // Group the entries in the queue that
                        // a) have already timed out
                        // b) stay within the limit on the number of entries to group
                        groupWhenTimeout = false;
                        shouldFlush = true;
                        journalStats.getFlushMaxWaitCounter().inc();
                    } else if (qe != null
                            && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                                    || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                        // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                        shouldFlush = true;
                        journalStats.getFlushMaxOutstandingBytesCounter().inc();
                    } else if (qe == null) {
                        // We should get here only if flushWhenQueueEmpty is true; otherwise we
                        // would wait for a timeout that would put us past the maxWait threshold
                        // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                        // publish at a time - common case in tests.
                        shouldFlush = true;
                        journalStats.getFlushEmptyQueueCounter().inc();
                    }

                    // toFlush is non-null and not empty, so it should be safe to access getFirst
                    if (shouldFlush) {
                        if (journalFormatVersionToWrite >= JournalChannel.V5) {
                            writePaddingBytes(logFile, paddingBuff, journalAlignmentSize);
                        }
                        journalFlushWatcher.reset().start();
                        bc.flush();

                        for (int i = 0; i < toFlush.size(); i++) {
                            QueueEntry entry = toFlush.get(i);
                            if (entry != null && (!syncData || entry.ackBeforeSync)) {
                                toFlush.set(i, null);
                                numEntriesToFlush--;
                                cbThreadPool.execute(entry);
                            }
                        }

                        lastFlushPosition = bc.position();
                        journalStats.getJournalFlushStats().registerSuccessfulEvent(
                                journalFlushWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                        // Trace the lifetime of entries through persistence
                        if (LOG.isDebugEnabled()) {
                            for (QueueEntry e : toFlush) {
                                if (e != null) {
                                    LOG.debug("Written and queuing for flush Ledger: {}  Entry: {}", e.ledgerId,
                                            e.entryId);
                                }
                            }
                        }

                        journalStats.getForceWriteBatchEntriesStats()
                                .registerSuccessfulValue(numEntriesToFlush);
                        journalStats.getForceWriteBatchBytesStats().registerSuccessfulValue(batchSize);

                        boolean shouldRolloverJournal = (lastFlushPosition > maxJournalSize);
                        if (syncData) {
                            // Trigger data sync to disk in the "Force-Write" thread.
                            // Callback will be triggered after data is committed to disk
                            forceWriteRequests.put(createForceWriteRequest(logFile, logId, lastFlushPosition,
                                    toFlush, shouldRolloverJournal, false));
                            toFlush = entryListRecycler.newInstance();
                            numEntriesToFlush = 0;
                        } else {
                            // Data is already written on the file (though it might still be in the OS page-cache)
                            lastLogMark.setCurLogMark(logId, lastFlushPosition);
                            toFlush.clear();
                            numEntriesToFlush = 0;
                            if (shouldRolloverJournal) {
                                forceWriteRequests.put(createForceWriteRequest(logFile, logId,
                                        lastFlushPosition, EMPTY_ARRAY_LIST, shouldRolloverJournal, false));
                            }
                        }

                        batchSize = 0L;
                        // check whether journal file is over file limit
                        if (shouldRolloverJournal) {
                            // if the journal file is rolled over, the journal file will be closed after last
                            // entry is force written to disk.
                            logFile = null;
                            continue;
                        }
                    }
                }
            }

            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }

            if (qe == null) { // no more queue entry
                continue;
            }
            if ((qe.entryId == Bookie.METAENTRY_ID_LEDGER_EXPLICITLAC)
                    && (journalFormatVersionToWrite < JournalChannel.V6)) {
                /*
                 * this means we are using new code which supports
                 * persisting explicitLac, but "journalFormatVersionToWrite"
                 * is set to some older value (< V6). In this case we
                 * shouldn't write this special entry
                 * (METAENTRY_ID_LEDGER_EXPLICITLAC) to Journal.
                 */
                qe.entry.release();
            } else if (qe.entryId != Bookie.METAENTRY_ID_FORCE_LEDGER) {
                int entrySize = qe.entry.readableBytes();
                journalStats.getJournalWriteBytes().add(entrySize);
                journalStats.getJournalQueueSize().dec();

                batchSize += (4 + entrySize);

                lenBuff.clear();
                lenBuff.writeInt(entrySize);

                // preAlloc based on size
                logFile.preAllocIfNeeded(4 + entrySize);

                bc.write(lenBuff);
                bc.write(qe.entry);
                qe.entry.release();
            }

            toFlush.add(qe);
            numEntriesToFlush++;
            qe = null;
        }
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("Journal exits when shutting down");
    } finally {
        // There could be packets queued for forceWrite on this logFile.
        // That is fine, as this exception is going to take down the
        // bookie anyway. If we execute this as part of a graceful shutdown,
        // close will flush the file system cache, making any previously
        // cached writes durable, so this is fine as well.
        IOUtils.close(LOG, bc);
    }
    LOG.info("Journal exited loop!");
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageWriteCacheTest.java

License:Apache License

@Test
public void writeCacheFull() throws Exception {
    storage.setMasterKey(4, "key".getBytes());
    assertEquals(false, storage.isFenced(4));
    assertEquals(true, storage.ledgerExists(4));

    assertEquals("key", new String(storage.readMasterKey(4)));

    // Add enough entries to fill the 1st write cache
    for (int i = 0; i < 5; i++) {
        ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
        entry.writeLong(4); // ledger id
        entry.writeLong(i); // entry id
        entry.writeZero(100 * 1024);
        storage.addEntry(entry);
    }

    for (int i = 0; i < 5; i++) {
        ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
        entry.writeLong(4); // ledger id
        entry.writeLong(5 + i); // entry id
        entry.writeZero(100 * 1024);
        storage.addEntry(entry);
    }

    // Next add should fail for cache full
    ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
    entry.writeLong(4); // ledger id
    entry.writeLong(22); // entry id
    entry.writeZero(100 * 1024);

    try {
        storage.addEntry(entry);
        fail("Should have thrown exception");
    } catch (OperationRejectedException e) {
        // Expected
    }
}

From source file:org.dcache.xrootd.protocol.messages.AuthenticationResponse.java

License:Open Source License

/**
 * This code is shared with the outbound authentication request used by the tpc client.
 */
public static void writeBytes(ByteBuf buffer, String protocol, int step, List<XrootdBucket> buckets) {
    byte[] bytes = protocol.getBytes(US_ASCII);
    buffer.writeBytes(bytes);
    /* protocol must be 0-padded to 4 bytes */
    buffer.writeZero(4 - bytes.length);

    buffer.writeInt(step);
    for (XrootdBucket bucket : buckets) {
        bucket.serialize(buffer);
    }

    buffer.writeInt(BucketType.kXRS_none.getCode());
}

From source file:org.dcache.xrootd.protocol.messages.OpenResponse.java

License:Open Source License

@Override
protected void getBytes(ByteBuf buffer) {
    buffer.writeInt(fileHandle);

    if (cpsize != null && cptype != null) {
        buffer.writeInt(cpsize);
        int len = Math.min(cptype.length(), 4);
        buffer.writeBytes(cptype.getBytes(US_ASCII), 0, len);
        buffer.writeZero(4 - len);
    } else if (fs != null) {
        buffer.writeZero(8);
    }

    if (fs != null) {
        buffer.writeBytes(fs.toString().getBytes(US_ASCII));
        buffer.writeByte('\0');
    }
}

From source file:org.dcache.xrootd.security.NestedBucketBuffer.java

License:Open Source License

@Override
/**
 * Serializes all the buckets in this buffer to the given output buffer.
 *
 * @param out The ByteBuf to which this buffer will be serialized
 */
public void serialize(ByteBuf out) {

    super.serialize(out);

    //
    // The nesting is a bit tricky. First, we skip 4 bytes (this is where we will later
    // store the size of the nested serialized bucket buffer, which we don't know yet).
    // Then we serialize the nested bucket buffer into the ByteBuf. Finally, we jump back
    // to the previously marked position and store the size of the nested bucket buffer.
    //
    int start = out.writerIndex();
    out.writeInt(0); // placeholder value

    /* the protocol must be 0-padded to 4 bytes */
    byte[] protocol = _protocol.getBytes(US_ASCII);
    out.writeBytes(protocol);
    out.writeZero(4 - protocol.length);

    out.writeInt(_step);

    for (XrootdBucket bucket : _nestedBuckets.values()) {
        bucket.serialize(out);
    }

    out.writeInt(BucketType.kXRS_none.getCode());

    out.setInt(start, out.writerIndex() - start - 4);
}

From source file:org.dcache.xrootd.tpc.protocol.messages.OutboundAuthenticationRequest.java

License:Open Source License

@Override
protected void getParams(ByteBuf buffer) {
    // pad ... skip the 16 bytes
    buffer.writeZero(16);
    buffer.writeInt(12 + length);
    writeBytes(buffer, protocol, step, buckets);
}

From source file:org.eclipse.neoscada.protocol.iec60870.apci.APDUEncoder.java

License:Open Source License

private void handleUFormat(final UnnumberedControl msg, final ByteBuf out) {
    out.ensureWritable(6);
    out.writeByte(Constants.START_BYTE);
    out.writeByte(4);
    out.writeByte(msg.getFunction().getNumericValue() | 0x03 /* bits 1 and 2*/);
    out.writeZero(3);
}

From source file:org.jfxvnc.net.rfb.codec.encoder.ClientCutTextEncoder.java

License:Apache License

@Override
protected void encode(ChannelHandlerContext ctx, ClientCutText msg, List<Object> out) throws Exception {
    byte[] text = msg.getText().getBytes(StandardCharsets.ISO_8859_1);
    ByteBuf buf = ctx.alloc().buffer(8 + text.length);
    buf.writeByte(ClientEventType.CLIENT_CUT_TEXT);
    buf.writeZero(3);
    buf.writeInt(text.length);
    buf.writeBytes(text);

    out.add(buf);
}