List of usage examples for io.netty.buffer.ByteBuf#readLong()
public abstract long readLong();
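readLong() reads the next eight bytes at the current readerIndex as a big-endian signed 64-bit value and advances readerIndex by 8; if fewer than eight readable bytes remain it throws an IndexOutOfBoundsException. A minimal, self-contained sketch of that contract (class name and values are illustrative only):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadLongExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeLong(42L);
        buf.writeLong(-7L);

        System.out.println(buf.readableBytes()); // 16

        long first = buf.readLong();             // 42, readerIndex 0 -> 8
        long second = buf.readLong();            // -7, readerIndex 8 -> 16

        System.out.println(first + ", " + second);
        System.out.println(buf.readableBytes()); // 0; another readLong() would throw
                                                 // IndexOutOfBoundsException

        buf.release(); // ByteBufs are reference-counted; release when done
    }
}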
From source file: org.apache.bookkeeper.tools.cli.commands.bookie.FormatUtil.java
License: Apache License
/**
 * Format the message into a readable format.
 *
 * @param pos
 *          File offset of the message stored in entry log file
 * @param recBuff
 *          Entry Data
 * @param printMsg
 *          Whether printing the message body
 * @param ledgerIdFormatter
 * @param entryFormatter
 */
public static void formatEntry(long pos, ByteBuf recBuff, boolean printMsg,
        LedgerIdFormatter ledgerIdFormatter, EntryFormatter entryFormatter) {
    int entrySize = recBuff.readableBytes();
    long ledgerId = recBuff.readLong();
    long entryId = recBuff.readLong();

    System.out.println("--------- Lid=" + ledgerIdFormatter.formatLedgerId(ledgerId) + ", Eid=" + entryId
            + ", ByteOffset=" + pos + ", EntrySize=" + entrySize + " ---------");

    if (entryId == Bookie.METAENTRY_ID_LEDGER_KEY) {
        int masterKeyLen = recBuff.readInt();
        byte[] masterKey = new byte[masterKeyLen];
        recBuff.readBytes(masterKey);
        System.out.println("Type: META");
        System.out.println("MasterKey: " + bytes2Hex(masterKey));
        System.out.println();
        return;
    }
    if (entryId == Bookie.METAENTRY_ID_FENCE_KEY) {
        System.out.println("Type: META");
        System.out.println("Fenced");
        System.out.println();
        return;
    }

    // process a data entry
    long lastAddConfirmed = recBuff.readLong();
    System.out.println("Type: DATA");
    System.out.println("LastConfirmed: " + lastAddConfirmed);
    if (!printMsg) {
        System.out.println();
        return;
    }

    // skip digest checking
    recBuff.skipBytes(8);
    System.out.println("Data:");
    System.out.println();
    try {
        byte[] ret = new byte[recBuff.readableBytes()];
        recBuff.readBytes(ret);
        entryFormatter.formatEntry(ret);
    } catch (Exception e) {
        System.out.println("N/A. Corrupted.");
    }
    System.out.println();
}
From source file: org.apache.flink.runtime.executiongraph.ExecutionAttemptID.java
License: Apache License
public static ExecutionAttemptID fromByteBuf(ByteBuf buf) {
    long lower = buf.readLong();
    long upper = buf.readLong();
    return new ExecutionAttemptID(lower, upper);
}
From source file: org.apache.flink.runtime.io.network.partition.consumer.InputChannelID.java
License: Apache License
public static InputChannelID fromByteBuf(ByteBuf buf) {
    long lower = buf.readLong();
    long upper = buf.readLong();
    return new InputChannelID(lower, upper);
}
From source file: org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID.java
License: Apache License
public static IntermediateResultPartitionID fromByteBuf(ByteBuf buf) {
    long lower = buf.readLong();
    long upper = buf.readLong();
    return new IntermediateResultPartitionID(lower, upper);
}
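The three Flink ID types above follow the same pattern: a 128-bit identifier is serialized as two consecutive longs and rebuilt with two readLong() calls. A framework-free sketch of that round trip, using java.util.UUID only as a stand-in for the ID type:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.util.UUID;

public class IdRoundTrip {
    public static void main(String[] args) {
        UUID id = UUID.randomUUID();

        // Write the 128-bit ID as two consecutive longs (lower half first).
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeLong(id.getLeastSignificantBits());
        buf.writeLong(id.getMostSignificantBits());

        // Read them back in the same order to rebuild the ID.
        long lower = buf.readLong();
        long upper = buf.readLong();
        UUID decoded = new UUID(upper, lower);

        System.out.println(id.equals(decoded)); // true
        buf.release();
    }
}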
From source file: org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java
License: Apache License
/**
 * Deserializes the KvState request message.
 *
 * <p><strong>Important</strong>: the returned buffer is sliced from the
 * incoming ByteBuf stream and retained. Therefore, it needs to be recycled
 * by the consumer.
 *
 * @param buf Buffer to deserialize (expected to be positioned after header)
 * @return Deserialized KvStateRequest
 */
public static KvStateRequest deserializeKvStateRequest(ByteBuf buf) {
    long requestId = buf.readLong();
    KvStateID kvStateId = new KvStateID(buf.readLong(), buf.readLong());

    // Serialized key and namespace
    int length = buf.readInt();

    if (length < 0) {
        throw new IllegalArgumentException("Negative length for serialized key and namespace. "
                + "This indicates a serialization error.");
    }

    // Copy the buffer in order to be able to safely recycle the ByteBuf
    byte[] serializedKeyAndNamespace = new byte[length];
    if (length > 0) {
        buf.readBytes(serializedKeyAndNamespace);
    }

    return new KvStateRequest(requestId, kvStateId, serializedKeyAndNamespace);
}
From source file: org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java
License: Apache License
/**
 * Deserializes the KvState request result.
 *
 * @param buf Buffer to deserialize (expected to be positioned after header)
 * @return Deserialized KvStateRequestResult
 */
public static KvStateRequestResult deserializeKvStateRequestResult(ByteBuf buf) {
    long requestId = buf.readLong();

    // Serialized KvState
    int length = buf.readInt();

    if (length < 0) {
        throw new IllegalArgumentException("Negative length for serialized result. "
                + "This indicates a serialization error.");
    }

    byte[] serializedValue = new byte[length];
    if (length > 0) {
        buf.readBytes(serializedValue);
    }

    return new KvStateRequestResult(requestId, serializedValue);
}
From source file: org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java
License: Apache License
/**
 * Deserializes the KvState request failure.
 *
 * @param buf Buffer to deserialize (expected to be positioned after header)
 * @return Deserialized KvStateRequestFailure
 */
public static KvStateRequestFailure deserializeKvStateRequestFailure(ByteBuf buf)
        throws IOException, ClassNotFoundException {
    long requestId = buf.readLong();

    Throwable cause;
    try (ByteBufInputStream bbis = new ByteBufInputStream(buf);
            ObjectInputStream in = new ObjectInputStream(bbis)) {
        cause = (Throwable) in.readObject();
    }

    return new KvStateRequestFailure(requestId, cause);
}
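The three KvStateRequestSerializer methods above share one wire layout: fixed-width header fields read with readLong()/readInt(), followed by a length-prefixed byte payload that is copied out so the ByteBuf can be recycled. A minimal, framework-free sketch of that layout (the Message class and its fields are illustrative, not Flink API):

import io.netty.buffer.ByteBuf;

public final class LengthPrefixedDecoder {

    /** Illustrative value object; not part of Flink or Netty. */
    public static final class Message {
        public final long requestId;
        public final byte[] payload;

        Message(long requestId, byte[] payload) {
            this.requestId = requestId;
            this.payload = payload;
        }
    }

    /** Reads an 8-byte request id followed by a length-prefixed payload. */
    public static Message decode(ByteBuf buf) {
        long requestId = buf.readLong();

        int length = buf.readInt();
        if (length < 0) {
            throw new IllegalArgumentException("Negative payload length: " + length);
        }

        // Copy the payload so the caller can release the ByteBuf afterwards.
        byte[] payload = new byte[length];
        if (length > 0) {
            buf.readBytes(payload);
        }
        return new Message(requestId, payload);
    }
}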
From source file: org.apache.giraph.comm.netty.handler.ResponseClientHandler.java
License: Apache License
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (!(msg instanceof ByteBuf)) {
        throw new IllegalStateException("messageReceived: Got a non-ByteBuf message " + msg);
    }

    ByteBuf buf = (ByteBuf) msg;
    int senderId = -1;
    long requestId = -1;
    int response = -1;
    try {
        senderId = buf.readInt();
        requestId = buf.readLong();
        response = buf.readByte();
    } catch (IndexOutOfBoundsException e) {
        throw new IllegalStateException("channelRead: Got IndexOutOfBoundsException ", e);
    }
    ReferenceCountUtil.release(buf);

    // Simulate a failed response on the first response (if desired)
    if (dropFirstResponse && !ALREADY_DROPPED_FIRST_RESPONSE) {
        LOG.info("messageReceived: Simulating dropped response " + response + " for request " + requestId);
        setAlreadyDroppedFirstResponse();
        synchronized (workerIdOutstandingRequestMap) {
            workerIdOutstandingRequestMap.notifyAll();
        }
        return;
    }

    if (response == 1) {
        LOG.info("messageReceived: Already completed request (taskId = " + senderId + ", requestId = "
                + requestId + ")");
    } else if (response != 0) {
        throw new IllegalStateException("messageReceived: Got illegal response " + response);
    }

    RequestInfo requestInfo = workerIdOutstandingRequestMap.remove(new ClientRequestId(senderId, requestId));
    if (requestInfo == null) {
        LOG.info("messageReceived: Already received response for (taskId = " + senderId + ", requestId = "
                + requestId + ")");
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("messageReceived: Completed (taskId = " + senderId + ")" + requestInfo + ". Waiting on "
                    + workerIdOutstandingRequestMap.size() + " requests");
        }
    }

    // Help NettyClient#waitSomeRequests() to finish faster
    synchronized (workerIdOutstandingRequestMap) {
        workerIdOutstandingRequestMap.notifyAll();
    }
}
From source file: org.apache.helix.ipc.netty.NettyHelixIPCCallbackHandler.java
License: Apache License
@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) throws Exception {
    try {
        // Message length
        int messageLength = byteBuf.readInt();

        // Message version
        @SuppressWarnings("unused")
        int messageVersion = byteBuf.readInt();

        // Message type
        int messageType = byteBuf.readInt();

        // Message ID
        UUID messageId = new UUID(byteBuf.readLong(), byteBuf.readLong());

        // Cluster
        int clusterSize = byteBuf.readInt();
        checkLength("clusterSize", clusterSize, messageLength);
        String clusterName = toNonEmptyString(clusterSize, byteBuf);

        // Resource
        int resourceSize = byteBuf.readInt();
        checkLength("resourceSize", resourceSize, messageLength);
        String resourceName = toNonEmptyString(resourceSize, byteBuf);

        // Partition
        int partitionSize = byteBuf.readInt();
        checkLength("partitionSize", partitionSize, messageLength);
        String partitionName = toNonEmptyString(partitionSize, byteBuf);

        // State
        int stateSize = byteBuf.readInt();
        checkLength("stateSize", stateSize, messageLength);
        String state = toNonEmptyString(stateSize, byteBuf);

        // Source instance
        int srcInstanceSize = byteBuf.readInt();
        checkLength("srcInstanceSize", srcInstanceSize, messageLength);
        String srcInstance = toNonEmptyString(srcInstanceSize, byteBuf);

        // Destination instance
        int dstInstanceSize = byteBuf.readInt();
        checkLength("dstInstanceSize", dstInstanceSize, messageLength);
        String dstInstance = toNonEmptyString(dstInstanceSize, byteBuf);

        // Message
        int messageSize = byteBuf.readInt();
        ByteBuf message = byteBuf.slice(byteBuf.readerIndex(), messageSize);

        // Error check
        if (dstInstance == null) {
            throw new IllegalStateException(
                    "Received message addressed to null destination from " + srcInstance);
        } else if (!dstInstance.equals(instanceName)) {
            throw new IllegalStateException(
                    instanceName + " received message addressed to " + dstInstance + " from " + srcInstance);
        } else if (callbacks.get(messageType) == null) {
            throw new IllegalStateException("No callback registered for message type " + messageType);
        }

        // Build scope
        HelixMessageScope scope = new HelixMessageScope.Builder().cluster(clusterName).resource(resourceName)
                .partition(partitionName).state(state).sourceInstance(srcInstance).build();

        // Get callback
        HelixIPCCallback callback = callbacks.get(messageType);
        if (callback == null) {
            throw new IllegalStateException("No callback registered for message type " + messageType);
        }

        // Handle callback
        callback.onMessage(scope, messageId, message);

        // Stats
        statRxMsg.mark();
        statRxBytes.mark(messageLength);
    } finally {
        byteBuf.release();
    }
}
From source file: org.apache.jackrabbit.oak.plugins.segment.standby.codec.ReplyDecoder.java
License: Apache License
private Segment decodeSegment(ByteBuf in, int len, byte type) {
    long msb = in.readLong();
    long lsb = in.readLong();
    long hash = in.readLong();

    // #readBytes throws a 'REPLAY' exception if there are not enough bytes
    // available for reading
    ByteBuf data = in.readBytes(len - 25);
    byte[] segment;
    if (data.hasArray()) {
        segment = data.array();
    } else {
        // no backing array (e.g. a direct buffer): copy the bytes out of
        // the transferred buffer, which now holds the segment payload
        segment = new byte[len - 25];
        data.readBytes(segment);
    }

    Hasher hasher = Hashing.murmur3_32().newHasher();
    long check = hasher.putBytes(segment).hash().padToLong();
    if (hash == check) {
        SegmentId id = new SegmentId(store.getTracker(), msb, lsb);
        Segment s = new Segment(store.getTracker(), id, ByteBuffer.wrap(segment));
        log.debug("received segment with id {} and size {}", id, s.size());
        return s;
    }
    log.debug("received corrupted segment {}, ignoring", new UUID(msb, lsb));
    return null;
}