Example usage for java.nio.ByteBuffer.remaining()

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of elements remaining in this buffer, that is, limit() - position().
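
A minimal sketch (not taken from the sources below) showing how remaining() tracks limit() - position() across a typical write/flip/read cycle:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);  // position=0, limit=capacity=8
        System.out.println(buf.remaining());      // 8

        buf.putInt(42);                           // position=4, limit=8
        System.out.println(buf.remaining());      // 4 bytes of space left to write

        buf.flip();                               // position=0, limit=4
        System.out.println(buf.remaining());      // 4 bytes available to read

        buf.getInt();                             // position=4, limit=4
        System.out.println(buf.remaining());      // 0: buffer drained
    }
}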

Usage

From source file:org.apache.hadoop.hbase.ipc.CellBlockBuilder.java

private ByteBuffer decompress(CompressionCodec compressor, ByteBuffer cellBlock) throws IOException {
    // GZIPCodec fails w/ NPE if no configuration.
    if (compressor instanceof Configurable) {
        ((Configurable) compressor).setConf(this.conf);
    }
    Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
    CompressionInputStream cis = compressor.createInputStream(new ByteBufferInputStream(cellBlock),
            poolDecompressor);
    ByteBufferOutputStream bbos;
    try {
        // TODO: This is ugly. The buffer will be resized on us if we guess wrong.
        // TODO: Reuse buffers.
        bbos = new ByteBufferOutputStream(cellBlock.remaining() * this.cellBlockDecompressionMultiplier);
        IOUtils.copy(cis, bbos);
        bbos.close();
        cellBlock = bbos.getByteBuffer();
    } finally {
        CodecPool.returnDecompressor(poolDecompressor);
    }
    return cellBlock;
}
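
Here remaining() reports the compressed size of the cell block, which is scaled by the configured decompression multiplier to guess an initial capacity for the decompressed output; as the TODO notes, the ByteBufferOutputStream grows if the guess is too small.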

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

@Test
public void testZeroCopyReads() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    FSDataInputStream fsIn = null;
    final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;

    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, 7567L);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        byte[] original = new byte[TEST_FILE_LENGTH];
        IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
        fsIn.close();
        fsIn = fs.open(TEST_PATH);
        ByteBuffer result = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(BLOCK_SIZE, result.remaining());
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
        Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE), byteBufferToArray(result));
        fsIn.releaseBuffer(result);
    } finally {
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}

From source file:org.commoncrawl.hadoop.mergeutils.SequenceFileSpillWriter.java

public SequenceFileSpillWriter(FileSystem fileSystem, Configuration conf, Path outputFilePath,
        Class<KeyType> keyClass, Class<ValueType> valueClass,
        SequenceFileIndexWriter<KeyType, ValueType> optionalIndexWriter, CompressionCodec codec,
        short replicationFactor) throws IOException {

    _bufferQueue = new LinkedBlockingQueue<QueuedBufferItem>(
            conf.getInt(QUEUE_CAPACITY_PARAM, BUFFER_QUEUE_CAPACITY));
    _spillBufferSize = conf.getInt(SPILL_WRITER_BUFFER_SIZE_PARAM, DEFAULT_SPILL_BUFFER_SIZE);
    _outputStream = fileSystem.create(outputFilePath, true, 10 * 1024 * 1024, replicationFactor,
            fileSystem.getDefaultBlockSize());
    // allocate buffer ...
    _activeBuffer = ByteBuffer.allocate(_spillBufferSize);
    // assign index writer ..
    _indexWriter = optionalIndexWriter;

    if (codec != null) {
        writer = SequenceFile.createWriter(conf, _outputStream, keyClass, valueClass, CompressionType.BLOCK,
                codec);
    } else {
        writer = SequenceFile.createWriter(conf, _outputStream, keyClass, valueClass, CompressionType.NONE,
                null);
    }

    _writerThread = new Thread(new Runnable() {

        @Override
        public void run() {
            // LOG.info("Writer Thread Starting");

            while (true) {

                QueuedBufferItem queuedBufferItem = null;

                try {
                    queuedBufferItem = _bufferQueue.take();
                } catch (InterruptedException e) {
                    // Restore the interrupt status and stop the writer thread
                    // rather than falling through with a null buffer item.
                    Thread.currentThread().interrupt();
                    return;
                }
                if (queuedBufferItem._buffer == null) {
                    // LOG.info("Writer Thread received empty buffer item. Exiting");
                    return;
                } else {

                    ByteBuffer theBuffer = queuedBufferItem._buffer;

                    // LOG.info("Writer Thread received item. Limit:" +
                    // theBuffer.limit());

                    // get byte pointer
                    byte[] bufferAsBytes = theBuffer.array();

                    int itemsWritten = 0;
                    long timeStart = System.currentTimeMillis();

                    while (theBuffer.remaining() != 0) {

                        // now read in key length
                        int keyLen = theBuffer.getInt();
                        // mark key position
                        int keyPos = theBuffer.position();
                        // now skip past key length
                        theBuffer.position(keyPos + keyLen);
                        // read value length
                        int valueLen = theBuffer.getInt();
                        // mark value position
                        int valuePosition = theBuffer.position();
                        // now skip past it ...
                        theBuffer.position(valuePosition + valueLen);
                        // now write this out to the sequence file ...

                        try {
                            spillRawRecord2(bufferAsBytes, keyPos, keyLen, bufferAsBytes, valuePosition,
                                    valueLen);
                        } catch (IOException e) {
                            LOG.error("Writer Thread Failed with Error:" + CCStringUtils.stringifyException(e));
                            _writerException = e;
                            return;
                        }
                        itemsWritten++;
                    }
                    // LOG.info("Writer Thread Finished With Buffer. Wrote:"+
                    // itemsWritten + " in:" + (System.currentTimeMillis() -
                    // timeStart));
                }
            }
        }

    });
    _writerThread.start();
}

From source file:com.github.ambry.utils.UtilsTest.java

@Test
public void testReadFileToByteBuffer() throws IOException {
    File file = File.createTempFile("test", "1");
    file.deleteOnExit();
    FileChannel fileChannel = Utils.openChannel(file, false);
    byte[] referenceBytes = new byte[20];
    new Random().nextBytes(referenceBytes);
    FileUtils.writeByteArrayToFile(file, referenceBytes);

    // fill up fresh byteBuffer
    ByteBuffer buffer = ByteBuffer.allocate(20);
    Utils.readFileToByteBuffer(fileChannel, 0, buffer);
    assertArrayEquals("Data mismatch", referenceBytes, buffer.array());

    // write to byteBuffer based on buffer remaining
    buffer.limit(10);
    buffer.position(0);
    assertEquals("buffer remaining should be 10", 10, buffer.remaining());
    Utils.readFileToByteBuffer(fileChannel, 10, buffer);
    assertEquals("buffer remaining should be 0", 0, buffer.remaining());
    for (int i = 0; i < 10; i++) {
        assertEquals("First 10 bytes in buffer should match last 10 bytes in file", buffer.array()[i],
                referenceBytes[i + 10]);
    }

    // byteBuffer.remaining() + starting offset > file size, exception is expected.
    buffer.clear();
    assertEquals("buffer remaining should be 20", 20, buffer.remaining());
    try {
        Utils.readFileToByteBuffer(fileChannel, 1, buffer);
        fail("Should fail");
    } catch (IOException e) {
    }

    // starting offset exceeds file size, exception is expected.
    buffer.clear();
    assertEquals("buffer remaining should be 20", 20, buffer.remaining());
    try {
        Utils.readFileToByteBuffer(fileChannel, 21, buffer);
        fail("Should fail");
    } catch (IOException e) {
    }
}
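
Note how this test drives remaining() through buffer state transitions: limit(10) caps the readable/writable window so remaining() reports 10 even though the capacity is 20, while clear() resets position to 0 and limit to the capacity, so remaining() is 20 again before each read that is expected to fail.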

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

private void writePreV2Journal(File journalDir, int numEntries) throws Exception {
    long logId = System.currentTimeMillis();
    File fn = new File(journalDir, Long.toHexString(logId) + ".txn");

    FileChannel fc = new RandomAccessFile(fn, "rw").getChannel();

    ByteBuffer zeros = ByteBuffer.allocate(512);
    fc.write(zeros, 4 * 1024 * 1024);
    fc.position(0);

    byte[] data = "JournalTestData".getBytes();
    long lastConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    for (int i = 1; i <= numEntries; i++) {
        ByteBuffer packet = ClientUtil.generatePacket(1, i, lastConfirmed, i * data.length, data)
                .toByteBuffer();
        lastConfirmed = i;
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        lenBuff.putInt(packet.remaining());
        lenBuff.flip();

        fc.write(lenBuff);
        fc.write(packet);
    }
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java

private JournalChannel writeV2Journal(File journalDir, int numEntries) throws Exception {
    long logId = System.currentTimeMillis();
    JournalChannel jc = new JournalChannel(journalDir, logId);

    moveToPosition(jc, JournalChannel.VERSION_HEADER_SIZE);

    BufferedChannel bc = jc.getBufferedChannel();

    byte[] data = new byte[1024];
    Arrays.fill(data, (byte) 'X');
    long lastConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    for (int i = 1; i <= numEntries; i++) {
        ByteBuffer packet = ClientUtil.generatePacket(1, i, lastConfirmed, i * data.length, data)
                .toByteBuffer();
        lastConfirmed = i;
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        lenBuff.putInt(packet.remaining());
        lenBuff.flip();

        bc.write(lenBuff);
        bc.write(packet);
    }
    bc.flush(true);

    updateJournalVersion(jc, JournalChannel.V2);

    return jc;
}
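
Both journal writers above use the same idiom: write packet.remaining() into a 4-byte prefix, flip it, and send the prefix followed by the packet (the SequenceFileSpillWriter thread earlier parses a similar length-prefixed layout back out with getInt() and position()). A minimal, self-contained sketch of the idiom against a generic WritableByteChannel; writeFramed is a hypothetical helper, not part of either project:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public final class LengthPrefixedWriter {
    /** Writes a 4-byte big-endian length prefix followed by the packet body. */
    static void writeFramed(WritableByteChannel channel, ByteBuffer packet) throws IOException {
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        lenBuff.putInt(packet.remaining()); // only the unread portion counts
        lenBuff.flip();
        while (lenBuff.hasRemaining()) {    // a channel write may be partial
            channel.write(lenBuff);
        }
        while (packet.hasRemaining()) {
            channel.write(packet);
        }
    }
}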

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2.java

private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey) throws IOException {

    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withCompression(compressAlgo).build();
    HFileWriterV2 writer = (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = randomValue(rand);
        KeyValue keyValue = new KeyValue(keyBytes, null, null, valueBytes);
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    // A "manual" version of a new-format HFile reader. This unit test was
    // written before the V2 reader was fully implemented.

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(2, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());

    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(false)
            .withIncludesTags(false).withCompression(compressAlgo).build();

    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        while (buf.hasRemaining()) {
            int keyLen = buf.getInt();
            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);

            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.

    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}
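
The memstoreTS branch above also shows a general idiom worth isolating: for a heap-backed buffer, the unread window maps onto the backing array at arrayOffset() + position() with length remaining(), which lets you wrap it in a stream without copying. A minimal sketch under that assumption; toInputStream is a hypothetical helper:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;

public final class BufferViews {
    /** Wraps the unread region of a heap buffer as an InputStream, without copying. */
    static InputStream toInputStream(ByteBuffer buf) {
        if (!buf.hasArray()) {
            throw new IllegalArgumentException("heap (array-backed) buffer required");
        }
        return new ByteArrayInputStream(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
    }
}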

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

@Test
public void testShortZeroCopyReads() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    FSDataInputStream fsIn = null;
    final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;

    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, 7567L);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        byte[] original = new byte[TEST_FILE_LENGTH];
        IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
        fsIn.close();
        fsIn = fs.open(TEST_PATH);

        // Try to read (2 * ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        ByteBuffer result = dfsIn.read(null, 2 * BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(BLOCK_SIZE, result.remaining());
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BLOCK_SIZE, dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
        Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE), byteBufferToArray(result));
        dfsIn.releaseBuffer(result);

        // Try to read (1 + ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
        result = dfsIn.read(null, 1 + BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(BLOCK_SIZE, result.remaining());
        Assert.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
                byteBufferToArray(result));
        dfsIn.releaseBuffer(result);
    } finally {
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}

From source file:com.turn.ttorrent.client.ConnectionHandler.java

/**
 * Validate an expected handshake on a connection.
 *
     * <p>
 * Reads an expected handshake message from the given connected socket,
 * parses it and validates that the torrent hash_info corresponds to the
 * torrent we're sharing, and that the peerId matches the peer ID we expect
 * to see coming from the remote peer.
 * </p>
 *
 * @param channel The connected socket channel to the remote peer.
 * @param peerId The peer ID we expect in the handshake. If <em>null</em>,
 * any peer ID is accepted (this is the case for incoming connections).
 * @return The validated handshake message object.
 */
private Handshake validateHandshake(SocketChannel channel, byte[] peerId) throws IOException, ParseException {
    ByteBuffer len = ByteBuffer.allocate(1);
    ByteBuffer data;

    // Read the handshake from the wire
    logger.trace("Reading handshake size (1 byte) from {}...", this.socketRepr(channel));
    if (channel.read(len) < len.capacity()) {
        throw new IOException("Handshake size read underrrun");
    }

    len.rewind();
    int pstrlen = len.get();

    data = ByteBuffer.allocate(Handshake.BASE_HANDSHAKE_LENGTH + pstrlen);
    data.put((byte) pstrlen);
    int expected = data.remaining();
    int read = channel.read(data);
    if (read < expected) {
        throw new IOException("Handshake data read underrun (" + read + " < " + expected + " bytes)");
    }

    // Parse and check the handshake
    data.rewind();
    Handshake hs = Handshake.parse(data);
    if (!Arrays.equals(hs.getInfoHash(), this.torrent.getInfoHash())) {
        throw new ParseException("Handshake for unknow torrent " + Utils.bytesToHex(hs.getInfoHash()) + " from "
                + this.socketRepr(channel) + ".", pstrlen + 9);
    }

    if (peerId != null && !Arrays.equals(hs.getPeerId(), peerId)) {
        throw new ParseException("Announced peer ID " + Utils.bytesToHex(hs.getPeerId())
                + " did not match expected peer ID " + Utils.bytesToHex(peerId) + ".", pstrlen + 29);
    }

    return hs;
}
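
One caveat in the handshake read above: a single SocketChannel.read() may legitimately return fewer than data.remaining() bytes even on a healthy connection, so robust code loops until the buffer fills. A hedged sketch of that variant; readFully is a hypothetical helper, not part of the library shown here:

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public final class ChannelUtil {
    /** Reads until buf has no space remaining, or throws if the stream ends first. */
    static void readFully(ReadableByteChannel channel, ByteBuffer buf) throws IOException {
        while (buf.hasRemaining()) {
            if (channel.read(buf) < 0) {
                throw new EOFException(buf.remaining() + " bytes short of a full read");
            }
        }
    }
}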

From source file:com.bittorrent.mpetazzoni.client.ConnectionHandler.java

/**
 * Validate an expected handshake on a connection.
 *
     * <p>
 * Reads an expected handshake message from the given connected socket,
 * parses it and validates that the torrent hash_info corresponds to the
 * torrent we're sharing, and that the peerId matches the peer ID we expect
 * to see coming from the remote peer.
 * </p>
 *
 * @param channel The connected socket channel to the remote peer.
 * @param peerId The peer ID we expect in the handshake. If <em>null</em>,
 * any peer ID is accepted (this is the case for incoming connections).
 * @return The validated handshake message object.
 */
private Handshake validateHandshake(SocketChannel channel, byte[] peerId) throws IOException, ParseException {
    ByteBuffer len = ByteBuffer.allocate(1);
    ByteBuffer data;

    // Read the handshake from the wire
    logger.trace("Reading handshake size (1 byte) from {}...", this.socketRepr(channel));
    if (channel.read(len) < len.capacity()) {
        throw new IOException("Handshake size read underrrun");
    }

    len.rewind();
    int pstrlen = len.get();

    data = ByteBuffer.allocate(Handshake.BASE_HANDSHAKE_LENGTH + pstrlen);
    data.put((byte) pstrlen);
    int expected = data.remaining();
    int read = channel.read(data);
    if (read < expected) {
        throw new IOException("Handshake data read underrun (" + read + " < " + expected + " bytes)");
    }

    // Parse and check the handshake
    data.rewind();
    Handshake hs = Handshake.parse(data);
    if (!Arrays.equals(hs.getInfoHash(), this.torrent.getInfoHash())) {
        throw new ParseException("Handshake for unknow torrent "
                + Torrent.byteArrayToHexString(hs.getInfoHash()) + " from " + this.socketRepr(channel) + ".",
                pstrlen + 9);
    }

    if (peerId != null && !Arrays.equals(hs.getPeerId(), peerId)) {
        throw new ParseException(
                "Announced peer ID " + Torrent.byteArrayToHexString(hs.getPeerId())
                        + " did not match expected peer ID " + Torrent.byteArrayToHexString(peerId) + ".",
                pstrlen + 29);
    }

    return hs;
}