Example usage for java.nio.ByteBuffer limit

List of usage examples for java.nio.ByteBuffer limit

Introduction

This page collects example usages of the java.nio.ByteBuffer limit method, drawn from open-source projects.

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
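
A minimal, self-contained sketch before the project examples below; the class name and sample data are illustrative only. Note that the examples also use the companion setter limit(int), which bounds how much of a buffer is readable.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ByteBufferLimitExample {
    public static void main(String[] args) {
        // Hypothetical payload, used only to illustrate limit().
        byte[] data = "hello world".getBytes(StandardCharsets.UTF_8);

        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.put(data);
        buf.flip(); // position -> 0, limit -> number of bytes written (11)

        // Getter form: limit() reports how many bytes are readable from position 0.
        byte[] copy = new byte[buf.limit()];
        buf.get(copy);
        System.out.println(new String(copy, StandardCharsets.UTF_8)); // hello world

        // Setter form: limit(int) shrinks the readable window.
        buf.rewind();  // position -> 0, limit unchanged
        buf.limit(5);  // only the first five bytes remain readable
        byte[] head = new byte[buf.remaining()];
        buf.get(head);
        System.out.println(new String(head, StandardCharsets.UTF_8)); // hello
    }
}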

Usage

From source file:de.spqrinfo.cups4j.operations.IppOperation.java

/**
 * Sends a request to the provided URL.
 *
 * @param url the target CUPS/IPP URL
 * @param ippBuf buffer containing the encoded IPP request header
 * @param documentStream optional document data to send after the header, may be null
 * @return the IPP result parsed from the response
 * @throws Exception if the request fails or the response cannot be parsed
 */
private IppResult sendRequest(URL url, ByteBuffer ippBuf, InputStream documentStream) throws Exception {
    IppResult ippResult = null;
    if (ippBuf == null) {
        return null;
    }

    if (url == null) {
        return null;
    }

    HttpClient client = new DefaultHttpClient();

    // will not work with older versions of CUPS!
    client.getParams().setParameter("http.protocol.version", HttpVersion.HTTP_1_1);
    client.getParams().setParameter("http.socket.timeout", new Integer(10000));
    client.getParams().setParameter("http.connection.timeout", new Integer(10000));
    client.getParams().setParameter("http.protocol.content-charset", "UTF-8");
    client.getParams().setParameter("http.method.response.buffer.warnlimit", new Integer(8092));

    // probably not working with older CUPS versions
    client.getParams().setParameter("http.protocol.expect-continue", Boolean.valueOf(true));

    HttpPost httpPost = new HttpPost(new URI("http://" + url.getHost() + ":" + ippPort) + url.getPath());

    httpPost.getParams().setParameter("http.socket.timeout", new Integer(10000));

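    // size the array by limit() and copy the buffer's contents (the encoded IPP header) into it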
    byte[] bytes = new byte[ippBuf.limit()];
    ippBuf.get(bytes);

    ByteArrayInputStream headerStream = new ByteArrayInputStream(bytes);

    // If we need to send a document, concatenate InputStreams
    InputStream inputStream = headerStream;
    if (documentStream != null) {
        inputStream = new SequenceInputStream(headerStream, documentStream);
    }

    // set length to -1 to advise the entity to read until EOF
    InputStreamEntity requestEntity = new InputStreamEntity(inputStream, -1);

    requestEntity.setContentType(IPP_MIME_TYPE);
    httpPost.setEntity(requestEntity);

    httpStatusLine = null;

    ResponseHandler<byte[]> handler = new ResponseHandler<byte[]>() {
        public byte[] handleResponse(HttpResponse response) throws ClientProtocolException, IOException {
            HttpEntity entity = response.getEntity();
            httpStatusLine = response.getStatusLine().toString();
            if (entity != null) {
                return EntityUtils.toByteArray(entity);
            } else {
                return null;
            }
        }
    };

    byte[] result = client.execute(httpPost, handler);

    IppResponse ippResponse = new IppResponse();

    ippResult = ippResponse.getResponse(ByteBuffer.wrap(result));
    ippResult.setHttpStatusResponse(httpStatusLine);

    // IppResultPrinter.print(ippResult);

    client.getConnectionManager().shutdown();
    return ippResult;
}

From source file:org.apache.hadoop.hbase.regionserver.TestStoreFile.java

private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException {
    byte[] midkey = f.createReader().midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    byte[] midRow = midKV.getRow();
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE)
            .createReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
    try {
        // Now make two HalfMapFiles and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Test bottom half first.
        // Now test reading from the top.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo())
                || (topScanner.isSeeked() && topScanner.next())) {
            key = topScanner.getKey();

            if (topScanner.getReader().getComparator().compareFlatKey(key.array(), key.arrayOffset(),
                    key.limit(), midkey, 0, midkey.length) < 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + Bytes.toStringBinary(midkey));
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = bottomScanner.getKey();
            key = bottomScanner.getKey();
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Next test using a midkey that does not exist in the file.
        // First, do a key that is < than first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes("  .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);

        assertNull(bottomPath);

        top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = topScanner.getKey();
            assertTrue(topScanner.getReader().getComparator().compareFlatKey(key.array(), key.arrayOffset(),
                    key.limit(), badmidkey, 0, badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRow());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRow());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Test when badkey is > than last key in file ('||' > 'zz').
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        assertNull(topPath);
        bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = bottomScanner.getKey();
            if (first) {
                first = false;
                keyKV = KeyValue.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRow());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValue.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            top.close(true); // evict since we are about to delete the file
        }
        if (bottom != null) {
            bottom.close(true); // evict since we are about to delete the file
        }
        fs.delete(f.getPath(), true);
    }
}

From source file:org.alfresco.contentstore.CassandraContentStore.java

private ByteBuffer getNodeBlock(Node node, long rangeId, int size) {
    ByteBuffer bb = null;

    String nodeId = node.getNodeId();
    long nodeVersion = node.getNodeVersion();
    MimeType mimeType = node.getMimeType();

    ResultSet rs = cassandraSession.getCassandraSession()
            .execute(getNodeBlockStatement.bind(nodeId, nodeVersion, mimeType.getMimetype(), rangeId));
    Row row = rs.one();
    if (row != null) {
        bb = row.getBytes("data");
        bb.compact();
        bb.flip();
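        // cap the limit so that at most 'size' bytes of the block are readable by the caller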
        if (bb.limit() > size) {
            bb.limit(size);
        }
    }

    return bb;
}

From source file:com.alvermont.terraj.fracplanet.geom.VertexBufferArray.java

/**
 * Resize the buffer. This is done by reallocating a new one and copying
 * data from the old buffer to the new one. This is necessary as buffers
 * cannot be dynamically resized.
 */
protected void resizeBuffer() {
    // we can't resize it so we have to allocate a new one and copy the data
    final int slots = (buffer.capacity() / ELEMENTSIZE);
    final int newCapacity = buffer.capacity()
            + (((slots * CAPACITY_PCT_INCREASE) / HUNDRED_PERCENT) * ELEMENTSIZE);

    final ByteBuffer newBuffer = ByteBuffer.allocateDirect(newCapacity).order(ByteOrder.nativeOrder());

    if (log.isDebugEnabled()) {
        log.debug("Resizing vertex buffer capacity to: " + newBuffer.capacity());
    }

    final FloatBuffer oldVertexBuffer = positionBuffer;
    final FloatBuffer oldNormalBuffer = normalBuffer;
    final ByteBuffer oldColourBuffer = colourBuffer;
    final ByteBuffer oldEmissiveBuffer = emissiveBuffer;

    this.buffer = newBuffer;

    sliceAndDice(newCapacity / ELEMENTSIZE);

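    // restore the old data: bound each new slice's limit to the old buffer's limit, then bulk-copy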
    oldVertexBuffer.rewind();
    positionBuffer.rewind();
    positionBuffer.limit(oldVertexBuffer.limit());
    positionBuffer.put(oldVertexBuffer);

    oldNormalBuffer.rewind();
    normalBuffer.rewind();
    normalBuffer.limit(oldNormalBuffer.limit());
    normalBuffer.put(oldNormalBuffer);

    oldColourBuffer.rewind();
    colourBuffer.rewind();
    colourBuffer.limit(oldColourBuffer.limit());
    colourBuffer.put(oldColourBuffer);

    oldEmissiveBuffer.rewind();
    emissiveBuffer.rewind();
    emissiveBuffer.limit(oldEmissiveBuffer.limit());
    emissiveBuffer.put(oldEmissiveBuffer);
}

From source file:org.apache.hadoop.hbase.regionserver.TestHStoreFile.java

private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f) throws IOException {
    f.initReader();
    Cell midkey = f.getReader().midkey();
    KeyValue midKV = (KeyValue) midkey;
    byte[] midRow = CellUtil.cloneRow(midKV);
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
    topF.initReader();
    StoreFileReader top = topF.getReader();
    HStoreFile bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
    bottomF.initReader();
    StoreFileReader bottom = bottomF.getReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midKV.getKey());
    try {
        // Now make two HalfMapFiles and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Test bottom half first.
        // Now test reading from the top.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo())
                || (topScanner.isSeeked() && topScanner.next())) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());

            if ((topScanner.getReader().getComparator().compare(midKV, key.array(), key.arrayOffset(),
                    key.limit())) > 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));

        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Next test using a midkey that does not exist in the file.
        // First, do a key that is < than first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes("  .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);

        assertNull(bottomPath);

        topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
        topF.initReader();
        top = topF.getReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
            assertTrue(topScanner.getReader().getComparator().compare(keyOnlyKV, badmidkey, 0,
                    badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);

        // Test when badkey is > than last key in file ('||' > 'zz').
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        assertNull(topPath);

        bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
        bottomF.initReader();
        bottom = bottomF.getReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength())
                    .charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            top.close(true); // evict since we are about to delete the file
        }
        if (bottom != null) {
            bottom.close(true); // evict since we are about to delete the file
        }
        fs.delete(f.getPath(), true);
    }
}

From source file:org.apache.kylin.engine.mr.steps.FactDistinctColumnsReducer.java

private void outputStatistics(List<Long> allCuboids) throws IOException, InterruptedException {
    // output written to baseDir/statistics/statistics-r-00000 (etc)
    String statisticsFileName = BatchConstants.CFG_OUTPUT_STATISTICS + "/"
            + BatchConstants.CFG_OUTPUT_STATISTICS;

    ByteBuffer valueBuf = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);

    // mapper overlap ratio at key -1
    long grandTotal = 0;
    for (HLLCounter hll : cuboidHLLMap.values()) {
        grandTotal += hll.getCountEstimate();
    }
    double mapperOverlapRatio = grandTotal == 0 ? 0 : (double) totalRowsBeforeMerge / grandTotal;
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(-1),
            new BytesWritable(Bytes.toBytes(mapperOverlapRatio)), statisticsFileName);

    // mapper number at key -2
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(-2),
            new BytesWritable(Bytes.toBytes(baseCuboidRowCountInMappers.size())), statisticsFileName);

    // sampling percentage at key 0
    mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(0L),
            new BytesWritable(Bytes.toBytes(samplingPercentage)), statisticsFileName);

    for (long i : allCuboids) {
        valueBuf.clear();
        cuboidHLLMap.get(i).writeRegisters(valueBuf);
        valueBuf.flip();
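        // after flip(), limit() is the number of bytes written by writeRegisters, so only that prefix of the backing array is emitted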
        mos.write(BatchConstants.CFG_OUTPUT_STATISTICS, new LongWritable(i),
                new BytesWritable(valueBuf.array(), valueBuf.limit()), statisticsFileName);
    }
}

From source file:org.alfresco.contentstore.ChecksumTest.java

private void applyPatch(ByteBuffer mem, PatchDocumentImpl patchDocument)
        throws FileNotFoundException, IOException {
    int blockSize = checksumService.getBlockSize();

    long start = System.currentTimeMillis();
    int newLimit = -1;

    Iterator<Patch> patchIt = patchDocument.getPatches().iterator();
    while (patchIt.hasNext()) {
        Patch patch = patchIt.next();
        int lastMatchingBlockIndex = patch.getLastMatchIndex();
        //            int lastMatchingBlock = patchDocument.getMatchedBlocks().get(lastMatchingBlockIndex);
        int pos = lastMatchingBlockIndex * blockSize;
        //            int pos = (lastMatchingBlockIndex + 1) * blockSize;
        mem.position(pos);

        ByteBuffer bb = ByteBuffer.wrap(patch.getBuffer());

        mem.put(bb);

        if (!patchIt.hasNext()) {
            int x = pos + patch.getSize();
            if (x < mem.limit()) {
                newLimit = x;
            }
        }
    }

    mem.position(0);
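    // if the last patch ended before the original limit, shrink the limit so readers stop at the patched end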
    if (newLimit != -1) {
        mem.limit(newLimit);
    }

    long end = System.currentTimeMillis();
    long time = end - start;

    System.out.println("patch time = " + time);
}

From source file:org.apache.hadoop.fs.TestEnhancedByteBufferAccess.java

@Test
public void test2GBMmapLimit() throws Exception {
    Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
    HdfsConfiguration conf = initZeroCopyTest();
    final long TEST_FILE_LENGTH = 2469605888L;
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final String CONTEXT = "test2GBMmapLimit";
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

    FSDataInputStream fsIn = null, fsIn2 = null;
    ByteBuffer buf1 = null, buf2 = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, 0xB);
        DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);

        fsIn = fs.open(TEST_PATH);
        buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(1, buf1.remaining());
        fsIn.releaseBuffer(buf1);
        buf1 = null;
        fsIn.seek(2147483640L);
        buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(7, buf1.remaining());
        Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
        fsIn.releaseBuffer(buf1);
        buf1 = null;
        Assert.assertEquals(2147483647L, fsIn.getPos());
        try {
            buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
            Assert.fail("expected UnsupportedOperationException");
        } catch (UnsupportedOperationException e) {
            // expected; can't read past 2GB boundary.
        }
        fsIn.close();
        fsIn = null;

        // Now create another file with normal-sized blocks, and verify we
        // can read past 2GB
        final Path TEST_PATH2 = new Path("/b");
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
        DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH, 268435456L, (short) 1, 0xA);

        fsIn2 = fs.open(TEST_PATH2);
        fsIn2.seek(2147483640L);
        buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(8, buf2.remaining());
        Assert.assertEquals(2147483648L, fsIn2.getPos());
        fsIn2.releaseBuffer(buf2);
        buf2 = null;
        buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.assertEquals(1024, buf2.remaining());
        Assert.assertEquals(2147484672L, fsIn2.getPos());
        fsIn2.releaseBuffer(buf2);
        buf2 = null;
    } finally {
        if (buf1 != null) {
            fsIn.releaseBuffer(buf1);
        }
        if (buf2 != null) {
            fsIn2.releaseBuffer(buf2);
        }
        IOUtils.cleanup(null, fsIn, fsIn2);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:org.apache.nifi.processor.util.listen.dispatcher.DatagramChannelDispatcher.java

@Override
public void run() {
    final ByteBuffer buffer = bufferPool.poll();
    while (!stopped) {
        try {
            int selected = selector.select();
            // if stopped the selector could already be closed which would result in a ClosedSelectorException
            if (selected > 0 && !stopped) {
                Iterator<SelectionKey> selectorKeys = selector.selectedKeys().iterator();
                // if stopped we don't want to modify the keys because close() may still be in progress
                while (selectorKeys.hasNext() && !stopped) {
                    SelectionKey key = selectorKeys.next();
                    selectorKeys.remove();
                    if (!key.isValid()) {
                        continue;
                    }
                    DatagramChannel channel = (DatagramChannel) key.channel();
                    SocketAddress socketAddress;
                    buffer.clear();
                    while (!stopped && (socketAddress = channel.receive(buffer)) != null) {
                        String sender = "";
                        if (socketAddress instanceof InetSocketAddress) {
                            sender = ((InetSocketAddress) socketAddress).getAddress().toString();
                        }

                        // create a byte array from the buffer
                        buffer.flip();
                        byte bytes[] = new byte[buffer.limit()];
                        buffer.get(bytes, 0, buffer.limit());

                        final Map<String, String> metadata = EventFactoryUtil.createMapWithSender(sender);
                        final E event = eventFactory.create(bytes, metadata, null);
                        events.offer(event);

                        buffer.clear();
                    }
                }
            }
        } catch (InterruptedException e) {
            stopped = true;
            Thread.currentThread().interrupt();
        } catch (IOException e) {
            logger.error("Error reading from DatagramChannel", e);
        }
    }

    if (buffer != null) {
        try {
            bufferPool.put(buffer);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}