List of usage examples for java.nio.ByteBuffer.limit()
public final int limit()
Returns this buffer's limit, the index of the first element that must not be read or written; the companion setter limit(int newLimit) shrinks or restores that bound.
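Before the project examples, a minimal standalone sketch (not taken from any of the source files below) of how limit() interacts with position, remaining(), flip(), and the limit(int) setter; the buffer size and byte values are purely illustrative:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);   // capacity 16, limit 16, position 0
        buf.put((byte) 1).put((byte) 2).put((byte) 3);

        // flip() sets limit to the current position and position to 0,
        // so limit() now bounds what can be read back.
        buf.flip();
        System.out.println("limit after flip: " + buf.limit());      // 3
        System.out.println("remaining:        " + buf.remaining());  // 3 (limit - position)

        // limit(int) shrinks the readable window without copying data.
        buf.limit(2);
        byte[] out = new byte[buf.limit()];
        buf.get(out);                                                 // reads exactly limit() bytes
        System.out.println("bytes read: " + out.length);
    }
}

Most of the examples below use limit() in exactly this role: as the number of readable bytes in a buffer that has been filled and then flipped or duplicated.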
From source file: org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java - asserts two buffers are equal, using the smaller limit() to bound the search for the first mismatching offset.
static void assertBuffersEqual(ByteBuffer expectedBuffer, ByteBuffer actualBuffer,
        Compression.Algorithm compression, DataBlockEncoding encoding, boolean pread) {
    if (!actualBuffer.equals(expectedBuffer)) {
        int prefix = 0;
        int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
        while (prefix < minLimit && expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
            prefix++;
        }
        fail(String.format(
                "Content mismatch for compression %s, encoding %s, "
                        + "pread %s, commonPrefix %d, expected %s, got %s",
                compression, encoding, pread, prefix,
                nextBytesToStr(expectedBuffer, prefix),
                nextBytesToStr(actualBuffer, prefix)));
    }
}
From source file: org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java - parses a TAR segment graph, reading the entry count relative to limit() and capping a duplicate buffer's limit before decoding.
private static Map<UUID, List<UUID>> parseGraph(ByteBuffer graphByteBuffer) {
    int count = graphByteBuffer.getInt(graphByteBuffer.limit() - 12);

    ByteBuffer buffer = graphByteBuffer.duplicate();
    buffer.limit(graphByteBuffer.limit() - 16);

    List<UUID> uuids = newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        uuids.add(new UUID(buffer.getLong(), buffer.getLong()));
    }

    Map<UUID, List<UUID>> graph = newHashMap();
    while (buffer.hasRemaining()) {
        UUID uuid = uuids.get(buffer.getInt());
        List<UUID> list = newArrayList();
        int refid = buffer.getInt();
        while (refid != -1) {
            list.add(uuids.get(refid));
            refid = buffer.getInt();
        }
        graph.put(uuid, list);
    }
    return graph;
}
From source file: com.gamesalutes.utils.ByteUtils.java - reads an InputStream to exhaustion into a ByteBuffer, growing it via limit() and remaining() as needed.
/**
 * Reads all the bytes from the given input stream and stores them in the specified buffer.
 * If the input buffer is <code>null</code> or does not have the capacity to store all the input, a
 * new buffer is created and returned. The input stream is closed regardless of whether an
 * <code>IOException</code> is thrown.
 *
 * @param in the <code>InputStream</code> to read
 * @param buf a <code>ByteBuffer</code> to use for storage or <code>null</code> to just allocate a new one.
 *            If <code>buf</code> is not large enough it will be expanded using {@link #growBuffer(ByteBuffer, int)}
 * @return the buffer containing the read data
 * @throws IOException
 */
public static ByteBuffer readBytes(InputStream in, ByteBuffer buf) throws IOException {
    try {
        if (buf == null)
            buf = ByteBuffer.allocate(READ_BUFFER_SIZE);

        // note the input position
        int startPos = buf.position();
        byte[] tmp = new byte[NETWORK_BYTE_SIZE];
        int read;
        // read until end of file
        while ((read = in.read(tmp)) > 0) {
            if (buf.remaining() < read) {
                buf = ByteUtils.growBuffer(buf, buf.limit() + (read - buf.remaining()));
            }
            buf.put(tmp, 0, read);
        }
        buf.flip();
        // reset starting position to be that of input buffer
        buf.position(startPos);
        return buf;
    } finally {
        MiscUtils.closeStream(in);
    }
}
From source file: net.darkmist.alib.io.BufferUtil.java - converts a ByteBuffer to a byte[], returning the backing array directly only when its length matches limit().
public static byte[] asBytes(ByteBuffer buf) {
    buf = buf.duplicate();
    /* To use buf.array() the buffer must:
     *   be writable as the array will be writable
     *   have arrayOffset() == 0 or the array will not start at the right location
     *   the returned array must be the same length as the buffer's limit or it will be the wrong size.
     */
    if (!buf.isReadOnly() && buf.hasArray() && buf.arrayOffset() == 0) {
        logger.debug("!read-only, hasArray && offset is 0");
        byte[] ret = buf.array();
        if (ret.length == buf.limit())
            return ret;
        logger.debug("length of array != limit, doing copy...");
    }

    byte[] bytes = new byte[buf.limit()];
    buf.get(bytes, 0, buf.limit());
    return bytes;
}
From source file: org.apache.solr.handler.TestBlobHandler.java - polls a blob store and checks that the stored size equals the posted buffer's limit().
public static void postAndCheck(CloudSolrClient cloudClient, String baseUrl, String blobName, ByteBuffer bytes,
        int count) throws Exception {
    postData(cloudClient, baseUrl, blobName, bytes);

    String url;
    Map map = null;
    List l;
    final RTimer timer = new RTimer();
    int i = 0;
    for (; i < 150; i++) { // 15 secs
        url = baseUrl + "/.system/blob/" + blobName;
        map = TestSolrConfigHandlerConcurrent.getAsMap(url, cloudClient);
        String numFound = String
                .valueOf(Utils.getObjectByPath(map, false, Arrays.asList("response", "numFound")));
        if (!("" + count).equals(numFound)) {
            Thread.sleep(100);
            continue;
        }
        l = (List) Utils.getObjectByPath(map, false, Arrays.asList("response", "docs"));
        assertNotNull(l);
        map = (Map) l.get(0);
        assertEquals("" + bytes.limit(), String.valueOf(map.get("size")));
        return;
    }
    fail(StrUtils.formatString(
            "Could not successfully add blob after {0} attempts. Expecting {1} items. time elapsed {2} output for url is {3}",
            i, count, timer.getTime(), getAsString(map)));
}
From source file: org.apache.nutch.indexer.elastic.segment.SegmentIndexerJob.java - detects truncated pages by comparing limit() of the fetched content against the Content-Length header.
/**
 * Checks if the page's content is truncated.
 *
 * @param url
 * @param page
 * @return If the page is truncated <code>true</code>. When it is not, or when it could not be determined,
 *         <code>false</code>.
 */
public static boolean isTruncated(String url, WebPage page) {
    ByteBuffer content = page.getContent();
    if (content == null) {
        return false;
    }
    Utf8 lengthUtf8 = page.getFromHeaders(new Utf8(HttpHeaders.CONTENT_LENGTH));
    if (lengthUtf8 == null) {
        return false;
    }
    String lengthStr = lengthUtf8.toString().trim();
    if (StringUtil.isEmpty(lengthStr)) {
        return false;
    }
    int inHeaderSize;
    try {
        inHeaderSize = Integer.parseInt(lengthStr);
    } catch (NumberFormatException e) {
        LOG.warn("Wrong contentlength format for " + url, e);
        return false;
    }
    int actualSize = content.limit();
    if (inHeaderSize > actualSize) {
        LOG.warn(url + " skipped. Content of size " + inHeaderSize + " was truncated to " + actualSize);
        return true;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(url + " actualSize=" + actualSize + " inHeaderSize=" + inHeaderSize);
    }
    return false;
}
From source file: com.ah.ui.actions.home.clientManagement.service.CertificateGenSV.java - reads a whole file into a ByteBuffer and copies limit() bytes out when no backing array is available.
@SuppressWarnings("resource")
public static byte[] readFromFile(File file) throws IOException {
    FileChannel fileChannel = new FileInputStream(file).getChannel();
    ByteBuffer bb = ByteBuffer.allocate((int) fileChannel.size());
    fileChannel.read(bb);
    fileChannel.close();
    bb.flip();

    byte[] bytes;
    if (bb.hasArray()) {
        bytes = bb.array();
    } else {
        bytes = new byte[bb.limit()];
        bb.get(bytes);
    }
    return bytes;
}
From source file: org.apache.jackrabbit.oak.segment.file.TarReader.java - same graph parsing as above, optionally skipping references to data segments.
private static Map<UUID, List<UUID>> parseGraph(ByteBuffer graphByteBuffer, boolean bulkOnly) {
    int count = graphByteBuffer.getInt(graphByteBuffer.limit() - 12);

    ByteBuffer buffer = graphByteBuffer.duplicate();
    buffer.limit(graphByteBuffer.limit() - 16);

    List<UUID> uuids = newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        uuids.add(new UUID(buffer.getLong(), buffer.getLong()));
    }

    Map<UUID, List<UUID>> graph = newHashMap();
    while (buffer.hasRemaining()) {
        UUID uuid = uuids.get(buffer.getInt());
        List<UUID> list = newArrayList();
        int refid = buffer.getInt();
        while (refid != -1) {
            UUID ref = uuids.get(refid);
            if (!bulkOnly || !isDataSegmentId(ref.getLeastSignificantBits())) {
                list.add(ref);
            }
            refid = buffer.getInt();
        }
        graph.put(uuid, list);
    }
    return graph;
}
From source file: org.cloudfoundry.caldecott.server.converter.ByteBufferHttpMessageConverter.java - reports limit() as the HTTP Content-Length.
@Override
protected Long getContentLength(ByteBuffer buffer, MediaType contentType) {
    return new Long(buffer.limit());
}
From source file: org.apache.kylin.engine.mr.common.CubeStatsWriter.java - serializes HLL counters, writing limit() bytes of each flipped buffer into a BytesWritable.
public static void writeCuboidStatistics(Configuration conf, Path outputPath,
        Map<Long, HLLCounter> cuboidHLLMap, int samplingPercentage, int mapperNumber, double mapperOverlapRatio)
        throws IOException {
    Path seqFilePath = new Path(outputPath, BatchConstants.CFG_STATISTICS_CUBOID_ESTIMATION_FILENAME);

    List<Long> allCuboids = new ArrayList<Long>();
    allCuboids.addAll(cuboidHLLMap.keySet());
    Collections.sort(allCuboids);

    ByteBuffer valueBuf = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);
    SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(seqFilePath),
            SequenceFile.Writer.keyClass(LongWritable.class), SequenceFile.Writer.valueClass(BytesWritable.class));
    try {
        // mapper overlap ratio at key -1
        writer.append(new LongWritable(-1), new BytesWritable(Bytes.toBytes(mapperOverlapRatio)));

        // mapper number at key -2
        writer.append(new LongWritable(-2), new BytesWritable(Bytes.toBytes(mapperNumber)));

        // sampling percentage at key 0
        writer.append(new LongWritable(0L), new BytesWritable(Bytes.toBytes(samplingPercentage)));

        for (long i : allCuboids) {
            valueBuf.clear();
            cuboidHLLMap.get(i).writeRegisters(valueBuf);
            valueBuf.flip();
            writer.append(new LongWritable(i), new BytesWritable(valueBuf.array(), valueBuf.limit()));
        }
    } finally {
        IOUtils.closeQuietly(writer);
    }
}