List of usage examples for java.nio.ByteBuffer.limit()
public final int limit()
Returns this buffer's limit: the index of the first element that should not be read or written.
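Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below) of what limit() reports at each stage of the usual allocate/put/flip cycle, and how the companion limit(int) setter narrows the readable window:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);   // capacity 16, limit 16, position 0
        System.out.println(buf.limit());            // prints 16

        buf.put(new byte[] { 1, 2, 3, 4, 5 });      // position advances to 5
        buf.flip();                                 // limit = old position (5), position = 0
        System.out.println(buf.limit());            // prints 5

        buf.limit(3);                               // shrink the readable window to 3 bytes
        System.out.println(buf.remaining());        // prints 3
    }
}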
From source file: org.apache.storm.daemon.logviewer.handler.LogviewerLogSearchHandler.java
private Map<String, Object> mkMatchData(byte[] needle, ByteBuffer haystack, int haystackOffset, int fileOffset,
        Path canonicalPath, boolean isDaemon, byte[] beforeBytes, byte[] afterBytes)
        throws UnsupportedEncodingException, UnknownHostException {
    String url;
    if (isDaemon) {
        url = urlToMatchCenteredInLogPageDaemonFile(needle, canonicalPath, fileOffset, logviewerPort);
    } else {
        url = urlToMatchCenteredInLogPage(needle, canonicalPath, fileOffset, logviewerPort);
    }

    byte[] haystackBytes = haystack.array();
    String beforeString;
    String afterString;

    if (haystackOffset >= GREP_CONTEXT_SIZE) {
        beforeString = new String(haystackBytes, (haystackOffset - GREP_CONTEXT_SIZE), GREP_CONTEXT_SIZE, "UTF-8");
    } else {
        int numDesired = Math.max(0, GREP_CONTEXT_SIZE - haystackOffset);
        int beforeSize = beforeBytes != null ? beforeBytes.length : 0;
        int numExpected = Math.min(beforeSize, numDesired);

        if (numExpected > 0) {
            StringBuilder sb = new StringBuilder();
            sb.append(new String(beforeBytes, beforeSize - numExpected, numExpected, "UTF-8"));
            sb.append(new String(haystackBytes, 0, haystackOffset, "UTF-8"));
            beforeString = sb.toString();
        } else {
            beforeString = new String(haystackBytes, 0, haystackOffset, "UTF-8");
        }
    }

    int needleSize = needle.length;
    int afterOffset = haystackOffset + needleSize;
    int haystackSize = haystack.limit();

    if ((afterOffset + GREP_CONTEXT_SIZE) < haystackSize) {
        afterString = new String(haystackBytes, afterOffset, GREP_CONTEXT_SIZE, "UTF-8");
    } else {
        int numDesired = GREP_CONTEXT_SIZE - (haystackSize - afterOffset);
        int afterSize = afterBytes != null ? afterBytes.length : 0;
        int numExpected = Math.min(afterSize, numDesired);

        if (numExpected > 0) {
            StringBuilder sb = new StringBuilder();
            sb.append(new String(haystackBytes, afterOffset, (haystackSize - afterOffset), "UTF-8"));
            sb.append(new String(afterBytes, 0, numExpected, "UTF-8"));
            afterString = sb.toString();
        } else {
            afterString = new String(haystackBytes, afterOffset, (haystackSize - afterOffset), "UTF-8");
        }
    }

    Map<String, Object> ret = new HashMap<>();
    ret.put("byteOffset", fileOffset);
    ret.put("beforeString", beforeString);
    ret.put("afterString", afterString);
    ret.put("matchString", new String(needle, "UTF-8"));
    ret.put("logviewerURL", url);

    return ret;
}
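The Storm handler above trusts haystack.limit(), not haystack.array().length, as the end of the searchable data. A small self-contained sketch (not Storm code; the class name and data are made up) of why the two can differ on an array-backed buffer:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class HaystackLimitSketch {
    public static void main(String[] args) {
        ByteBuffer haystack = ByteBuffer.allocate(1024);
        haystack.put("needle in a haystack".getBytes(StandardCharsets.UTF_8));
        haystack.flip();                          // limit now equals the number of bytes written

        byte[] bytes = haystack.array();          // bytes.length is still 1024
        int haystackSize = haystack.limit();      // only the first haystackSize bytes are valid data
        System.out.println(bytes.length + " vs " + haystackSize);  // 1024 vs 20
    }
}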
From source file: org.apache.bookkeeper.bookie.BookieShell.java
/**
 * Format the message into a readable format.
 *
 * @param pos
 *          File offset of the message stored in entry log file
 * @param recBuff
 *          Entry Data
 * @param printMsg
 *          Whether printing the message body
 */
private void formatEntry(long pos, ByteBuffer recBuff, boolean printMsg) {
    long ledgerId = recBuff.getLong();
    long entryId = recBuff.getLong();
    int entrySize = recBuff.limit();

    System.out.println("--------- Lid=" + ledgerId + ", Eid=" + entryId + ", ByteOffset=" + pos
            + ", EntrySize=" + entrySize + " ---------");
    if (entryId == Bookie.METAENTRY_ID_LEDGER_KEY) {
        int masterKeyLen = recBuff.getInt();
        byte[] masterKey = new byte[masterKeyLen];
        recBuff.get(masterKey);
        System.out.println("Type: META");
        System.out.println("MasterKey: " + bytes2Hex(masterKey));
        System.out.println();
        return;
    }
    if (entryId == Bookie.METAENTRY_ID_FENCE_KEY) {
        System.out.println("Type: META");
        System.out.println("Fenced");
        System.out.println();
        return;
    }
    // process a data entry
    long lastAddConfirmed = recBuff.getLong();
    System.out.println("Type: DATA");
    System.out.println("LastConfirmed: " + lastAddConfirmed);
    if (!printMsg) {
        System.out.println();
        return;
    }
    // skip digest checking
    recBuff.position(32 + 8);
    System.out.println("Data:");
    System.out.println();
    try {
        byte[] ret = new byte[recBuff.remaining()];
        recBuff.get(ret);
        formatter.formatEntry(ret);
    } catch (Exception e) {
        System.out.println("N/A. Corrupted.");
    }
    System.out.println();
}
From source file: org.apache.bookkeeper.bookie.BookieShell.java
/**
 * Scan over an entry log file for entries in the given position range
 *
 * @param logId
 *          Entry Log File id.
 * @param rangeStartPos
 *          Start position of the entry we are looking for
 * @param rangeEndPos
 *          End position of the entry we are looking for (-1 for till the end of the entrylog)
 * @param printMsg
 *          Whether printing the entry data.
 * @throws Exception
 */
protected void scanEntryLogForPositionRange(long logId, final long rangeStartPos, final long rangeEndPos,
        final boolean printMsg) throws Exception {
    System.out.println("Scan entry log " + logId + " (" + Long.toHexString(logId) + ".log)"
            + " for PositionRange: " + rangeStartPos + " - " + rangeEndPos);
    final MutableBoolean entryFound = new MutableBoolean(false);
    scanEntryLog(logId, new EntryLogScanner() {
        private MutableBoolean stopScanning = new MutableBoolean(false);

        @Override
        public boolean accept(long ledgerId) {
            return !stopScanning.booleanValue();
        }

        @Override
        public void process(long ledgerId, long entryStartPos, ByteBuffer entry) {
            if (!stopScanning.booleanValue()) {
                if ((rangeEndPos != -1) && (entryStartPos > rangeEndPos)) {
                    stopScanning.setValue(true);
                } else {
                    int entrySize = entry.limit();
                    /**
                     * entrySize of an entry (inclusive of payload and header) value is stored as int
                     * value in log file, but it is not counted in the entrySize, hence for calculating
                     * the end position of the entry we need to add additional 4 (intsize of entrySize).
                     * Please check EntryLogger.scanEntryLog.
                     */
                    long entryEndPos = entryStartPos + entrySize + 4 - 1;
                    if (((rangeEndPos == -1) || (entryStartPos <= rangeEndPos)) && (rangeStartPos <= entryEndPos)) {
                        formatEntry(entryStartPos, entry, printMsg);
                        entryFound.setValue(true);
                    }
                }
            }
        }
    });
    if (!entryFound.booleanValue()) {
        System.out.println("Entry log " + logId + " (" + Long.toHexString(logId)
                + ".log) doesn't has any entry in the range " + rangeStartPos + " - " + rangeEndPos
                + ". Probably the position range, you have provided is lesser than the LOGFILE_HEADER_SIZE (1024) "
                + "or greater than the current log filesize.");
    }
}
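Both BookieShell methods treat entry.limit() as the logical size of a buffer that holds exactly one record. A minimal sketch of that convention, assuming (as the scanner callback suggests) that each entry arrives as a buffer wrapped around one serialized record; the class name and record layout here are illustrative only:

import java.nio.ByteBuffer;

public class EntrySizeSketch {
    public static void main(String[] args) {
        // Pretend this byte[] is one serialized entry read from an entry log.
        byte[] record = ByteBuffer.allocate(16).putLong(42L).putLong(7L).array();

        ByteBuffer entry = ByteBuffer.wrap(record);  // position = 0, limit = record.length
        int entrySize = entry.limit();               // the value formatEntry() prints as EntrySize
        long ledgerId = entry.getLong();             // relative reads advance position, never limit
        long entryId = entry.getLong();
        System.out.println(entrySize + " bytes, lid=" + ledgerId + ", eid=" + entryId);
    }
}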
From source file: com.healthmarketscience.jackcess.Table.java
/**
 * Update the row on which the given rowState is currently positioned.
 * <p>
 * Note, this method is not generally meant to be used directly. You should use the
 * {@link #updateCurrentRow} method or use the Cursor class, which allows for more complex table
 * interactions, e.g. {@link Cursor#setCurrentRowValue} and {@link Cursor#updateCurrentRow}.
 * @usage _advanced_method_
 */
public void updateRow(RowState rowState, RowId rowId, Object... row) throws IOException {
    requireValidRowId(rowId);

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
    int oldRowSize = rowBuffer.remaining();

    requireNonDeletedRow(rowState, rowId);

    // we need to make sure the row is the right length & type (fill with
    // null if too short).
    if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
        row = dupeRow(row, _columns.size());
    }

    // fill in any auto-numbers (we don't allow autonumber values to be
    // modified)
    handleAutoNumbersForUpdate(row, rowBuffer, rowState);

    // hang on to the raw values of var length columns we are "keeping".  this
    // will allow us to re-use pre-written var length data, which can save
    // space for things like long value columns.
    Map<Column, byte[]> rawVarValues = (!_varColumns.isEmpty() ? new HashMap<Column, byte[]>() : null);

    // fill in any "keep value" fields
    for (Column column : _columns) {
        if (column.getRowValue(row) == Column.KEEP_VALUE) {
            column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column, rowState, rawVarValues));
        }
    }

    // generate new row bytes
    ByteBuffer newRowData = createRow(row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
            rawVarValues);

    if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
        throw new IOException("Row size " + newRowData.limit() + " is too large");
    }

    if (!_indexDatas.isEmpty()) {
        Object[] oldRowValues = rowState.getRowValues();

        // delete old values from indexes
        for (IndexData indexData : _indexDatas) {
            indexData.deleteRow(oldRowValues, rowId);
        }
    }

    // see if we can squeeze the new row data into the existing row
    rowBuffer.reset();
    int rowSize = newRowData.remaining();

    ByteBuffer dataPage = null;
    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

    if (oldRowSize >= rowSize) {
        // awesome, slap it in!
        rowBuffer.put(newRowData);

        // grab the page we just updated
        dataPage = rowState.getFinalPage();
        pageNumber = rowState.getFinalRowId().getPageNumber();
    } else {
        // bummer, need to find a new page for the data
        dataPage = findFreeRowSpace(rowSize, null, PageChannel.INVALID_PAGE_NUMBER);
        pageNumber = _addRowBufferH.getPageNumber();

        RowId headerRowId = rowState.getHeaderRowId();
        ByteBuffer headerPage = rowState.getHeaderPage();
        if (pageNumber == headerRowId.getPageNumber()) {
            // new row is on the same page as header row, share page
            dataPage = headerPage;
        }

        // write out the new row data (set the deleted flag on the new data row
        // so that it is ignored during normal table traversal)
        int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), DELETED_ROW_MASK);
        dataPage.put(newRowData);

        // write the overflow info into the header row and clear out the
        // remaining header data
        rowBuffer = PageChannel.narrowBuffer(headerPage,
                findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
                findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
        rowBuffer.put((byte) rowNum);
        ByteUtil.put3ByteInt(rowBuffer, pageNumber);
        ByteUtil.clearRemaining(rowBuffer);

        // set the overflow flag on the header row
        int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(), getFormat());
        headerPage.putShort(headerRowIndex, (short) (headerPage.getShort(headerRowIndex) | OVERFLOW_ROW_MASK));
        if (pageNumber != headerRowId.getPageNumber()) {
            writeDataPage(headerPage, headerRowId.getPageNumber());
        }
    }

    // update the indexes
    for (IndexData indexData : _indexDatas) {
        indexData.addRow(row, rowId);
    }

    writeDataPage(dataPage, pageNumber);

    updateTableDefinition(0);
}
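The size check above relies on createRow() handing back a buffer that has been flipped after encoding, so limit() marks the end of the new row image. A rough, self-contained sketch of that encode-flip-check pattern; the class, the toy row format, and the MAX_ROW_SIZE value here are stand-ins rather than Jackcess internals:

import java.io.IOException;
import java.nio.ByteBuffer;

public class RowSizeCheckSketch {
    private static final int MAX_ROW_SIZE = 4060;   // illustrative constant, not necessarily the Jackcess value

    static ByteBuffer encodeRow(byte[] payload) {
        ByteBuffer rowData = ByteBuffer.allocate(8192);
        rowData.putShort((short) payload.length);    // toy row format: length prefix + payload
        rowData.put(payload);
        rowData.flip();                              // limit now marks the end of the encoded row
        return rowData;
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer newRowData = encodeRow(new byte[100]);
        if (newRowData.limit() > MAX_ROW_SIZE) {     // same shape of check updateRow() performs
            throw new IOException("Row size " + newRowData.limit() + " is too large");
        }
        System.out.println("Encoded row size: " + newRowData.limit());
    }
}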
From source file: org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2.java
private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey) throws IOException {
    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withCompression(compressAlgo).build();
    HFileWriterV2 writer = (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = randomValue(rand);
        KeyValue keyValue = new KeyValue(keyBytes, null, null, valueBytes);
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    // A "manual" version of a new-format HFile reader. This unit test was
    // written before the V2 reader was fully implemented.

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(2, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());

    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(false)
            .withIncludesTags(false).withCompression(compressAlgo).build();

    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        while (buf.hasRemaining()) {
            int keyLen = buf.getInt();
            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);

            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.
    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}
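When handing an array-backed buffer to a (byte[], offset, length) API, as the test does with Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t), the offset and length come from the buffer's indices; in the meta-block loop the buffer has not been read from yet, so position is 0 and limit() alone gives the valid length. A slightly more general, self-contained sketch (class name and data are illustrative, not HBase code):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ArrayViewSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("header|payload".getBytes(StandardCharsets.UTF_8));
        buf.position(7);                                  // pretend the header has already been consumed

        byte[] backing = buf.array();
        int start = buf.arrayOffset() + buf.position();   // absolute start of the readable region
        int length = buf.limit() - buf.position();        // equals buf.remaining()

        // Hand the readable region to a (byte[], offset, length) style API.
        System.out.println(new String(backing, start, length, StandardCharsets.UTF_8));  // "payload"
    }
}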
From source file: org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java
@Override
public synchronized int read(ByteBuffer buf) throws IOException {
    int nRead = 0;
    if (verifyChecksum) {
        // A 'direct' read actually has three phases. The first drains any
        // remaining bytes from the slow read buffer. After this the read is
        // guaranteed to be on a checksum chunk boundary. If there are still bytes
        // to read, the fast direct path is used for as many remaining bytes as
        // possible, up to a multiple of the checksum chunk size. Finally, any
        // 'odd' bytes remaining at the end of the read cause another slow read to
        // be issued, which involves an extra copy.

        // Every 'slow' read tries to fill the slow read buffer in one go for
        // efficiency's sake. As described above, all non-checksum-chunk-aligned
        // reads will be served from the slower read path.

        if (slowReadBuff.hasRemaining()) {
            // There are remaining bytes from a small read available. This usually
            // means this read is unaligned, which falls back to the slow path.
            int fromSlowReadBuff = Math.min(buf.remaining(), slowReadBuff.remaining());
            writeSlice(slowReadBuff, buf, fromSlowReadBuff);
            nRead += fromSlowReadBuff;
        }

        if (buf.remaining() >= bytesPerChecksum && offsetFromChunkBoundary == 0) {
            // Since we have drained the 'small read' buffer, we are guaranteed to
            // be chunk-aligned
            int len = buf.remaining() - (buf.remaining() % bytesPerChecksum);

            // There's only enough checksum buffer space available to checksum one
            // entire slow read buffer. This saves keeping the number of checksum
            // chunks around.
            len = Math.min(len, slowReadBuff.capacity());
            int oldlimit = buf.limit();
            buf.limit(buf.position() + len);
            int readResult = 0;
            try {
                readResult = doByteBufferRead(buf);
            } finally {
                buf.limit(oldlimit);
            }
            if (readResult == -1) {
                return nRead;
            } else {
                nRead += readResult;
                buf.position(buf.position() + readResult);
            }
        }

        // offsetFromChunkBoundary > 0 => unaligned read, use slow path to read
        // until chunk boundary
        if ((buf.remaining() > 0 && buf.remaining() < bytesPerChecksum) || offsetFromChunkBoundary > 0) {
            int toRead = Math.min(buf.remaining(), bytesPerChecksum - offsetFromChunkBoundary);
            int readResult = fillSlowReadBuffer(toRead);
            if (readResult == -1) {
                return nRead;
            } else {
                int fromSlowReadBuff = Math.min(readResult, buf.remaining());
                writeSlice(slowReadBuff, buf, fromSlowReadBuff);
                nRead += fromSlowReadBuff;
            }
        }
    } else {
        // Non-checksummed reads are much easier; we can just fill the buffer directly.
        nRead = doByteBufferRead(buf);
        if (nRead > 0) {
            buf.position(buf.position() + nRead);
        }
    }

    return nRead;
}
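The checksummed branch above uses the classic save/narrow/restore idiom: remember the caller's limit, temporarily lower it so a bulk read cannot run past a chunk boundary, and restore it in a finally block. A stripped-down, self-contained sketch of just that idiom, using an illustrative channel-based reader rather than HDFS's doByteBufferRead:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class BoundedReadSketch {
    // Read at most maxBytes into buf, then put the caller's limit back.
    static int readAtMost(ReadableByteChannel ch, ByteBuffer buf, int maxBytes) throws IOException {
        int oldLimit = buf.limit();
        buf.limit(buf.position() + Math.min(maxBytes, buf.remaining()));  // temporarily narrow the window
        try {
            return ch.read(buf);          // the read cannot run past the temporary limit
        } finally {
            buf.limit(oldLimit);          // always restore the caller's limit
        }
    }

    public static void main(String[] args) throws IOException {
        ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(new byte[64]));
        ByteBuffer buf = ByteBuffer.allocate(32);
        int n = readAtMost(ch, buf, 10);
        System.out.println(n + " bytes read, position=" + buf.position() + ", limit=" + buf.limit());
    }
}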
From source file: com.healthmarketscience.jackcess.impl.TableImpl.java
/**
 * Update the row for the given rowId.
 * @usage _advanced_method_
 */
public Object[] updateRow(RowState rowState, RowIdImpl rowId, Object... row) throws IOException {
    requireValidRowId(rowId);

    getPageChannel().startWrite();
    try {

        // ensure that the relevant row state is up-to-date
        ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
        int oldRowSize = rowBuffer.remaining();

        requireNonDeletedRow(rowState, rowId);

        // we need to make sure the row is the right length & type (fill with
        // null if too short).
        if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
            row = dupeRow(row, _columns.size());
        }

        // hang on to the raw values of var length columns we are "keeping".  this
        // will allow us to re-use pre-written var length data, which can save
        // space for things like long value columns.
        Map<ColumnImpl, byte[]> keepRawVarValues = (!_varColumns.isEmpty() ? new HashMap<ColumnImpl, byte[]>()
                : null);

        // handle various value massaging activities
        for (ColumnImpl column : _columns) {
            Object rowValue = null;
            if (column.isAutoNumber()) {
                // fill in any auto-numbers (we don't allow autonumber values to be
                // modified)
                rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState, null);
            } else {
                rowValue = column.getRowValue(row);
                if (rowValue == Column.KEEP_VALUE) {
                    // fill in any "keep value" fields (restore old value)
                    rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState, keepRawVarValues);
                } else {
                    // set oldValue to something that could not possibly be a real value
                    Object oldValue = Column.KEEP_VALUE;
                    if (_indexColumns.contains(column)) {
                        // read (old) row value to help update indexes
                        oldValue = getRowColumn(getFormat(), rowBuffer, column, rowState, null);
                    } else {
                        oldValue = rowState.getRowCacheValue(column.getColumnIndex());
                    }

                    // if the old value was passed back in, we don't need to validate
                    if (oldValue != rowValue) {
                        // pass input value through column validator
                        rowValue = column.validate(rowValue);
                    }
                }
            }

            column.setRowValue(row, rowValue);
        }

        // generate new row bytes
        ByteBuffer newRowData = createRow(row, _writeRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
                keepRawVarValues);

        if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
            throw new IOException("Row size " + newRowData.limit() + " is too large");
        }

        if (!_indexDatas.isEmpty()) {

            IndexData.PendingChange idxChange = null;
            try {

                Object[] oldRowValues = rowState.getRowCacheValues();

                // check foreign keys before actually updating
                _fkEnforcer.updateRow(oldRowValues, row);

                // prepare index updates
                for (IndexData indexData : _indexDatas) {
                    idxChange = indexData.prepareUpdateRow(oldRowValues, rowId, row, idxChange);
                }

                // complete index updates
                IndexData.commitAll(idxChange);

            } catch (ConstraintViolationException ce) {
                IndexData.rollbackAll(idxChange);
                throw ce;
            }
        }

        // see if we can squeeze the new row data into the existing row
        rowBuffer.reset();
        int rowSize = newRowData.remaining();

        ByteBuffer dataPage = null;
        int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

        if (oldRowSize >= rowSize) {
            // awesome, slap it in!
            rowBuffer.put(newRowData);

            // grab the page we just updated
            dataPage = rowState.getFinalPage();
            pageNumber = rowState.getFinalRowId().getPageNumber();
        } else {
            // bummer, need to find a new page for the data
            dataPage = findFreeRowSpace(rowSize, null, PageChannel.INVALID_PAGE_NUMBER);
            pageNumber = _addRowBufferH.getPageNumber();

            RowIdImpl headerRowId = rowState.getHeaderRowId();
            ByteBuffer headerPage = rowState.getHeaderPage();
            if (pageNumber == headerRowId.getPageNumber()) {
                // new row is on the same page as header row, share page
                dataPage = headerPage;
            }

            // write out the new row data (set the deleted flag on the new data row
            // so that it is ignored during normal table traversal)
            int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), DELETED_ROW_MASK);
            dataPage.put(newRowData);

            // write the overflow info into the header row and clear out the
            // remaining header data
            rowBuffer = PageChannel.narrowBuffer(headerPage,
                    findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
                    findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
            rowBuffer.put((byte) rowNum);
            ByteUtil.put3ByteInt(rowBuffer, pageNumber);
            ByteUtil.clearRemaining(rowBuffer);

            // set the overflow flag on the header row
            int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(), getFormat());
            headerPage.putShort(headerRowIndex,
                    (short) (headerPage.getShort(headerRowIndex) | OVERFLOW_ROW_MASK));
            if (pageNumber != headerRowId.getPageNumber()) {
                writeDataPage(headerPage, headerRowId.getPageNumber());
            }
        }

        writeDataPage(dataPage, pageNumber);

        updateTableDefinition(0);

    } finally {
        getPageChannel().finishWrite();
    }

    return row;
}
From source file: com.healthmarketscience.jackcess.Table.java
/**
 * Reads the column data from the given row buffer. Leaves limit unchanged.
 * Caches the returned value in the rowState.
 */
private static Object getRowColumn(JetFormat format, ByteBuffer rowBuffer, Column column, RowState rowState,
        Map<Column, byte[]> rawVarValues) throws IOException {
    byte[] columnData = null;
    try {

        NullMask nullMask = rowState.getNullMask(rowBuffer);
        boolean isNull = nullMask.isNull(column);
        if (column.getType() == DataType.BOOLEAN) {
            // Boolean values are stored in the null mask. see note about
            // caching below
            return rowState.setRowValue(column.getColumnIndex(), Boolean.valueOf(!isNull));
        } else if (isNull) {
            // well, that's easy! (no need to update cache w/ null)
            return null;
        }

        // reset position to row start
        rowBuffer.reset();

        // locate the column data bytes
        int rowStart = rowBuffer.position();
        int colDataPos = 0;
        int colDataLen = 0;
        if (!column.isVariableLength()) {

            // read fixed length value (non-boolean at this point)
            int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
            colDataPos = dataStart + column.getFixedDataOffset();
            colDataLen = column.getType().getFixedSize(column.getLength());

        } else {
            int varDataStart;
            int varDataEnd;

            if (format.SIZE_ROW_VAR_COL_OFFSET == 2) {

                // read simple var length value
                int varColumnOffsetPos = (rowBuffer.limit() - nullMask.byteSize() - 4)
                        - (column.getVarLenTableIndex() * 2);

                varDataStart = rowBuffer.getShort(varColumnOffsetPos);
                varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);

            } else {

                // read jump-table based var length values
                short[] varColumnOffsets = readJumpTableVarColOffsets(rowState, rowBuffer, rowStart, nullMask);

                varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
                varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
            }

            colDataPos = rowStart + varDataStart;
            colDataLen = varDataEnd - varDataStart;
        }

        // grab the column data
        columnData = new byte[colDataLen];
        rowBuffer.position(colDataPos);
        rowBuffer.get(columnData);

        if ((rawVarValues != null) && column.isVariableLength()) {
            // caller wants raw value as well
            rawVarValues.put(column, columnData);
        }

        // parse the column data. we cache the row values in order to be able
        // to update the index on row deletion. note, most of the returned
        // values are immutable, except for binary data (returned as byte[]),
        // but binary data shouldn't be indexed anyway.
        return rowState.setRowValue(column.getColumnIndex(), column.read(columnData));

    } catch (Exception e) {

        // cache "raw" row value. see note about caching above
        rowState.setRowValue(column.getColumnIndex(), Column.rawDataWrapper(columnData));

        return rowState.handleRowError(column, columnData, e);
    }
}
From source file: com.healthmarketscience.jackcess.impl.TableImpl.java
/**
 * Reads the null mask from the given row buffer. Leaves limit unchanged.
 */
private NullMask getRowNullMask(ByteBuffer rowBuffer) throws IOException {
    // reset position to row start
    rowBuffer.reset();

    // Number of columns in this row
    int columnCount = ByteUtil.getUnsignedVarInt(rowBuffer, getFormat().SIZE_ROW_COLUMN_COUNT);

    // read null mask
    NullMask nullMask = new NullMask(columnCount);
    rowBuffer.position(rowBuffer.limit() - nullMask.byteSize()); // Null mask at end
    nullMask.read(rowBuffer);

    return nullMask;
}
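getRowNullMask() jumps to limit() minus the mask size because the null mask is stored at the very end of the row image. A minimal, self-contained sketch of that read-a-trailer-from-the-end technique; the class name, toy row layout, and 4-byte trailer are assumptions for illustration:

import java.nio.ByteBuffer;

public class TrailerSketch {
    public static void main(String[] args) {
        // Build a toy row image: 12 bytes of body followed by a 4-byte trailer.
        ByteBuffer row = ByteBuffer.allocate(16);
        row.put(new byte[12]).putInt(0xCAFEBABE);
        row.flip();

        int trailerSize = 4;
        row.position(row.limit() - trailerSize);            // jump to the last trailerSize bytes
        int trailer = row.getInt();                         // read the trailer; limit itself never changes
        System.out.println(Integer.toHexString(trailer));   // cafebabe
    }
}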
From source file: org.apache.hadoop.hbase.io.hfile.TestHFileWriterV3.java
private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey, boolean useTags) throws IOException {
    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(useTags)
            .withCompression(compressAlgo).build();
    HFileWriterV3 writer = (HFileWriterV3) new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).withComparator(KeyValue.COMPARATOR).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
        KeyValue keyValue = null;
        if (useTags) {
            ArrayList<Tag> tags = new ArrayList<Tag>();
            for (int j = 0; j < 1 + rand.nextInt(4); j++) {
                byte[] tagBytes = new byte[16];
                rand.nextBytes(tagBytes);
                tags.add(new Tag((byte) 1, tagBytes));
            }
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags);
        } else {
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes);
        }
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(3, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo).withIncludesMvcc(false)
            .withIncludesTags(useTags).withHBaseCheckSum(true).build();
    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV3.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        int keyLen = -1;
        while (buf.hasRemaining()) {
            keyLen = buf.getInt();
            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);
            byte[] tagValue = null;
            if (useTags) {
                int tagLen = buf.getShort();
                tagValue = new byte[tagLen];
                buf.get(tagValue);
            }

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);
            if (useTags) {
                assertNotNull(tagValue);
                KeyValue tkv = keyValues.get(entriesRead);
                assertEquals(tagValue.length, tkv.getTagsLength());
                assertTrue(Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(), tkv.getTagsOffset(),
                        tkv.getTagsLength()) == 0);
            }

            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.
    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}