List of usage examples for java.nio ByteBuffer putShort
public abstract ByteBuffer putShort(int index, short value);
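This is the absolute form of putShort: it writes the two bytes encoding the given short at the specified index, using the buffer's current byte order, and does not change the buffer's position. A minimal sketch of the call, separate from the Jackcess examples below (the class name, buffer size, offset, values, and byte order here are arbitrary choices for illustration):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class PutShortDemo {
  public static void main(String[] args) {
    // small scratch buffer; the byte order is set explicitly only to show
    // that it controls how the two bytes are laid out
    ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);

    // absolute write: stores the two bytes of 0x1234 at offset 4 without
    // moving the buffer's position
    buf.putShort(4, (short) 0x1234);

    // absolute read back from the same offset; position is still 0
    short value = buf.getShort(4);
    System.out.printf("value=0x%04x position=%d%n", value, buf.position());
  }
}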
From source file:com.healthmarketscience.jackcess.Table.java
/**
 * Update the row on which the given rowState is currently positioned.
 * <p>
 * Note, this method is not generally meant to be used directly.  You should
 * use the {@link #updateCurrentRow} method or use the Cursor class, which
 * allows for more complex table interactions, e.g.
 * {@link Cursor#setCurrentRowValue} and {@link Cursor#updateCurrentRow}.
 * @usage _advanced_method_
 */
public void updateRow(RowState rowState, RowId rowId, Object... row)
  throws IOException
{
  requireValidRowId(rowId);

  // ensure that the relevant row state is up-to-date
  ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
  int oldRowSize = rowBuffer.remaining();

  requireNonDeletedRow(rowState, rowId);

  // we need to make sure the row is the right length & type (fill with
  // null if too short).
  if ((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
    row = dupeRow(row, _columns.size());
  }

  // fill in any auto-numbers (we don't allow autonumber values to be
  // modified)
  handleAutoNumbersForUpdate(row, rowBuffer, rowState);

  // hang on to the raw values of var length columns we are "keeping".  this
  // will allow us to re-use pre-written var length data, which can save
  // space for things like long value columns.
  Map<Column, byte[]> rawVarValues =
    (!_varColumns.isEmpty() ? new HashMap<Column, byte[]>() : null);

  // fill in any "keep value" fields
  for (Column column : _columns) {
    if (column.getRowValue(row) == Column.KEEP_VALUE) {
      column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column,
                                           rowState, rawVarValues));
    }
  }

  // generate new row bytes
  ByteBuffer newRowData = createRow(
      row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
      rawVarValues);

  if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
    throw new IOException("Row size " + newRowData.limit() +
                          " is too large");
  }

  if (!_indexDatas.isEmpty()) {
    Object[] oldRowValues = rowState.getRowValues();

    // delete old values from indexes
    for (IndexData indexData : _indexDatas) {
      indexData.deleteRow(oldRowValues, rowId);
    }
  }

  // see if we can squeeze the new row data into the existing row
  rowBuffer.reset();
  int rowSize = newRowData.remaining();

  ByteBuffer dataPage = null;
  int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

  if (oldRowSize >= rowSize) {
    // awesome, slap it in!
    rowBuffer.put(newRowData);

    // grab the page we just updated
    dataPage = rowState.getFinalPage();
    pageNumber = rowState.getFinalRowId().getPageNumber();

  } else {
    // bummer, need to find a new page for the data
    dataPage = findFreeRowSpace(rowSize, null,
                                PageChannel.INVALID_PAGE_NUMBER);
    pageNumber = _addRowBufferH.getPageNumber();

    RowId headerRowId = rowState.getHeaderRowId();
    ByteBuffer headerPage = rowState.getHeaderPage();
    if (pageNumber == headerRowId.getPageNumber()) {
      // new row is on the same page as header row, share page
      dataPage = headerPage;
    }

    // write out the new row data (set the deleted flag on the new data row
    // so that it is ignored during normal table traversal)
    int rowNum = addDataPageRow(dataPage, rowSize, getFormat(),
                                DELETED_ROW_MASK);
    dataPage.put(newRowData);

    // write the overflow info into the header row and clear out the
    // remaining header data
    rowBuffer = PageChannel.narrowBuffer(
        headerPage,
        findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
        findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
    rowBuffer.put((byte) rowNum);
    ByteUtil.put3ByteInt(rowBuffer, pageNumber);
    ByteUtil.clearRemaining(rowBuffer);

    // set the overflow flag on the header row
    int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(),
                                           getFormat());
    headerPage.putShort(headerRowIndex,
                        (short) (headerPage.getShort(headerRowIndex)
                                 | OVERFLOW_ROW_MASK));
    if (pageNumber != headerRowId.getPageNumber()) {
      writeDataPage(headerPage, headerRowId.getPageNumber());
    }
  }

  // update the indexes
  for (IndexData indexData : _indexDatas) {
    indexData.addRow(row, rowId);
  }

  writeDataPage(dataPage, pageNumber);

  updateTableDefinition(0);
}
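In the listing above, putShort(int, short) performs a read-modify-write of the 2-byte row offset entry in the page header: the existing short is fetched with the absolute getShort, the overflow flag bit is OR-ed in, and the result is written back to the same index. A stripped-down sketch of that pattern, under the assumption of an illustrative flag value and offset (the real Jackcess mask constants and page layout are not reproduced here):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class RowFlagSketch {
  // hypothetical flag value for illustration only
  private static final short OVERFLOW_ROW_MASK = (short) 0x8000;

  static void setOverflowFlag(ByteBuffer page, int rowIndex) {
    // absolute read-modify-write: OR the flag into the existing 2-byte
    // row entry without disturbing the buffer's position
    page.putShort(rowIndex,
                  (short) (page.getShort(rowIndex) | OVERFLOW_ROW_MASK));
  }

  public static void main(String[] args) {
    ByteBuffer page = ByteBuffer.allocate(4096).order(ByteOrder.LITTLE_ENDIAN);
    page.putShort(14, (short) 0x01A0);   // pretend row offset entry
    setOverflowFlag(page, 14);
    System.out.printf("entry=0x%04x%n", page.getShort(14));
  }
}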
From source file:com.healthmarketscience.jackcess.impl.TableImpl.java
/**
 * Update the row for the given rowId.
 * @usage _advanced_method_
 */
public Object[] updateRow(RowState rowState, RowIdImpl rowId, Object... row)
  throws IOException
{
  requireValidRowId(rowId);
  getPageChannel().startWrite();
  try {

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
    int oldRowSize = rowBuffer.remaining();

    requireNonDeletedRow(rowState, rowId);

    // we need to make sure the row is the right length & type (fill with
    // null if too short).
    if ((row.length < _columns.size()) ||
        (row.getClass() != Object[].class)) {
      row = dupeRow(row, _columns.size());
    }

    // hang on to the raw values of var length columns we are "keeping".
    // this will allow us to re-use pre-written var length data, which can
    // save space for things like long value columns.
    Map<ColumnImpl, byte[]> keepRawVarValues =
      (!_varColumns.isEmpty() ? new HashMap<ColumnImpl, byte[]>() : null);

    // handle various value massaging activities
    for (ColumnImpl column : _columns) {
      Object rowValue = null;
      if (column.isAutoNumber()) {
        // fill in any auto-numbers (we don't allow autonumber values to be
        // modified)
        rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState,
                                null);
      } else {
        rowValue = column.getRowValue(row);
        if (rowValue == Column.KEEP_VALUE) {
          // fill in any "keep value" fields (restore old value)
          rowValue = getRowColumn(getFormat(), rowBuffer, column, rowState,
                                  keepRawVarValues);
        } else {
          // set oldValue to something that could not possibly be a real
          // value
          Object oldValue = Column.KEEP_VALUE;
          if (_indexColumns.contains(column)) {
            // read (old) row value to help update indexes
            oldValue = getRowColumn(getFormat(), rowBuffer, column,
                                    rowState, null);
          } else {
            oldValue = rowState.getRowCacheValue(column.getColumnIndex());
          }

          // if the old value was passed back in, we don't need to validate
          if (oldValue != rowValue) {
            // pass input value through column validator
            rowValue = column.validate(rowValue);
          }
        }
      }

      column.setRowValue(row, rowValue);
    }

    // generate new row bytes
    ByteBuffer newRowData = createRow(
        row, _writeRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
        keepRawVarValues);

    if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
      throw new IOException("Row size " + newRowData.limit() +
                            " is too large");
    }

    if (!_indexDatas.isEmpty()) {
      IndexData.PendingChange idxChange = null;
      try {
        Object[] oldRowValues = rowState.getRowCacheValues();

        // check foreign keys before actually updating
        _fkEnforcer.updateRow(oldRowValues, row);

        // prepare index updates
        for (IndexData indexData : _indexDatas) {
          idxChange = indexData.prepareUpdateRow(oldRowValues, rowId, row,
                                                 idxChange);
        }

        // complete index updates
        IndexData.commitAll(idxChange);
      } catch (ConstraintViolationException ce) {
        IndexData.rollbackAll(idxChange);
        throw ce;
      }
    }

    // see if we can squeeze the new row data into the existing row
    rowBuffer.reset();
    int rowSize = newRowData.remaining();

    ByteBuffer dataPage = null;
    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

    if (oldRowSize >= rowSize) {
      // awesome, slap it in!
      rowBuffer.put(newRowData);

      // grab the page we just updated
      dataPage = rowState.getFinalPage();
      pageNumber = rowState.getFinalRowId().getPageNumber();

    } else {
      // bummer, need to find a new page for the data
      dataPage = findFreeRowSpace(rowSize, null,
                                  PageChannel.INVALID_PAGE_NUMBER);
      pageNumber = _addRowBufferH.getPageNumber();

      RowIdImpl headerRowId = rowState.getHeaderRowId();
      ByteBuffer headerPage = rowState.getHeaderPage();
      if (pageNumber == headerRowId.getPageNumber()) {
        // new row is on the same page as header row, share page
        dataPage = headerPage;
      }

      // write out the new row data (set the deleted flag on the new data
      // row so that it is ignored during normal table traversal)
      int rowNum = addDataPageRow(dataPage, rowSize, getFormat(),
                                  DELETED_ROW_MASK);
      dataPage.put(newRowData);

      // write the overflow info into the header row and clear out the
      // remaining header data
      rowBuffer = PageChannel.narrowBuffer(
          headerPage,
          findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
          findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
      rowBuffer.put((byte) rowNum);
      ByteUtil.put3ByteInt(rowBuffer, pageNumber);
      ByteUtil.clearRemaining(rowBuffer);

      // set the overflow flag on the header row
      int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(),
                                             getFormat());
      headerPage.putShort(headerRowIndex,
                          (short) (headerPage.getShort(headerRowIndex)
                                   | OVERFLOW_ROW_MASK));
      if (pageNumber != headerRowId.getPageNumber()) {
        writeDataPage(headerPage, headerRowId.getPageNumber());
      }
    }

    writeDataPage(dataPage, pageNumber);

    updateTableDefinition(0);

  } finally {
    getPageChannel().finishWrite();
  }

  return row;
}
From source file:com.healthmarketscience.jackcess.impl.TableImpl.java
/**
 * Delete the row for the given rowId.
 * @usage _advanced_method_
 */
public void deleteRow(RowState rowState, RowIdImpl rowId) throws IOException {
  requireValidRowId(rowId);
  getPageChannel().startWrite();
  try {

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowHeader(rowState, rowId);

    if (rowState.isDeleted()) {
      // don't care about duplicate deletion
      return;
    }
    requireNonDeletedRow(rowState, rowId);

    // delete flag always gets set in the "header" row (even if data is on
    // overflow row)
    int pageNumber = rowState.getHeaderRowId().getPageNumber();
    int rowNumber = rowState.getHeaderRowId().getRowNumber();

    // attempt to fill in index column values
    Object[] rowValues = null;
    if (!_indexDatas.isEmpty()) {

      // move to row data to get index values
      rowBuffer = positionAtRowData(rowState, rowId);

      for (ColumnImpl idxCol : _indexColumns) {
        getRowColumn(getFormat(), rowBuffer, idxCol, rowState, null);
      }

      // use any read rowValues to help update the indexes
      rowValues = rowState.getRowCacheValues();

      // check foreign keys before proceeding w/ deletion
      _fkEnforcer.deleteRow(rowValues);

      // move back to the header
      rowBuffer = positionAtRowHeader(rowState, rowId);
    }

    // finally, pull the trigger
    int rowIndex = getRowStartOffset(rowNumber, getFormat());
    rowBuffer.putShort(rowIndex,
                       (short) (rowBuffer.getShort(rowIndex)
                                | DELETED_ROW_MASK | OVERFLOW_ROW_MASK));
    writeDataPage(rowBuffer, pageNumber);

    // update the indexes
    for (IndexData indexData : _indexDatas) {
      indexData.deleteRow(rowValues, rowId);
    }

    // make sure table def gets updated
    updateTableDefinition(-1);

  } finally {
    getPageChannel().finishWrite();
  }
}
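A common thread in all three listings: the absolute overloads getShort(int) and putShort(int, short) address the 2-byte row entry by offset within the page buffer and do not alter the buffer's position, mark, or limit. That lets these methods patch flag bits in the page header in place while the relative put calls elsewhere in the same method continue to track the write position of the row data itself.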