List of usage examples for the java.io.IOError constructor
public IOError(Throwable cause)
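IOError is an unchecked java.lang.Error signaling a serious I/O failure; this constructor wraps the underlying cause so it stays retrievable via getCause(). For orientation, here is a minimal sketch of the pattern every example below uses: a checked IOException caught at a boundary whose signature cannot declare it, then rethrown as an unchecked IOError. The DefaultsLoader class and the config path are hypothetical; only the java.io and java.nio.file calls are real.

import java.io.IOError;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class DefaultsLoader {
    // Called from contexts that cannot handle a checked IOException,
    // so the failure is rethrown as an unchecked IOError.
    public static String loadDefaults() {
        try {
            byte[] raw = Files.readAllBytes(Paths.get("/etc/app/defaults.conf"));
            return new String(raw, StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new IOError(e); // original exception preserved as the cause
        }
    }
}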
From source file:com.newatlanta.appengine.nio.file.GaePath.java
@Override
public URI toUri() {
    try {
        return new URI(fileObject.getName().getURI());
    } catch (Exception e) {
        throw new IOError(e);
    }
}
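Path.toUri() is declared without checked exceptions, and the java.nio.file.Path contract itself documents IOError for the case where obtaining the URI fails, so wrapping the URISyntaxException (or any other failure) in an IOError is the idiomatic way for this override to report the error.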
From source file:org.apache.cassandra.db.CompactionManager.java
/**
 * Deserialize everything in the CFS and re-serialize w/ the newest version. Also attempts to recover
 * from bogus row keys / sizes using data from the index, and skips rows with garbage columns that resulted
 * from early ByteBuffer bugs.
 *
 * @throws IOException
 */
private void doScrub(ColumnFamilyStore cfs, Collection<SSTableReader> sstables) throws IOException {
    assert !cfs.isIndex();
    for (final SSTableReader sstable : sstables) {
        logger.info("Scrubbing " + sstable);

        // Calculate the expected compacted filesize
        String compactionFileLocation = cfs.table.getDataFileLocation(sstable.length());
        if (compactionFileLocation == null)
            throw new IOException("disk full");
        int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
                (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));

        // loop through each row, deserializing to check for damage.
        // we'll also loop through the index at the same time, using the position from the index to recover if the
        // row header (key or data size) is corrupt. (This means our position in the index file will be one row
        // "ahead" of the data file.)
        final BufferedRandomAccessFile dataFile = BufferedRandomAccessFile
                .getUncachingReader(sstable.getFilename());
        String indexFilename = sstable.descriptor.filenameFor(Component.PRIMARY_INDEX);
        BufferedRandomAccessFile indexFile = BufferedRandomAccessFile.getUncachingReader(indexFilename);
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            // throw away variable so we don't have a side effect in the assert
            long firstRowPositionFromIndex = indexFile.readLong();
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }

        SSTableWriter writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null,
                Collections.singletonList(sstable));
        executor.beginCompaction(new ScrubInfo(dataFile, sstable));
        int goodRows = 0, badRows = 0, emptyRows = 0;

        while (!dataFile.isEOF()) {
            long rowStart = dataFile.getFilePointer();
            if (logger.isDebugEnabled())
                logger.debug("Reading row at " + rowStart);

            DecoratedKey key = null;
            long dataSize = -1;
            try {
                key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor,
                        ByteBufferUtil.readWithShortLength(dataFile));
                dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                if (logger.isDebugEnabled())
                    logger.debug(String.format("row %s is %s bytes",
                            ByteBufferUtil.bytesToHex(key.key), dataSize));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }

            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
            } catch (Throwable th) {
                logger.warn("Error reading index file", th);
                nextIndexKey = null;
                nextRowPositionFromIndex = dataFile.length();
            }

            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null
                    ? -1
                    : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
            long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
            assert currentIndexKey != null || indexFile.isEOF();
            if (logger.isDebugEnabled() && currentIndexKey != null)
                logger.debug(String.format("Index doublecheck: row %s is %s bytes",
                        ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex));

            writer.mark();
            try {
                if (key == null)
                    throw new IOError(new IOException("Unable to read row key from data file"));
                if (dataSize > dataFile.length())
                    throw new IOError(new IOException("Impossible row size " + dataSize));
                SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStart,
                        dataSize, true);
                AbstractCompactedRow compactedRow = getCompactedRow(row, sstable.descriptor, true);
                if (compactedRow.isEmpty()) {
                    emptyRows++;
                } else {
                    writer.append(compactedRow);
                    goodRows++;
                }
                if (!key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    logger.warn("Row scrubbed successfully but index file contains a different key or row size; "
                            + "consider rebuilding the index as described in "
                            + "http://www.mail-archive.com/user@cassandra.apache.org/msg03325.html");
            } catch (Throwable th) {
                throwIfFatal(th);
                logger.warn("Non-fatal error reading row (stacktrace follows)", th);
                writer.reset();
                if (currentIndexKey != null && (key == null || !key.key.equals(currentIndexKey)
                        || dataStart != dataStartFromIndex || dataSize != dataSizeFromIndex)) {
                    logger.info(String.format("Retrying from row index; data is %s bytes starting at %s",
                            dataSizeFromIndex, dataStartFromIndex));
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, currentIndexKey);
                    try {
                        SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key,
                                dataStartFromIndex, dataSizeFromIndex, true);
                        AbstractCompactedRow compactedRow = getCompactedRow(row, sstable.descriptor, true);
                        if (compactedRow.isEmpty()) {
                            emptyRows++;
                        } else {
                            writer.append(compactedRow);
                            goodRows++;
                        }
                    } catch (Throwable th2) {
                        throwIfFatal(th2);
                        logger.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2);
                        writer.reset();
                        dataFile.seek(nextRowPositionFromIndex);
                        badRows++;
                    }
                } else {
                    logger.warn("Row at " + dataStart + " is unreadable; skipping to next");
                    if (currentIndexKey != null)
                        dataFile.seek(nextRowPositionFromIndex);
                    badRows++;
                }
            }
        }

        if (writer.getFilePointer() > 0) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            cfs.replaceCompactedSSTables(Arrays.asList(sstable), Arrays.asList(newSstable));
            logger.info("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and "
                    + emptyRows + " empty (tombstoned) rows dropped");
            if (badRows > 0)
                logger.warn("Unable to recover " + badRows + " rows that were skipped. You can attempt manual "
                        + "recovery from the pre-scrub snapshot. You can also run nodetool repair to transfer "
                        + "the data from a healthy replica, if any");
        } else {
            cfs.markCompacted(Arrays.asList(sstable));
            if (badRows > 0)
                logger.warn("No valid rows found while scrubbing " + sstable + "; it is marked for deletion now. "
                        + "If you want to attempt manual recovery, you can find a copy in the pre-scrub snapshot");
            else
                logger.info("Scrub of " + sstable + " complete; looks like all " + emptyRows
                        + " rows were tombstoned");
        }
    }
}
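Note how IOError doubles as local control flow here: the two throw new IOError(new IOException(...)) statements, for a missing key and for an impossible row size, are caught immediately by the enclosing catch (Throwable th), which routes the damaged row into the index-based retry path instead of aborting the scrub.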
From source file:org.apache.cassandra.db.compaction.CompactionManager.java
private void scrubOne(ColumnFamilyStore cfs, SSTableReader sstable) throws IOException {
    logger.info("Scrubbing " + sstable);
    CompactionController controller = new CompactionController(cfs, Collections.singletonList(sstable),
            getDefaultGcBefore(cfs), true);
    boolean isCommutative = cfs.metadata.getDefaultValidator().isCommutative();

    // Calculate the expected compacted filesize
    String compactionFileLocation = cfs.table.getDataFileLocation(sstable.length());
    if (compactionFileLocation == null)
        throw new IOException("disk full");
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    final BufferedRandomAccessFile dataFile = BufferedRandomAccessFile
            .getUncachingReader(sstable.getFilename());
    String indexFilename = sstable.descriptor.filenameFor(Component.PRIMARY_INDEX);
    BufferedRandomAccessFile indexFile = BufferedRandomAccessFile.getUncachingReader(indexFilename);
    try {
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            // throw away variable so we don't have a side effect in the assert
            long firstRowPositionFromIndex = indexFile.readLong();
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }

        SSTableWriter writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null,
                Collections.singletonList(sstable));
        executor.beginCompaction(new ScrubInfo(dataFile, sstable));
        int goodRows = 0, badRows = 0, emptyRows = 0;

        while (!dataFile.isEOF()) {
            long rowStart = dataFile.getFilePointer();
            if (logger.isDebugEnabled())
                logger.debug("Reading row at " + rowStart);

            DecoratedKey key = null;
            long dataSize = -1;
            try {
                key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor,
                        ByteBufferUtil.readWithShortLength(dataFile));
                dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                if (logger.isDebugEnabled())
                    logger.debug(String.format("row %s is %s bytes",
                            ByteBufferUtil.bytesToHex(key.key), dataSize));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }

            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
            } catch (Throwable th) {
                logger.warn("Error reading index file", th);
                nextIndexKey = null;
                nextRowPositionFromIndex = dataFile.length();
            }

            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null
                    ? -1
                    : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
            long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
            assert currentIndexKey != null || indexFile.isEOF();
            if (logger.isDebugEnabled() && currentIndexKey != null)
                logger.debug(String.format("Index doublecheck: row %s is %s bytes",
                        ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex));

            writer.mark();
            try {
                if (key == null)
                    throw new IOError(new IOException("Unable to read row key from data file"));
                if (dataSize > dataFile.length())
                    throw new IOError(new IOException("Impossible row size " + dataSize));
                SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStart,
                        dataSize, true);
                AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                if (compactedRow.isEmpty()) {
                    emptyRows++;
                } else {
                    writer.append(compactedRow);
                    goodRows++;
                }
                if (!key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    logger.warn("Index file contained a different key or row size; using key from data file");
            } catch (Throwable th) {
                throwIfFatal(th);
                logger.warn("Non-fatal error reading row (stacktrace follows)", th);
                writer.reset();
                if (currentIndexKey != null && (key == null || !key.key.equals(currentIndexKey)
                        || dataStart != dataStartFromIndex || dataSize != dataSizeFromIndex)) {
                    logger.info(String.format("Retrying from row index; data is %s bytes starting at %s",
                            dataSizeFromIndex, dataStartFromIndex));
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, currentIndexKey);
                    try {
                        SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key,
                                dataStartFromIndex, dataSizeFromIndex, true);
                        AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                        if (compactedRow.isEmpty()) {
                            emptyRows++;
                        } else {
                            writer.append(compactedRow);
                            goodRows++;
                        }
                    } catch (Throwable th2) {
                        throwIfFatal(th2);
                        // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                        if (isCommutative)
                            throw new IOError(th2);
                        logger.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2);
                        writer.reset();
                        dataFile.seek(nextRowPositionFromIndex);
                        badRows++;
                    }
                } else {
                    // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                    if (isCommutative)
                        throw new IOError(th);
                    logger.warn("Row at " + dataStart + " is unreadable; skipping to next");
                    if (currentIndexKey != null)
                        dataFile.seek(nextRowPositionFromIndex);
                    badRows++;
                }
            }
        }

        if (writer.getFilePointer() > 0) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            cfs.replaceCompactedSSTables(Arrays.asList(sstable), Arrays.asList(newSstable));
            logger.info("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and "
                    + emptyRows + " empty (tombstoned) rows dropped");
            if (badRows > 0)
                logger.warn("Unable to recover " + badRows + " rows that were skipped. You can attempt manual "
                        + "recovery from the pre-scrub snapshot. You can also run nodetool repair to transfer "
                        + "the data from a healthy replica, if any");
        } else {
            cfs.markCompacted(Arrays.asList(sstable));
            if (badRows > 0)
                logger.warn("No valid rows found while scrubbing " + sstable + "; it is marked for deletion now. "
                        + "If you want to attempt manual recovery, you can find a copy in the pre-scrub snapshot");
            else
                logger.info("Scrub of " + sstable + " complete; looks like all " + emptyRows
                        + " rows were tombstoned");
        }
    } finally {
        FileUtils.closeQuietly(dataFile);
        FileUtils.closeQuietly(indexFile);
    }
}
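This later variant (moved to org.apache.cassandra.db.compaction) refines the same skeleton in two ways: the data and index files are now closed in a finally block via FileUtils.closeQuietly, and for counter (commutative) column families the caught Throwable is rethrown wrapped in an IOError instead of skipping the row, since skipping rows is dangerous for counters, per the CASSANDRA-2759 reference in the source comments.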
From source file:org.apache.cassandra.db.ColumnFamilyStore.java
/**
 * Fetch a range of rows and columns from memtables/sstables.
 *
 * @param superColumn optional SuperColumn to slice subcolumns of; null to slice top-level columns
 * @param range Either a Bounds, which includes start key, or a Range, which does not.
 * @param maxResults Maximum rows to return
 * @param columnFilter description of the columns we're interested in for each row
 * @return the rows in the given range, at most maxResults of them
 */
public List<Row> getRangeSlice(ByteBuffer superColumn, final AbstractBounds range, int maxResults,
        IFilter columnFilter) throws ExecutionException, InterruptedException {
    assert range instanceof Bounds
            || (!((Range) range).isWrapAround()
                    || range.right.equals(StorageService.getPartitioner().getMinimumToken())) : range;

    DecoratedKey startWith = new DecoratedKey(range.left, null);
    DecoratedKey stopAt = new DecoratedKey(range.right, null);

    QueryFilter filter = new QueryFilter(null, new QueryPath(columnFamily, superColumn, null), columnFilter);
    int gcBefore = (int) (System.currentTimeMillis() / 1000) - metadata.getGcGraceSeconds();

    DataTracker.View currentView = data.getView();
    Collection<Memtable> memtables = new ArrayList<Memtable>();
    memtables.add(currentView.memtable);
    memtables.addAll(currentView.memtablesPendingFlush);
    // It is fine to alias the View.sstables since it's an unmodifiable collection
    Collection<SSTableReader> sstables = currentView.sstables;

    RowIterator iterator = RowIteratorFactory.getIterator(memtables, sstables, startWith, stopAt, filter,
            getComparator(), this);
    List<Row> rows = new ArrayList<Row>();
    try {
        // pull rows out of the iterator
        boolean first = true;
        while (iterator.hasNext()) {
            Row current = iterator.next();
            DecoratedKey key = current.key;

            if (!stopAt.isEmpty() && stopAt.compareTo(key) < 0)
                return rows;

            // skip first one
            if (range instanceof Bounds || !first || !key.equals(startWith)) {
                // TODO this is necessary because when we collate supercolumns together, we don't check
                // their subcolumns for relevance, so we need to do a second prune post facto here.
                rows.add(current.cf != null && current.cf.isSuper()
                        ? new Row(current.key, ColumnFamilyStore.removeDeleted(current.cf, gcBefore))
                        : current);
                if (logger.isDebugEnabled())
                    logger.debug("scanned " + key);
            }
            first = false;

            if (rows.size() >= maxResults)
                return rows;
        }
    } finally {
        try {
            iterator.close();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
    return rows;
}
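Here the method declares ExecutionException and InterruptedException but not IOException, so a failure while closing the row iterator in the finally block has no checked channel out; it surfaces as an IOError instead.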
From source file:org.apache.cassandra.db.ColumnFamilyStore.java
private void snapshotWithoutFlush(String snapshotName) {
    for (ColumnFamilyStore cfs : concatWithIndexes()) {
        for (SSTableReader ssTable : cfs.data.getSSTables()) {
            try {
                // mkdir
                File dataDirectory = ssTable.descriptor.directory.getParentFile();
                String snapshotDirectoryPath = Table.getSnapshotPath(dataDirectory.getAbsolutePath(),
                        table.name, snapshotName);
                FileUtils.createDirectory(snapshotDirectoryPath);

                // hard links
                ssTable.createLinks(snapshotDirectoryPath);
                if (logger.isDebugEnabled())
                    logger.debug("Snapshot for " + table + " keyspace data file " + ssTable.getFilename()
                            + " created in " + snapshotDirectoryPath);
            } catch (IOException e) {
                throw new IOError(e);
            }
        }
    }
}
From source file:com.bigdata.dastor.service.StorageService.java
@Override
public DiskSpaceLoad getTableDiskSpaceLoad(String tableName) {
    DiskSpaceLoad load = new DiskSpaceLoad();
    Table table = null;
    try {
        table = Table.open(tableName);
    } catch (IOException e) {
        throw new IOError(e);
    }
    for (String cfName : table.getColumnFamilies()) {
        ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
        load.net += cfs.getTotalDiskSpaceUsed();
        load.gross += cfs.getTotalDiskSpaceUsed();
    }
    return load;
}
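The common thread across these examples: IOError(Throwable) is the standard escape hatch when a checked IOException arises inside a method whose signature, often fixed by an interface or superclass, cannot declare it, while still preserving the original exception as the cause for diagnostics.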