protected Collection<SSTableReader> openSSTables(final Map<InetAddress, Collection<Range<Token>>> ranges) outputHandler.output("Opening sstables and calculating sections to stream");
private void saveOutOfOrderRow(DecoratedKey prevKey, DecoratedKey key, UnfilteredRowIterator iterator) { // TODO bitch if the row is too large? if it is there's not much we can do ... outputHandler.warn(String.format("Out of order row detected (%s found after %s)", key, prevKey)); outOfOrder.add(ImmutableBTreePartition.create(iterator)); }
outputHandler.output(String.format("Scrubbing %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); try (SSTableRewriter writer = SSTableRewriter.construct(cfs, transaction, false, sstable.maxDataAge); Refs<SSTableReader> refs = Refs.ref(Collections.singleton(sstable))) outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex))); outputHandler.warn(String.format("Data file row position %d differs from index file row position %d", dataStart, dataStartFromIndex)); outputHandler.warn("Error reading row (stacktrace follows):", th); outputHandler.output(String.format("Retrying from row index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex)); key = sstable.decorateKey(currentIndexKey); outputHandler.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2); badRows++; seekToNextRow(); outputHandler.warn("Row starting at position " + dataStart + " is unreadable; skipping to next"); badRows++; if (currentIndexKey != null) outputHandler.warn(String.format("%d out of order rows found while scrubbing %s; Those have been written (in order) to a new sstable (%s)", outOfOrder.size(), sstable, newInOrderSstable)); outputHandler.output("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and " + emptyRows + " empty (tombstoned) rows dropped"); if (negativeLocalDeletionInfoMetrics.fixedRows > 0) outputHandler.output("Fixed " + negativeLocalDeletionInfoMetrics.fixedRows + " rows with overflowed local deletion time.");
outputHandler.output(String.format("Verifying %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); outputHandler.output(String.format("Deserializing sstable metadata for %s ", sstable)); try outputHandler.debug(t.getMessage()); markAndThrow(false); outputHandler.output(String.format("Checking computed hash of %s ", sstable)); outputHandler.output("Data digest missing, assuming extended verification of disk values"); extended = true; outputHandler.debug(e.getMessage()); markAndThrow(); return; outputHandler.output("Extended Verify requested, proceeding to inspect values"); outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSize))); outputHandler.debug(String.format("Row %s at %s valid, moving to next row at %s ", goodRows, rowStart, nextRowPositionFromIndex)); dataFile.seek(nextRowPositionFromIndex); outputHandler.output("Verify of " + sstable + " succeeded. All " + goodRows + " rows read successfully");
protected Unfiltered computeNext()
{
    // Non-row units (e.g. range tombstone markers) pass through untouched; rows
    // carrying a negative local expiration time are logged, counted, and repaired.
    if (!iterator.hasNext())
        return endOfData();

    Unfiltered unfiltered = iterator.next();
    if (unfiltered.isRow())
    {
        Row row = (Row) unfiltered;
        if (hasNegativeLocalExpirationTime(row))
        {
            outputHandler.debug(String.format("Found row with negative local expiration time: %s", row.toString(metadata(), false)));
            negativeLocalExpirationTimeMetrics.fixedRows++;
            return fixNegativeLocalExpirationTime(row);
        }
    }
    return unfiltered;
}
outputHandler.output(String.format("Scrubbing %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); try (SSTableRewriter writer = SSTableRewriter.construct(cfs, transaction, false, sstable.maxDataAge); Refs<SSTableReader> refs = Refs.ref(Collections.singleton(sstable))) outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex))); outputHandler.warn(String.format("Data file row position %d differs from index file row position %d", dataStart, dataStartFromIndex)); outputHandler.warn("Error reading row (stacktrace follows):", th); outputHandler.output(String.format("Retrying from row index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex)); key = sstable.decorateKey(currentIndexKey); outputHandler.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2); badRows++; seekToNextRow(); outputHandler.warn("Row starting at position " + dataStart + " is unreadable; skipping to next"); badRows++; if (currentIndexKey != null) outputHandler.warn(String.format("%d out of order rows found while scrubbing %s; Those have been written (in order) to a new sstable (%s)", outOfOrder.size(), sstable, newInOrderSstable)); outputHandler.output("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and " + emptyRows + " empty (tombstoned) rows dropped"); if (negativeLocalDeletionInfoMetrics.fixedRows > 0) outputHandler.output("Fixed " + negativeLocalDeletionInfoMetrics.fixedRows + " rows with overflowed local deletion time.");
outputHandler.output(String.format("Verifying %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); outputHandler.output(String.format("Deserializing sstable metadata for %s ", sstable)); try outputHandler.debug(t.getMessage()); markAndThrow(false); outputHandler.output(String.format("Checking computed hash of %s ", sstable)); outputHandler.output("Data digest missing, assuming extended verification of disk values"); extended = true; outputHandler.debug(e.getMessage()); markAndThrow(); return; outputHandler.output("Extended Verify requested, proceeding to inspect values"); outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSize))); outputHandler.debug(String.format("Row %s at %s valid, moving to next row at %s ", goodRows, rowStart, nextRowPositionFromIndex)); dataFile.seek(nextRowPositionFromIndex); outputHandler.output("Verify of " + sstable + " succeeded. All " + goodRows + " rows read successfully");
protected Unfiltered computeNext()
{
    // Non-row units (e.g. range tombstone markers) pass through untouched; rows
    // carrying a negative local expiration time are logged, counted, and repaired.
    if (!iterator.hasNext())
        return endOfData();

    Unfiltered unfiltered = iterator.next();
    if (unfiltered.isRow())
    {
        Row row = (Row) unfiltered;
        if (hasNegativeLocalExpirationTime(row))
        {
            outputHandler.debug(String.format("Found row with negative local expiration time: %s", row.toString(metadata(), false)));
            negativeLocalExpirationTimeMetrics.fixedRows++;
            return fixNegativeLocalExpirationTime(row);
        }
    }
    return unfiltered;
}
outputHandler.output(String.format("Scrubbing %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); try (SSTableRewriter writer = SSTableRewriter.construct(cfs, transaction, false, sstable.maxDataAge); Refs<SSTableReader> refs = Refs.ref(Collections.singleton(sstable))) outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex))); outputHandler.warn(String.format("Data file row position %d differs from index file row position %d", dataStart, dataStartFromIndex)); outputHandler.warn("Error reading row (stacktrace follows):", th); outputHandler.output(String.format("Retrying from row index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex)); key = sstable.decorateKey(currentIndexKey); outputHandler.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2); badRows++; seekToNextRow(); outputHandler.warn("Row starting at position " + dataStart + " is unreadable; skipping to next"); badRows++; if (currentIndexKey != null) outputHandler.warn(String.format("%d out of order rows found while scrubbing %s; Those have been written (in order) to a new sstable (%s)", outOfOrder.size(), sstable, newInOrderSstable)); outputHandler.output("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and " + emptyRows + " empty (tombstoned) rows dropped"); if (negativeLocalDeletionInfoMetrics.fixedRows > 0) outputHandler.output("Fixed " + negativeLocalDeletionInfoMetrics.fixedRows + " rows with overflowed local deletion time.");
/**
 * Prints, one per line, the canonical path of every sstable component selected by
 * the options-derived filter under each of the table's data directories.
 *
 * @throws IOException if a canonical path cannot be resolved
 */
private static void listFiles(Options options, CFMetaData metadata, OutputHandler handler) throws IOException
{
    Directories directories = new Directories(metadata, ColumnFamilyStore.getInitialDirectories());
    for (File cfDir : directories.getCFDirectories())
        for (File component : LifecycleTransaction.getFiles(cfDir.toPath(), getFilter(options), Directories.OnTxnErr.THROW))
            handler.output(component.getCanonicalPath());
}
private void saveOutOfOrderRow(DecoratedKey prevKey, DecoratedKey key, UnfilteredRowIterator iterator) { // TODO bitch if the row is too large? if it is there's not much we can do ... outputHandler.warn(String.format("Out of order row detected (%s found after %s)", key, prevKey)); outOfOrder.add(ImmutableBTreePartition.create(iterator)); }
outputHandler.output(String.format("Verifying %s (%s)", sstable, FBUtilities.prettyPrintMemory(dataFile.length()))); outputHandler.output(String.format("Deserializing sstable metadata for %s ", sstable)); try outputHandler.debug(t.getMessage()); markAndThrow(false); outputHandler.output(String.format("Checking computed hash of %s ", sstable)); outputHandler.output("Data digest missing, assuming extended verification of disk values"); extended = true; outputHandler.debug(e.getMessage()); markAndThrow(); return; outputHandler.output("Extended Verify requested, proceeding to inspect values"); outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSize))); outputHandler.debug(String.format("Row %s at %s valid, moving to next row at %s ", goodRows, rowStart, nextRowPositionFromIndex)); dataFile.seek(nextRowPositionFromIndex); outputHandler.output("Verify of " + sstable + " succeeded. All " + goodRows + " rows read successfully");
protected Unfiltered computeNext()
{
    // Non-row units (e.g. range tombstone markers) pass through untouched; rows
    // carrying a negative local expiration time are logged, counted, and repaired.
    if (!iterator.hasNext())
        return endOfData();

    Unfiltered unfiltered = iterator.next();
    if (unfiltered.isRow())
    {
        Row row = (Row) unfiltered;
        if (hasNegativeLocalExpirationTime(row))
        {
            outputHandler.debug(String.format("Found row with negative local expiration time: %s", row.toString(metadata(), false)));
            negativeLocalExpirationTimeMetrics.fixedRows++;
            return fixNegativeLocalExpirationTime(row);
        }
    }
    return unfiltered;
}
outputHandler.output(String.format("Scrubbing %s (%s bytes)", sstable, dataFile.length())); Set<SSTableReader> oldSSTable = Sets.newHashSet(sstable); SSTableRewriter writer = new SSTableRewriter(cfs, oldSSTable, sstable.maxDataAge, isOffline); outputHandler.debug("Reading row at " + rowStart); outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize)); outputHandler.warn(String.format("Data file row position %d differs from index file row position %d", dataStart, dataStartFromIndex)); outputHandler.warn(String.format("Data file row size %d different from index file row size %d", dataSize, dataSizeFromIndex)); outputHandler.warn("Error reading row (stacktrace follows):", th); outputHandler.output(String.format("Retrying from row index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex)); key = sstable.partitioner.decorateKey(currentIndexKey); outputHandler.warn("Retry failed too. Skipping to next row (retry's stacktrace follows)", th2); badRows++; seekToNextRow(); outputHandler.warn("Row starting at position " + dataStart + " is unreadable; skipping to next"); badRows++; if (currentIndexKey != null) else if (newInOrderSstable != null) newInOrderSstable.selfRef().release(); outputHandler.warn(String.format("%d out of order rows found while scrubbing %s; Those have been written (in order) to a new sstable (%s)", outOfOrderRows.size(), sstable, newInOrderSstable));
/**
 * Prints, one per line, the canonical path of every sstable component selected by
 * the options-derived filter under each of the table's data directories.
 *
 * @throws IOException if a canonical path cannot be resolved
 */
private static void listFiles(Options options, CFMetaData metadata, OutputHandler handler) throws IOException
{
    Directories directories = new Directories(metadata, ColumnFamilyStore.getInitialDirectories());
    for (File cfDir : directories.getCFDirectories())
        for (File component : LifecycleTransaction.getFiles(cfDir.toPath(), getFilter(options), Directories.OnTxnErr.THROW))
            handler.output(component.getCanonicalPath());
}
/**
 * Aborts the scrub when a corrupt row is found in counter (commutative) data.
 * Dropping such a row would silently undercount the affected counters, so unless
 * the operator explicitly opted in with --skip-corrupted we stop here.
 *
 * @param key the key of the row that failed to read
 * @param th  the underlying read failure, propagated as the IOError cause
 */
private void throwIfCommutative(DecoratedKey key, Throwable th)
{
    if (!isCommutative || skipCorrupted)
        return;

    outputHandler.warn(String.format("An error occurred while scrubbing the row with key '%s'. Skipping corrupt " +
                                     "rows in counter tables will result in undercounts for the affected " +
                                     "counters (see CASSANDRA-2759 for more details), so by default the scrub will " +
                                     "stop at this point. If you would like to skip the row anyway and continue " +
                                     "scrubbing, re-run the scrub with the --skip-corrupted option.", key));
    throw new IOError(th);
}
/**
 * Prints, one per line, the canonical path of every sstable component selected by
 * the options-derived filter under each of the table's data directories.
 *
 * @throws IOException if a canonical path cannot be resolved
 */
private static void listFiles(Options options, CFMetaData metadata, OutputHandler handler) throws IOException
{
    Directories directories = new Directories(metadata, ColumnFamilyStore.getInitialDirectories());
    for (File cfDir : directories.getCFDirectories())
        for (File component : LifecycleTransaction.getFiles(cfDir.toPath(), getFilter(options), Directories.OnTxnErr.THROW))
            handler.output(component.getCanonicalPath());
}