/**
 * Creates a COMMIT record stamped with the given last-update time.
 *
 * @param updateTime the timestamp to store on the commit record
 * @return a new COMMIT {@link LogRecord}
 */
public static LogRecord makeCommit(long updateTime)
{
    return new LogRecord(Type.COMMIT, updateTime);
}
public static LogRecord make(Type type, SSTable table) { // CASSANDRA-13294: add the sstable component separator because for legacy (2.1) files // there is no separator after the generation number, and this would cause files of sstables with // a higher generation number that starts with the same number, to be incorrectly classified as files // of this record sstable String absoluteTablePath = absolutePath(table.descriptor.baseFilename()); return make(type, getExistingFiles(absoluteTablePath), table.getAllFilePaths().size(), absoluteTablePath); }
static void verifyRecord(LogRecord record) { if (record.checksum != record.computeChecksum()) { record.setError(String.format("Invalid checksum for sstable [%s]: [%d] should have been [%d]", record.fileName(), record.checksum, record.computeChecksum())); return; } if (record.type != Type.REMOVE) return; // Paranoid sanity checks: we create another record by looking at the files as they are // on disk right now and make sure the information still matches. We don't want to delete // files by mistake if the user has copied them from backup and forgot to remove a txn log // file that obsoleted the very same files. So we check the latest update time and make sure // it matches. Because we delete files from oldest to newest, the latest update time should // always match. record.status.onDiskRecord = record.withExistingFiles(); if (record.updateTime != record.status.onDiskRecord.updateTime && record.status.onDiskRecord.updateTime > 0) { record.setError(String.format("Unexpected files detected for sstable [%s]: " + "last update time [%tT] should have been [%tT]", record.fileName(), record.status.onDiskRecord.updateTime, record.updateTime)); } }
static void verifyRecordWithCorruptedLastRecord(LogRecord record) { if (record.type == Type.REMOVE && record.status.onDiskRecord.numFiles < record.numFiles) { // if we found a corruption in the last record, then we continue only // if the number of files matches exactly for all previous records. record.setError(String.format("Incomplete fileset detected for sstable [%s]: " + "number of files [%d] should have been [%d].", record.fileName(), record.status.onDiskRecord.numFiles, record.numFiles)); } }
/**
 * Builds a fresh record of the same type and path, based on the files
 * currently present on disk.
 * NOTE(review): numFiles is passed as 0 here — presumably the factory derives
 * the real count from the file list; confirm against make().
 */
public LogRecord withExistingFiles()
{
    // getExistingFiles() asserts the path is present, so the get() below is safe.
    return make(this.type, this.getExistingFiles(), 0, this.absolutePath.get());
}
LogRecord record = LogRecord.make(firstLine);

// BUG FIX: the original `if` statements had no braces, so only the logger call
// was guarded — setError() and `return false` executed unconditionally,
// rejecting every record. Braces restore the intended all-or-nothing blocks.
if (records.contains(record))
{
    logger.error("Found duplicate record {} for {}, giving up", record, record.fileName());
    setError(record, "Duplicated record");
    return false;
}

record.setPartial();

// A final record must be the last line of the file; same brace fix applies here.
if (record.isFinal() && i != (maxNumLines - 1))
{
    logger.error("Found too many lines for {}, giving up", record.fileName());
    setError(record, "This record should have been the last one in all replicas");
    return false;
}
public static LogRecord make(String line) { try { Matcher matcher = REGEX.matcher(line); if (!matcher.matches()) return new LogRecord(Type.UNKNOWN, null, 0, 0, 0, line) .setError(String.format("Failed to parse [%s]", line)); Type type = Type.fromPrefix(matcher.group(1)); return new LogRecord(type, matcher.group(2) + Component.separator, // see comment on CASSANDRA-13294 below Long.parseLong(matcher.group(3)), Integer.parseInt(matcher.group(4)), Long.parseLong(matcher.group(5)), line); } catch (IllegalArgumentException e) { return new LogRecord(Type.UNKNOWN, null, 0, 0, 0, line) .setError(String.format("Failed to parse line: %s", e.getMessage())); } }
/**
 * Full constructor used both when creating new records and when parsing them back.
 *
 * @param type         the record type; determines which fields are meaningful
 * @param absolutePath the sstable path; required when the type has a file, ignored otherwise
 * @param updateTime   last file modification time; only retained for REMOVE records
 * @param numFiles     number of sstable files; only retained for file-bearing types
 * @param checksum     checksum read from a parsed line, or 0 when building a new record
 * @param raw          the original log line, or null when building a new record
 */
private LogRecord(Type type, String absolutePath, long updateTime, int numFiles, long checksum, String raw)
{
    assert !type.hasFile() || absolutePath != null : "Expected file path for file records";
    this.type = type;
    // Non-file types carry no path at all, rather than an empty one.
    this.absolutePath = type.hasFile() ? Optional.of(absolutePath) : Optional.empty();
    // Only REMOVE records track the update time; it is zeroed for all other types.
    this.updateTime = type == Type.REMOVE ? updateTime : 0;
    this.numFiles = type.hasFile() ? numFiles : 0;
    this.status = new Status();
    if (raw == null)
    {
        // Freshly created record: compute the checksum from the fields set above,
        // then render the canonical line. Field assignments must precede this.
        assert checksum == 0;
        this.checksum = computeChecksum();
        this.raw = format();
    }
    else
    {
        // Parsed record: keep the checksum and raw line exactly as read, so
        // verification can later compare against computeChecksum().
        this.checksum = checksum;
        this.raw = raw;
    }
}
// Renders the canonical on-disk line for this record:
// TYPE:[absolutePath,updateTime,numFiles][checksum]
// This is the inverse of the REGEX used by make(String), so the layout
// must stay in sync with that pattern.
private String format()
{
    return String.format("%s:[%s,%d,%d][%d]", type.toString(), absolutePath(), updateTime, numFiles, checksum);
}
/**
 * Returns the files on disk belonging to this record's sstable.
 * The record must carry a path (i.e. its type has a file).
 */
public List<File> getExistingFiles()
{
    assert absolutePath.isPresent() : "Expected a path in order to get existing files";

    String path = absolutePath.get();
    return getExistingFiles(path);
}
// Propagates the record's error to every replica, keyed by the record's raw line.
// Note: record.error() is evaluated once per replica inside the lambda.
void setErrorInReplicas(LogRecord record)
{
    replicas().forEach(r -> r.setError(record.raw, record.error()));
}
/**
 * Selects, from the given file set, the files belonging to the record:
 * those whose name starts with the record's file name.
 */
private static Set<File> getRecordFiles(NavigableSet<File> files, LogRecord record)
{
    String prefix = record.fileName();
    return files.stream()
                .filter(file -> file.getName().startsWith(prefix))
                .collect(Collectors.toSet());
}
/**
 * Add the record to all the replicas: if it is a final record then we throw only if we fail to write it
 * to all, otherwise we throw if we fail to write it to any file, see CASSANDRA-10421 for details
 */
void append(LogRecord record)
{
    // Attempt the append on every replica; failures are accumulated into one
    // Throwable with the rest attached as suppressed exceptions.
    Throwable err = Throwables.perform(null, replicas().stream().map(r -> () -> r.append(record)));
    if (err != null)
    {
        // suppressed.length == replicas-1 means every single replica failed;
        // only then does a final record's failure become fatal.
        if (!record.isFinal() || err.getSuppressed().length == replicas().size() -1)
            Throwables.maybeFail(err);

        logger.error("Failed to add record '{}' to some replicas '{}'", record, this);
    }
}
/**
 * Extract from the files passed in all those that are of the given type.
 *
 * Scan all records and select those that are of the given type, valid, and
 * located in the same folder. For each such record extract from the files passed in
 * those that belong to this record.
 *
 * @return a map linking each mapped record to its files, where the files where passed in as parameters.
 */
Map<LogRecord, Set<File>> getFilesOfType(Path folder, NavigableSet<File> files, Type type)
{
    Map<LogRecord, Set<File>> ret = new HashMap<>();

    for (LogRecord record : records)
    {
        // Skip records of the wrong type, invalid ones, and those in other folders.
        if (!type.matches(record) || !record.isValid() || !record.isInFolder(folder))
            continue;

        ret.put(record, getRecordFiles(files, record));
    }

    return ret;
}
LogRecord record = LogRecord.make(firstLine);

// BUG FIX: without braces only the logger call was guarded by each `if`;
// setError() and `return false` ran unconditionally, failing every record.
if (records.contains(record))
{
    logger.error("Found duplicate record {} for {}, giving up", record, record.fileName());
    setError(record, "Duplicated record");
    return false;
}

record.setPartial();

// A final record may only appear as the very last line; brace fix as above.
if (record.isFinal() && i != (maxNumLines - 1))
{
    logger.error("Found too many lines for {}, giving up", record.fileName());
    setError(record, "This record should have been the last one in all replicas");
    return false;
}
// Re-creates this record from the files currently on disk; used by the
// verification path to compare the logged state against reality.
// NOTE(review): numFiles is passed as 0 — presumably recomputed by make()
// from the file list; confirm against the factory.
public LogRecord withExistingFiles()
{
    return make(type, getExistingFiles(), 0, absolutePath.get());
}
// Extra validation for records preceding a corrupted last record: a REMOVE
// record whose on-disk file count is lower than the logged count is flagged,
// because deleting with an incomplete fileset would be unsafe.
static void verifyRecordWithCorruptedLastRecord(LogRecord record)
{
    if (record.type == Type.REMOVE && record.status.onDiskRecord.numFiles < record.numFiles)
    {
        // if we found a corruption in the last record, then we continue only
        // if the number of files matches exactly for all previous records.
        record.setError(String.format("Incomplete fileset detected for sstable [%s]: " +
                                      "number of files [%d] should have been [%d].",
                                      record.fileName(),
                                      record.status.onDiskRecord.numFiles,
                                      record.numFiles));
    }
}
// Parses one raw log line into a LogRecord. Lines that do not match the
// expected pattern, or whose numeric fields fail to parse
// (NumberFormatException is an IllegalArgumentException), yield an UNKNOWN
// record carrying an error rather than throwing.
public static LogRecord make(String line)
{
    try
    {
        Matcher matcher = REGEX.matcher(line);
        if (!matcher.matches())
            return new LogRecord(Type.UNKNOWN, null, 0, 0, 0, line)
                   .setError(String.format("Failed to parse [%s]", line));

        Type type = Type.fromPrefix(matcher.group(1));
        return new LogRecord(type,
                             matcher.group(2) + Component.separator, // see comment on CASSANDRA-13294 below
                             Long.parseLong(matcher.group(3)),
                             Integer.parseInt(matcher.group(4)),
                             Long.parseLong(matcher.group(5)),
                             line);
    }
    catch (IllegalArgumentException e)
    {
        return new LogRecord(Type.UNKNOWN, null, 0, 0, 0, line)
               .setError(String.format("Failed to parse line: %s", e.getMessage()));
    }
}