public Location readPayload(DataInput dataIn) throws IOException {
    Location rc = new Location();
    rc.setDataFileId(dataIn.readInt());
    rc.setOffset(dataIn.readInt());
    return rc;
}
@Override
public Location readPayload(DataInput dataIn) throws IOException {
    Location rc = new Location();
    rc.setDataFileId(dataIn.readInt());
    rc.setOffset(dataIn.readInt());
    if (metadata.version >= 6) {
        rc.setSize(dataIn.readInt());
    }
    return rc;
}
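// Presumed write-side counterpart (a sketch, not verified source): a marshaller's
// writePayload would have to emit the same fields in the same order the reader
// consumes them, including the version-gated size field.
@Override
public void writePayload(Location object, DataOutput dataOut) throws IOException {
    dataOut.writeInt(object.getDataFileId());
    dataOut.writeInt(object.getOffset());
    if (metadata.version >= 6) {
        // Older stores (version < 6) did not persist the record size.
        dataOut.writeInt(object.getSize());
    }
}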
protected void process(KahaRewrittenDataFileCommand command, Location location) throws IOException {
    final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());

    // Mark the current journal file as a compacted file so that gc checks can skip
    // over logs that are smaller compaction type logs.
    DataFile current = journal.getDataFileById(location.getDataFileId());
    current.setTypeCode(command.getRewriteType());

    if (completeFileSet.contains(command.getSourceDataFileId()) && command.getSkipIfSourceExists()) {
        // Move offset so that next location read jumps to next file.
        location.setOffset(journalMaxFileLength);
    }
}
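// Note: parking the offset at journalMaxFileLength relies on the cursor semantics
// in getNextLocation (reconstructed further below) - once the cursor's offset
// reaches the end of the current data file, the next read rolls over to the
// following file, so the remainder of the rewritten source file is never replayed.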
public void append(Journal.WriteCommand write) throws IOException {
    this.writes.addLast(write);
    write.location.setDataFileId(dataFile.getDataFileId());
    write.location.setOffset(offset + size);
    int s = write.location.getSize();
    size += s;
    dataFile.incrementLength(s);
    journal.addToTotalLength(s);
}
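// Worked example with assumed values: if this write batch starts at file offset 128
// and already holds 64 queued bytes, the next appended record is assigned
// offset 128 + 64 = 192, so queued records are laid out back-to-back and each
// record's Location is fixed before the batch is flushed to disk.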
public void corruptRecoveryLocation(Location recoveryPosition) throws IOException {
    DataFile dataFile = getDataFile(recoveryPosition);
    // With corruption on recovery we have no faith in the content - slip to the next batch record or EOF.
    DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
    try {
        RandomAccessFile randomAccessFile = reader.getRaf().getRaf();
        randomAccessFile.seek(recoveryPosition.getOffset() + 1);
        byte[] data = new byte[getWriteBatchSize()];
        ByteSequence bs = new ByteSequence(data, 0, randomAccessFile.read(data));

        int nextOffset = 0;
        if (findNextBatchRecord(bs, randomAccessFile) >= 0) {
            nextOffset = Math.toIntExact(randomAccessFile.getFilePointer() - bs.remaining());
        } else {
            nextOffset = Math.toIntExact(randomAccessFile.length());
        }

        Sequence sequence = new Sequence(recoveryPosition.getOffset(), nextOffset - 1);
        LOG.warn("Corrupt journal records found in '{}' between offsets: {}", dataFile.getFile(), sequence);

        // Skip the corruption on getNextLocation.
        recoveryPosition.setOffset(nextOffset);
        recoveryPosition.setSize(-1);

        dataFile.corruptedBlocks.add(sequence);
    } catch (IOException e) {
        // Best effort: if the scan itself fails, leave the recovery position unchanged.
    } finally {
        accessorPool.closeDataFileAccessor(reader);
    }
}
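// A minimal recovery-loop sketch, assuming the Journal API shown in this section
// (getFirstLocation appears just below); replay() is a hypothetical stand-in for
// the caller's record handling.
Location pos = journal.getFirstLocation();
while (pos != null) {
    try {
        ByteSequence data = journal.read(pos);
        replay(pos, data); // hypothetical: apply the recovered record
    } catch (IOException corrupted) {
        // Mark the damaged span; the repositioned offset lets the following
        // getNextLocation call skip straight past it.
        journal.corruptRecoveryLocation(pos);
    }
    pos = journal.getNextLocation(pos);
}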
public Location getFirstLocation() throws IllegalStateException, IOException {
    if (dataFiles.isEmpty()) {
        return null;
    }

    DataFile first = dataFiles.getHead();
    Location cur = new Location();
    cur.setDataFileId(first.getDataFileId());
    cur.setOffset(0);
    cur.setSize(0);
    return getNextLocation(cur);
}
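// The zero-sized cursor is a sentinel: getNextLocation advances the cursor to
// location.getOffset() + location.getSize(), so a (fileId, 0, 0) cursor resolves
// to the first record of the head file rather than the record after it.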
protected Location recoveryCheck(DataFile dataFile) throws IOException {
    Location location = new Location();
    location.setDataFileId(dataFile.getDataFileId());
    location.setOffset(0);

    DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
    try {
        while (true) {
            int size = checkBatchRecord(reader, location.getOffset());
            if (size >= 0 && location.getOffset() + BATCH_CONTROL_RECORD_SIZE + size <= dataFile.getLength()) {
                if (size == 0) {
                    // An empty batch record marks the end of the file.
                    break;
                }
                location.setOffset(location.getOffset() + BATCH_CONTROL_RECORD_SIZE + size);
            } else {
                // The batch record is corrupt; scan forward for the next valid
                // batch record, since later batches may still be intact.
                int nextOffset = findNextBatchRecord(reader, location.getOffset() + 1);
                if (nextOffset >= 0) {
                    Sequence sequence = new Sequence(location.getOffset(), nextOffset - 1);
                    LOG.warn("Corrupt journal records found in '{}' between offsets: {}", dataFile.getFile(), sequence);
                    dataFile.corruptedBlocks.add(sequence);
                    location.setOffset(nextOffset);
                } else {
                    break;
                }
            }
        }
    } finally {
        accessorPool.closeDataFileAccessor(reader);
    }
    return location;
}
public Location getNextLocation(Location location, Location limit) throws IOException, IllegalStateException {
    Location cur = null;
    while (true) {
        if (cur == null) {
            if (location == null) {
                DataFile head = dataFiles.getHead();
                if (head == null) {
                    return null;
                }
                cur = new Location();
                cur.setDataFileId(head.getDataFileId());
                cur.setOffset(0);
            } else if (location.getSize() == -1) {
                cur = new Location(location);
            } else {
                cur = new Location(location);
                cur.setOffset(location.getOffset() + location.getSize());
            }
        } else {
            cur.setOffset(cur.getOffset() + cur.getSize());
        }

        DataFile dataFile = getDataFile(cur);

        // Did the cursor run off the end of this file? Roll over to the next one.
        if (dataFile.getLength() <= cur.getOffset()) {
            dataFile = dataFile.getNext();
            if (dataFile == null) {
                return null;
            }
            cur.setDataFileId(dataFile.getDataFileId().intValue());
            cur.setOffset(0);
            if (limit != null && cur.compareTo(limit) >= 0) {
                LOG.trace("reached limit: {} at: {}", limit, cur);
                return null;
            }
        }

        // Load the record's size and type at the cursor position.
        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        try {
            reader.readLocationDetails(cur);
        } finally {
            accessorPool.closeDataFileAccessor(reader);
        }

        Sequence corruptedRange = dataFile.corruptedBlocks.get(cur.getOffset());
        if (corruptedRange != null) {
            // Skip over the corrupted range in one step.
            cur.setSize((int) corruptedRange.range());
        } else if (cur.getType() == 0 && cur.getSize() == 0) {
            // EOF marker - park the offset past the end of the file so the next
            // iteration rolls over (the file may exceed maxFileLength after a
            // configuration change).
            cur.setOffset(Math.max(maxFileLength, dataFile.getLength()));
        } else if (cur.getType() == USER_RECORD_TYPE) {
            // Only user records are returned to callers.
            return cur;
        }
    }
}