// NOTE(review): this line is a collage of disjoint statements from several distinct call
// sites (a NORMAL finish, an EARLY finish with replaceEarlyOpenedFile/moveStarts, and an
// abort path) and is NOT valid Java on its own — the enclosing method bodies are not
// visible in this chunk. Declaring `reader` twice and declaring it inside an unbraced
// `if` would not compile; do not treat this as runnable code.
if (writer.getFilePointer() != 0) SSTableReader reader = writer.finish(SSTableWriter.FinishType.NORMAL, maxAge, -1); finishedReaders.add(reader); SSTableReader reader = writer.finish(SSTableWriter.FinishType.EARLY, maxAge, -1); replaceEarlyOpenedFile(currentlyOpenedEarly, reader); moveStarts(reader, reader.last, false); writer.abort();
/**
 * Writes one partition to the underlying SSTable writer.
 *
 * @param key          partition key to append
 * @param columnFamily row contents to write for that key
 */
protected void writeRow(DecoratedKey key, ColumnFamily columnFamily)
{
    writer.append(key, columnFamily);
}
/**
 * Writes one partition by streaming its serialized contents directly from {@code in}
 * into the given writer (no intermediate ColumnFamily is materialized).
 *
 * @param key    partition key to append
 * @param writer destination SSTable writer
 * @param in     serialized row data; presumably encoded at {@code inputVersion} — TODO confirm
 * @param cfs    provides the table metadata used to deserialize the stream
 * @throws IOException on read/write failure
 */
protected void writeRow(DecoratedKey key, SSTableWriter writer, DataInput in, ColumnFamilyStore cfs) throws IOException
{
    writer.appendFromStream(key, cfs.metadata, in, inputVersion);
}
// NOTE(review): trailing brace closes an enclosing class whose header is outside this chunk.
}
// NOTE(review): collage of statements from both the success path (isolateReferences +
// closeAndOpenReader) and the failure path (abort + Throwables.propagate) of an SSTable
// build routine; the surrounding try/catch, logging calls, and declarations of `ssTable`,
// `entry`, `cf`, and `context` are not visible here, so this is not valid Java on its own.
writer.append((DecoratedKey)entry.getKey(), cf); if (writer.getFilePointer() > 0) writer.getFilename(), FBUtilities.prettyPrintMemory(writer.getOnDiskFilePointer()), context)); writer.isolateReferences(); ssTable = writer.closeAndOpenReader(); writer.getFilename(), context); writer.abort(); ssTable = null; writer.abort(); throw Throwables.propagate(e);
/**
 * Appends one partition to the data file and records its index entry and statistics.
 * Partitions whose key exceeds the maximum encodable size are logged and skipped
 * rather than written (the on-disk format stores key length as an unsigned short).
 *
 * @param decoratedKey partition key to append
 * @param cf           partition contents
 * @throws FSWriteError wrapping any IOException from the data file
 */
public void append(DecoratedKey decoratedKey, ColumnFamily cf)
{
    int keySize = decoratedKey.getKey().remaining();
    if (keySize > FBUtilities.MAX_UNSIGNED_SHORT)
    {
        logger.error("Key size {} exceeds maximum of {}, skipping row", keySize, FBUtilities.MAX_UNSIGNED_SHORT);
        return;
    }

    long start = beforeAppend(decoratedKey);
    long end;
    try
    {
        RowIndexEntry indexEntry = rawAppend(cf, start, decoratedKey, dataFile.stream);
        end = dataFile.getFilePointer();
        afterAppend(decoratedKey, end, indexEntry);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, dataFile.getPath());
    }

    long partitionSize = end - start;
    maybeLogLargePartitionWarning(decoratedKey, partitionSize);
    sstableMetadataCollector.update(partitionSize, cf.getColumnStats());
}
// NOTE(review): fragment of a JSON-import routine — the enclosing method, the loop that
// populates `row`/`columnFamily`, and any error handling are outside this chunk. As shown
// there is no try/finally calling writer.abort() on failure, but that cannot be judged
// without the missing context; verify against the full method.
SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE); writer.append(row.getKey(), columnFamily); columnFamily.clear(); writer.closeAndOpenReader();
/**
 * Drains buffers from {@code writeQueue} until the SENTINEL is seen, writing each buffer
 * to a fresh SSTable writer. On any failure the current writer is aborted and the
 * throwable is stashed in {@code exception} for the producer to inspect.
 */
public void run()
{
    SSTableWriter currentWriter = null;
    try
    {
        while (true)
        {
            Buffer buffer = writeQueue.take();
            if (buffer == SENTINEL)
                return;

            currentWriter = getWriter();
            for (Map.Entry<DecoratedKey, ColumnFamily> row : buffer.entrySet())
            {
                ColumnFamily cf = row.getValue();
                if (cf.getColumnCount() > 0)
                    currentWriter.append(row.getKey(), cf);
                // an empty partition is only legal for the first key inserted into the buffer
                else if (!row.getKey().equals(buffer.getFirstInsertedKey()))
                    throw new AssertionError("Empty partition");
            }
            currentWriter.close();
        }
    }
    catch (Throwable t)
    {
        JVMStabilityInspector.inspectThrowable(t);
        if (currentWriter != null)
            currentWriter.abort();
        // keep the throwable so the caller can rethrow it; do not swallow
        exception = t;
    }
}
// NOTE(review): trailing brace closes an enclosing class whose header is outside this chunk.
}
/**
 * Appends an already-compacted row to the data file.
 *
 * @param row the compacted row to write
 * @return null if the row was compacted away entirely; otherwise, the PK index entry for this row
 * @throws FSWriteError wrapping any IOException from the data file
 */
public RowIndexEntry append(AbstractCompactedRow row)
{
    long start = beforeAppend(row.key);
    RowIndexEntry indexEntry;
    try
    {
        indexEntry = row.write(start, dataFile.stream);
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, dataFile.getPath());
    }
    // row.write returns null when every cell was purged during compaction
    if (indexEntry == null)
        return null;

    long end = dataFile.getFilePointer();
    long rowSize = end - start;
    maybeLogLargePartitionWarning(row.key, rowSize);
    sstableMetadataCollector.update(rowSize, row.columnStats());
    afterAppend(row.key, end, indexEntry);
    return indexEntry;
}
// NOTE(review): fragment from the interior of a finish(...) implementation — the enclosing
// method signature, the SSTableReader.internalOpen call that consumes `desc`/`metadata`,
// and the declarations of `sstable`, `first`, and `last` are outside this chunk. The bare
// line "metadata, finishType.openReason);" is the tail of a multi-argument call whose head
// is missing; not valid Java as shown.
Pair<Descriptor, StatsMetadata> p; p = close(finishType, repairedAt < 0 ? this.repairedAt : repairedAt); Descriptor desc = p.left; StatsMetadata metadata = p.right; desc = makeTmpLinks(); metadata, finishType.openReason); sstable.first = getMinimalKey(first); sstable.last = getMinimalKey(last);
/**
 * Builds an SSTable writer for flushing this memtable's rows to disk.
 * The collector is seeded with the commit-log replay position so the flushed
 * sstable records how far the log can be discarded.
 *
 * @param filename target sstable path
 * @return a writer sized for the current row count, marked unrepaired
 * @throws ExecutionException   if resolving the replay position fails — presumably
 *                              {@code context} comes from a Future; confirm against caller
 * @throws InterruptedException if resolving the replay position is interrupted
 */
public SSTableWriter createFlushWriter(String filename) throws ExecutionException, InterruptedException
{
    MetadataCollector collector = new MetadataCollector(cfs.metadata.comparator).replayPosition(context);
    return new SSTableWriter(filename,
                             rows.size(),
                             ActiveRepairService.UNREPAIRED_SSTABLE,
                             cfs.metadata,
                             cfs.partitioner,
                             collector);
}
// NOTE(review): trailing brace closes an enclosing class whose header is outside this chunk.
}
/**
 * Creates a writer for a new sstable.
 *
 * @param filename                 full path of the Data component to create
 * @param keyCount                 expected number of partitions (sizes the index/bloom filter)
 * @param repairedAt               repair timestamp to stamp into the sstable metadata
 * @param metadata                 table schema
 * @param partitioner              partitioner used to decorate keys
 * @param sstableMetadataCollector accumulates per-sstable statistics while writing
 */
public SSTableWriter(String filename, long keyCount, long repairedAt, CFMetaData metadata, IPartitioner partitioner, MetadataCollector sstableMetadataCollector)
{
    super(Descriptor.fromFilename(filename), components(metadata), metadata, partitioner);
    this.repairedAt = repairedAt;

    if (compression)
    {
        // compressed path: String-based open() overload that also writes the COMPRESSION_INFO component
        dataFile = SequentialWriter.open(getFilename(), descriptor.filenameFor(Component.COMPRESSION_INFO), metadata.compressionParameters(), sstableMetadataCollector);
        dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
    }
    else
    {
        // uncompressed path: File-based open() overload with a separate CRC component
        dataFile = SequentialWriter.open(new File(getFilename()), new File(descriptor.filenameFor(Component.CRC)));
        dbuilder = SegmentedFile.getBuilder(DatabaseDescriptor.getDiskAccessMode());
    }
    // index writer is created after dataFile since it tracks positions in it
    iwriter = new IndexWriter(keyCount, dataFile);
    this.sstableMetadataCollector = sstableMetadataCollector;
}
// NOTE(review): collage of the failure path (abort + clear + taskCompleted) and the success
// path (closeAndOpenReader over task.sstables, lockfile cleanup) of a stream-receive
// completion routine; the enclosing try/catch and the use of `readers` afterwards are not
// visible here, so this is not a self-contained unit.
writer.abort(); task.sstables.clear(); task.session.taskCompleted(task); List<SSTableReader> readers = new ArrayList<>(); for (SSTableWriter writer : task.sstables) readers.add(writer.closeAndOpenReader()); lockfile.delete(); task.sstables.clear();
// NOTE(review): fragment of a scrub routine that rewrites rows found out of order into a
// fresh, correctly-ordered sstable. The enclosing method, the population of
// `outOfOrderRows`, and any abort-on-failure handling are outside this chunk; as shown
// there is no try/finally guarding inOrderWriter — verify against the full method.
SSTableWriter inOrderWriter = CompactionManager.createWriter(cfs, destination, expectedBloomFilterSize, repairedAt, sstable); for (Row row : outOfOrderRows) inOrderWriter.append(row.key, row.cf); newInOrderSstable = inOrderWriter.closeAndOpenReader(sstable.maxDataAge); if (!isOffline) cfs.getDataTracker().addSSTables(Collections.singleton(newInOrderSstable));
// NOTE(review): fragment from the middle/tail of an appendFromStream-style method. The
// chained ".updateMaxColumnNames(...).updateHasLegacyCounterShards(...)" calls are the
// tail of a builder expression whose receiver is outside this chunk, so this is not valid
// Java as shown; only the beforeAppend/afterAppend bracketing and the return of the start
// position are visible.
long currentPosition = beforeAppend(key); .updateMaxColumnNames(maxColumnNames) .updateHasLegacyCounterShards(hasLegacyCounterShards); afterAppend(key, currentPosition, RowIndexEntry.create(currentPosition, cf.deletionInfo().getTopLevelDeletion(), columnIndexer.build())); return currentPosition;
/**
 * Abort this task.
 * If the task already received all files and
 * {@link org.apache.cassandra.streaming.StreamReceiveTask.OnCompletionRunnable} task is submitted,
 * then task cannot be aborted.
 */
public synchronized void abort()
{
    // once done (completed or already aborted) there is nothing left to tear down
    if (done)
        return;

    done = true;
    for (SSTableWriter w : sstables)
        w.abort();
    sstables.clear();
}
// NOTE(review): trailing brace closes an enclosing class whose header is outside this chunk.
}
/**
 * Finishes the sstable and opens it as a reader, using the current wall-clock
 * time as its max data age.
 *
 * @return the newly opened reader
 */
public SSTableReader closeAndOpenReader()
{
    long now = System.currentTimeMillis();
    return closeAndOpenReader(now);
}
/**
 * Finishes the sstable normally and opens it as a reader.
 *
 * @param maxDataAge max timestamp of data contained in the sstable
 * @return the newly opened reader
 */
public SSTableReader closeAndOpenReader(long maxDataAge)
{
    // NORMAL finish; keep whatever repairedAt this writer was created with
    long repaired = this.repairedAt;
    return finish(FinishType.NORMAL, maxDataAge, repaired);
}
/**
 * Closes the writer without opening a reader.
 *
 * @return the descriptor and stats metadata of the closed sstable
 */
public Pair<Descriptor, StatsMetadata> close()
{
    // CLOSE finish type; repairedAt is unchanged from construction
    return close(FinishType.CLOSE, this.repairedAt);
}
// NOTE(review): fragment nearly identical to the one using `row.getKey()` elsewhere in
// this chunk, but appending `currentKey` instead — the enclosing import loop and any
// abort-on-failure handling are outside this view. Verify the two call sites stay in sync.
SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE); writer.append(currentKey, columnFamily); columnFamily.clear(); writer.closeAndOpenReader();