/**
 * Records the filenames of the given sstables under the provided column family id.
 * Assumes sstableMap already holds an entry for cfId.
 */
private void addSSTables(UUID cfId, Collection<SSTableReader> sstables)
{
    // Hoist the (loop-invariant) lookup of the tracked-name set.
    Set<String> trackedNames = sstableMap.get(cfId);
    for (SSTableReader sstable : sstables)
        trackedNames.add(sstable.getFilename());
}
/**
 * Flags this sstable as suspect (typically after a read failure) so it can be
 * considered for blacklisting.
 */
public void markSuspect()
{
    if (logger.isDebugEnabled())
        logger.debug("Marking {} as a suspect for blacklisting.", getFilename());
    // The previous value was discarded anyway; a plain set() states the
    // intent more clearly than getAndSet() with an ignored result.
    isSuspect.set(true);
}
/**
 * Returns a map from sstable filename to its effective index interval,
 * rounded to the nearest integer.
 */
public Map<String, Integer> getIndexIntervals()
{
    List<SSTableReader> sstables = getAllSSTables();
    Map<String, Integer> result = new HashMap<>(sstables.size());
    for (SSTableReader reader : sstables)
    {
        int interval = (int) Math.round(reader.getEffectiveIndexInterval());
        result.put(reader.getFilename(), interval);
    }
    return result;
}
/**
 * Returns the currently live sstables for the given cf that this session is
 * tracking, pruning tracked names whose sstables are no longer live.
 *
 * @return an empty set if the session has failed; null if the cf is unknown;
 *         otherwise the set of live tracked sstables.
 */
private Set<SSTableReader> getActiveSSTables(UUID cfId)
{
    if (failed)
        return Collections.emptySet();
    if (!columnFamilyStores.containsKey(cfId))
        return null;

    Set<String> trackedNames = sstableMap.get(cfId);
    Set<SSTableReader> live = new HashSet<>();
    Set<String> liveNames = new HashSet<>();
    for (SSTableReader candidate : columnFamilyStores.get(cfId).getSSTables())
    {
        String filename = candidate.getFilename();
        if (trackedNames.contains(filename))
        {
            live.add(candidate);
            liveNames.add(filename);
        }
    }
    // Replace the tracked set so names of vanished (e.g. compacted-away)
    // sstables are dropped.
    sstableMap.put(cfId, liveNames);
    return live;
}
/**
 * Mark the sstable as obsolete, i.e., compacted into newer sstables.
 *
 * When calling this function, the caller must ensure that the SSTableReader
 * is not referenced anywhere except for threads holding a reference.
 *
 * @return true only the first time the file is marked obsolete; calling this
 *         multiple times is usually buggy (see exceptions in
 *         DataTracker.unmarkCompacting and removeOldSSTablesSize).
 */
public boolean markObsolete(DataTracker tracker)
{
    if (logger.isDebugEnabled())
        logger.debug("Marking {} compacted", getFilename());

    synchronized (tidy.global)
    {
        assert !tidy.isReplaced;
    }

    // getAndSet guarantees exactly one caller observes the false->true
    // transition and performs the actual obsoletion.
    boolean alreadyCompacted = tidy.global.isCompacted.getAndSet(true);
    if (alreadyCompacted)
        return false;

    tidy.type.markObsolete(this, tracker);
    return true;
}
/** * @param cfs * @param sstables must be marked compacting */ public AbstractCompactionTask(ColumnFamilyStore cfs, Set<SSTableReader> sstables) { this.cfs = cfs; this.sstables = sstables; this.isUserDefined = false; this.compactionType = OperationType.COMPACTION; // enforce contract that caller should mark sstables compacting Set<SSTableReader> compacting = cfs.getDataTracker().getCompacting(); for (SSTableReader sstable : sstables) assert compacting.contains(sstable) : sstable.getFilename() + " is not correctly marked compacting"; }
/**
 * Prepares an upgrade of a single sstable to the current format, estimating
 * the row count per output sstable from the source's key count and size.
 */
public Upgrader(ColumnFamilyStore cfs, SSTableReader sstable, OutputHandler outputHandler)
{
    this.cfs = cfs;
    this.sstable = sstable;
    this.outputHandler = outputHandler;
    this.directory = new File(sstable.getFilename()).getParentFile();
    this.controller = new UpgradeController(cfs);
    this.strategy = cfs.getCompactionStrategy();

    long approximateKeys = SSTableReader.getApproximateKeyCount(Arrays.asList(this.sstable));
    long estimatedTotalKeys = Math.max(cfs.metadata.getMinIndexInterval(), approximateKeys);
    long totalBytes = SSTableReader.getTotalBytes(Arrays.asList(this.sstable));
    long estimatedSSTables = Math.max(1, totalBytes / strategy.getMaxSSTableBytes());
    this.estimatedRows = (long) Math.ceil((double) estimatedTotalKeys / estimatedSSTables);
}
/**
 * Creates a rewriter over the given set of sstables, caching each source's
 * first key and open file descriptor for later early-open bookkeeping.
 */
@VisibleForTesting
public SSTableRewriter(ColumnFamilyStore cfs, Set<SSTableReader> rewriting, long maxAge, boolean isOffline, long preemptiveOpenInterval)
{
    this.rewriting = rewriting;
    for (SSTableReader reader : rewriting)
    {
        originalStarts.put(reader.descriptor, reader.first);
        fileDescriptors.put(reader.descriptor, CLibrary.getfd(reader.getFilename()));
    }
    this.dataTracker = cfs.getDataTracker();
    this.cfs = cfs;
    this.maxAge = maxAge;
    this.isOffline = isOffline;
    this.preemptiveOpenInterval = preemptiveOpenInterval;
}
public static ISSTableScanner getScanner(SSTableReader sstable, Collection<Range<Token>> tokenRanges, RateLimiter limiter) { // We want to avoid allocating a SSTableScanner if the range don't overlap the sstable (#5249) List<Pair<Long, Long>> positions = sstable.getPositionsForRanges(tokenRanges); if (positions.isEmpty()) return new EmptySSTableScanner(sstable.getFilename()); return new SSTableScanner(sstable, tokenRanges, limiter); }
cardinalities.add(cardinality); else logger.debug("Got a null cardinality estimator in: "+sstable.getFilename());
/**
 * Iterates the named columns of a row using a caller-supplied data input and
 * a pre-resolved index entry. Marks the sstable suspect on read failure.
 */
public SSTableNamesIterator(SSTableReader sstable, FileDataInput file, DecoratedKey key, SortedSet<CellName> columns, RowIndexEntry indexEntry)
{
    assert columns != null;
    this.sstable = sstable;
    this.key = key;
    this.columns = columns;

    try
    {
        read(sstable, file, indexEntry);
    }
    catch (IOException e)
    {
        // An IO failure here most likely indicates on-disk corruption.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}
/**
 * Gets the repairing sstables for anticompaction.
 *
 * Note that validation and streaming use the real unrepaired sstables.
 *
 * @param cfId the column family to collect references for
 * @return referenced live sstables tracked for this repair session
 */
@SuppressWarnings("resource")
public synchronized Refs<SSTableReader> getActiveRepairedSSTableRefsForAntiCompaction(UUID cfId)
{
    assert marked.contains(cfId);

    Iterable<SSTableReader> candidates = getActiveSSTables(cfId);
    if (candidates == null)
        throw new RuntimeException("Not possible to get sstables for anticompaction for " + cfId);

    ImmutableMap.Builder<SSTableReader, Ref<SSTableReader>> references = ImmutableMap.builder();
    for (SSTableReader sstable : candidates)
    {
        Ref<SSTableReader> ref = sstable.tryRef();
        if (ref == null)
            sstableMap.get(cfId).remove(sstable.getFilename()); // already released elsewhere; stop tracking
        else
            references.put(sstable, ref);
    }
    return new Refs<>(references.build());
}
/**
 * Builds an outgoing streaming message describing one sstable data file,
 * including the header metadata the receiving side needs.
 */
public OutgoingFileMessage(Ref<SSTableReader> ref, int sequenceNumber, long estimatedKeys, List<Pair<Long, Long>> sections, long repairedAt)
{
    super(Type.FILE);
    this.ref = ref;

    SSTableReader sstable = ref.get();
    filename = sstable.getFilename();
    this.header = new FileMessageHeader(sstable.metadata.cfId,
                                        sequenceNumber,
                                        sstable.descriptor.version.toString(),
                                        estimatedKeys,
                                        sections,
                                        sstable.compression ? sstable.getCompressionMetadata() : null,
                                        repairedAt);
}
public List<String> getSSTablesForKey(String key) { DecoratedKey dk = partitioner.decorateKey(metadata.getKeyValidator().fromString(key)); try (OpOrder.Group op = readOrdering.start()) { List<String> files = new ArrayList<>(); for (SSTableReader sstr : select(viewFilter(dk)).sstables) { // check if the key actually exists in this sstable, without updating cache and stats if (sstr.getPosition(dk, SSTableReader.Operator.EQ, false) != null) files.add(sstr.getFilename()); } return files; } }
sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize); RandomAccessReader file = sstable.openDataReader(); FileChannel fc = file.getChannel(); session.planId(), sstable.getFilename(), session.peer, progress, totalSize);
/** * @param ephemeral If this flag is set to true, the snapshot will be cleaned during next startup */ public void snapshotWithoutFlush(String snapshotName, Predicate<SSTableReader> predicate, boolean ephemeral) { for (ColumnFamilyStore cfs : concatWithIndexes()) { final JSONArray filesJSONArr = new JSONArray(); try (RefViewFragment currentView = cfs.selectAndReference(CANONICAL_SSTABLES)) { for (SSTableReader ssTable : currentView.sstables) { if (predicate != null && !predicate.apply(ssTable)) continue; File snapshotDirectory = Directories.getSnapshotDirectory(ssTable.descriptor, snapshotName); ssTable.createLinks(snapshotDirectory.getPath()); // hard links filesJSONArr.add(ssTable.descriptor.relativeFilenameFor(Component.DATA)); if (logger.isDebugEnabled()) logger.debug("Snapshot for {} keyspace data file {} created in {}", keyspace, ssTable.getFilename(), snapshotDirectory); } writeSnapshotManifest(filesJSONArr, snapshotName); } } if (ephemeral) createEphemeralSnapshotMarkerFile(snapshotName); }
/**
 * Iterates the named columns of a row, resolving the row's index entry
 * itself. Returns early (yielding an empty iterator) when the key is absent;
 * marks the sstable suspect on read failure.
 */
public SSTableNamesIterator(SSTableReader sstable, DecoratedKey key, SortedSet<CellName> columns)
{
    assert columns != null;
    this.sstable = sstable;
    this.key = key;
    this.columns = columns;

    RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
    if (indexEntry == null)
        return; // key not present in this sstable

    try
    {
        read(sstable, null, indexEntry);
    }
    catch (IOException e)
    {
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
    finally
    {
        if (fileToClose != null)
            FileUtils.closeQuietly(fileToClose);
    }
}
sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize); RandomAccessReader file = sstable.openDataReader(); ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists() session.planId(), sstable.getFilename(), session.peer, progress, totalSize);
// Positions a reader at the start of a partition and prepares an on-disk atom
// iterator over it. NOTE(review): the deserialization order below (skip key
// length, read deletion info, then hand the stream to the atom iterator) must
// match the sstable data-file layout exactly — do not reorder.
public SimpleSliceReader(SSTableReader sstable, RowIndexEntry indexEntry, FileDataInput input, Composite finishColumn)
{
    Tracing.trace("Seeking to partition beginning in data file");
    this.finishColumn = finishColumn;
    this.comparator = sstable.metadata.comparator;
    try
    {
        if (input == null)
        {
            // No caller-supplied input: open our own segment at the row's
            // position; we are then responsible for closing it.
            this.file = sstable.getFileDataInput(indexEntry.position);
            this.needsClosing = true;
        }
        else
        {
            // Reuse the caller's open input and just seek; the caller keeps
            // ownership (and closing responsibility) of the stream.
            this.file = input;
            input.seek(indexEntry.position);
            this.needsClosing = false;
        }

        // Skip key and data size
        ByteBufferUtil.skipShortLength(file);

        // Read partition-level deletion info into an otherwise-empty CF, then
        // let the metadata build the column iterator over the remaining bytes.
        emptyColumnFamily = ArrayBackedSortedColumns.factory.create(sstable.metadata);
        emptyColumnFamily.delete(DeletionTime.serializer.deserialize(file));
        atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, sstable.descriptor.version);
    }
    catch (IOException e)
    {
        // An IO failure while positioning/reading likely means corruption.
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}
// Seeks both the index file (ifile) and data file (dfile) to the first key at
// or inside currentRange. NOTE(review): the seek-back of ifile after reading
// the data position is intentional — the scanner re-reads that index entry
// later, so the exact read/seek order here must be preserved.
private void seekToCurrentRangeStart()
{
    // Start scanning from the index summary's lower bound for the range start.
    long indexPosition = sstable.getIndexScanPosition(currentRange.left);
    ifile.seek(indexPosition);
    try
    {
        while (!ifile.isEOF())
        {
            indexPosition = ifile.getFilePointer();
            DecoratedKey indexDecoratedKey = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(ifile));
            // Accept the first key strictly past the range start, or any key
            // contained in the range (handles wrap-around ranges).
            if (indexDecoratedKey.compareTo(currentRange.left) > 0 || currentRange.contains(indexDecoratedKey))
            {
                // Found, just read the dataPosition and seek into index and data files
                long dataPosition = ifile.readLong();
                ifile.seek(indexPosition); // rewind so the entry can be re-read by the caller
                dfile.seek(dataPosition);
                break;
            }
            else
            {
                // Not there yet: skip the rest of this index entry and advance.
                RowIndexEntry.Serializer.skip(ifile);
            }
        }
    }
    catch (IOException e)
    {
        sstable.markSuspect();
        throw new CorruptSSTableException(e, sstable.getFilename());
    }
}