/**
 * Attempt to acquire a reference to every object in {@code reference}.
 * This is all-or-nothing: if any single tryRef() fails, every reference
 * taken so far is released and null is returned.
 *
 * @param reference the objects to acquire references to
 * @return a Refs holding one reference per object, or null if any acquisition failed
 */
public static <T extends RefCounted<T>> Refs<T> tryRef(Iterable<T> reference)
{
    HashMap<T, Ref<T>> acquired = new HashMap<>();
    for (T counted : reference)
    {
        Ref<T> r = counted.tryRef();
        if (r != null)
        {
            acquired.put(counted, r);
            continue;
        }
        // one acquisition failed: undo everything taken so far and report failure
        release(acquired.values());
        return null;
    }
    return new Refs<T>(acquired);
}
/**
 * Acquire a reference to all of the provided objects, failing loudly if that
 * is not possible.
 *
 * @param reference the objects to acquire references to
 * @return a Refs holding one reference per object
 * @throws IllegalStateException if a reference could not be acquired for every object
 */
public static <T extends RefCounted<T>> Refs<T> ref(Iterable<T> reference)
{
    Refs<T> refs = tryRef(reference);
    if (refs != null)
        return refs;
    // Previously threw with no message; include context so failures are diagnosable.
    throw new IllegalStateException("failed to acquire a reference to all of the provided objects");
}
/**
 * Rebuild the named secondary indexes of a table, pinning references to the
 * canonical set of sstables for the duration of the blocking rebuild.
 *
 * @param ksName   keyspace name
 * @param cfName   table (column family) name
 * @param idxNames names of the indexes to rebuild
 */
public static void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)
{
    ColumnFamilyStore store = Keyspace.open(ksName).getColumnFamilyStore(cfName);
    Set<String> toRebuild = new HashSet<String>(Arrays.asList(idxNames));
    // Hold a reference to every canonical sstable so none disappear mid-rebuild.
    try (Refs<SSTableReader> sstableRefs = Refs.ref(store.getSSTables(SSTableSet.CANONICAL)))
    {
        logger.info("User Requested secondary index re-build for {}/{} indexes: {}",
                    ksName, cfName, Joiner.on(',').join(idxNames));
        store.indexManager.rebuildIndexesBlocking(sstableRefs, toRebuild);
    }
}
/**
 * Refresh the cached set of sstables overlapping the compacting set, releasing
 * any previously held overlap references, and rebuild the interval tree.
 * No-op when tombstones are never purged (overlaps are then irrelevant).
 */
private void refreshOverlaps()
{
    if (NEVER_PURGE_TOMBSTONES)
        return;

    // Drop references to the previous overlap snapshot before taking a new one.
    if (this.overlappingSSTables != null)
        overlappingSSTables.release();

    overlappingSSTables = (compacting == null)
                          ? Refs.tryRef(Collections.<SSTableReader>emptyList())
                          : cfs.getAndReferenceOverlappingSSTables(compacting);

    this.overlappingTree = DataTracker.buildIntervalTree(overlappingSSTables);
}
// NOTE(review): torn fragment — the interior of getSSTableSectionsForRanges (braces, the
// selectAndReference Function body, and the tail of the new SSTableStreamingSections(...) call)
// is missing from this view, so the code below is not compilable as-is. Visible intent:
// acquire refs over sstables intersecting the requested ranges, build streaming sections
// (positions + estimated keys per sstable), and on error release the refs and rethrow.
// repairedAt handling: presumably uses the sstable's own repairedAt when no override — confirm.
@VisibleForTesting public static List<SSTableStreamingSections> getSSTableSectionsForRanges(Collection<Range<Token>> ranges, Collection<ColumnFamilyStore> stores, long overriddenRepairedAt, final boolean isIncremental) Refs<SSTableReader> refs = new Refs<>(); try for (Range<Token> range : ranges) rowBoundsList.add(range.toRowBounds()); refs.addAll(cfStore.selectAndReference(new Function<DataTracker.View, List<SSTableReader>>() List<SSTableStreamingSections> sections = new ArrayList<>(refs.size()); for (SSTableReader sstable : refs) if (overriddenRepairedAt == ActiveRepairService.UNREPAIRED_SSTABLE) repairedAt = sstable.getSSTableMetadata().repairedAt; sections.add(new SSTableStreamingSections(refs.get(sstable), sstable.getPositionsForRanges(ranges), sstable.estimatedKeysForRanges(ranges), refs.release(); throw t;
/**
 * Replace sstable readers that were cloned with a new index summary sampling level
 * (see SSTableReader.cloneWithNewSummarySamplingLevel and CASSANDRA-5519).
 * Unlike a compaction replacement, the old readers are NOT marked compacted.
 *
 * @param oldSSTables replaced readers
 * @param newSSTables replacement readers
 * @param notify      whether to fire a change notification when readers were replaced
 */
private void replaceReaders(Collection<SSTableReader> oldSSTables, Collection<SSTableReader> newSSTables, boolean notify)
{
    // Standard CAS retry loop: rebuild the replacement view until we win the race.
    while (true)
    {
        View current = view.get();
        View replacement = current.replace(oldSSTables, newSSTables);
        if (view.compareAndSet(current, replacement))
            break;
    }

    if (!oldSSTables.isEmpty() && notify)
        notifySSTablesChanged(oldSSTables, newSSTables, OperationType.UNKNOWN);

    for (SSTableReader replacementReader : newSSTables)
        replacementReader.setupKeyCache();

    // Release our own references to the readers being retired.
    Refs.release(Refs.selfRefs(oldSSTables));
}
/**
 * Closes this object by releasing all held references.
 * Equivalent to calling {@link Refs#release()} directly.
 */
public void close()
{
    release();
}
// NOTE(review): torn fragment — the try body and surrounding method are missing from this view.
// Visible intent: pin references to the sstables being compacted, obtain scanners from the
// compaction strategy, and drive a CompactionIterator over them; the trailing statement
// releases self-refs on the newly produced sstables (context for when that runs is not visible).
try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact); AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact); CompactionIterator ci = new CompactionIterator(compactionType, scanners.scanners, controller, nowInSec, taskId)) Refs.release(Refs.selfRefs(newSStables));
// NOTE(review): torn fragment of performAnticompaction — the signature head, loop bodies and
// branching between these statements are missing from this view; not compilable as-is.
// Visible intent: log the start of anticompaction for a repair session, snapshot the validated
// sstables, release refs for sstables that need no anticompaction (non-anticompacting union
// mutated-repair-status), and finally release remaining refs and close the transaction.
UUID parentRepairSession) throws InterruptedException, IOException logger.info("[repair #{}] Starting anticompaction for {}.{} on {}/{} sstables", parentRepairSession, cfs.keyspace.getName(), cfs.getTableName(), validatedForRepair.size(), cfs.getLiveSSTables()); logger.trace("[repair #{}] Starting anticompaction for ranges {}", parentRepairSession, ranges); Set<SSTableReader> sstables = new HashSet<>(validatedForRepair); validatedForRepair.release(Sets.union(nonAnticompacting, mutatedRepairStatuses)); assert txn.originals().equals(sstables); if (!sstables.isEmpty()) validatedForRepair.release(); txn.close();
// Retry loop for marking the candidate sstables as compacting before anticompaction
// (DataTracker-era variant). Each iteration: drop refs to sstables already being
// compacted elsewhere, drop refs to sstables already marked compacted, then attempt
// markCompacting; succeeds when the (possibly now empty) set is claimed atomically.
// Order of the release steps matters — doc-only edit, code left byte-identical.
// Trailing "} };" closes the enclosing anonymous class, which extends beyond this method.
@Override public void runMayThrow() throws Exception { boolean success = false; while (!success) { for (SSTableReader compactingSSTable : cfs.getDataTracker().getCompacting()) sstables.releaseIfHolds(compactingSSTable); Set<SSTableReader> compactedSSTables = new HashSet<>(); for (SSTableReader sstable : sstables) if (sstable.isMarkedCompacted()) compactedSSTables.add(sstable); sstables.release(compactedSSTables); success = sstables.isEmpty() || cfs.getDataTracker().markCompacting(sstables); } performAnticompaction(cfs, ranges, sstables, repairedAt); } };
// Retry loop for claiming the candidate sstables via a LifecycleTransaction before
// anticompaction (Tracker-era variant of the loop on the previous fragment). Each
// iteration prunes sstables that are compacting elsewhere or already marked compacted,
// then retries tryModify until a transaction is obtained. The transaction (modifier)
// is handed to performAnticompaction, which presumably owns closing it — confirm;
// @SuppressWarnings("resource") hints at that ownership transfer.
// Trailing "} };" closes the enclosing anonymous class. Doc-only edit.
@Override @SuppressWarnings("resource") public void runMayThrow() throws Exception { LifecycleTransaction modifier = null; while (modifier == null) { for (SSTableReader compactingSSTable : cfs.getTracker().getCompacting()) sstables.releaseIfHolds(compactingSSTable); Set<SSTableReader> compactedSSTables = new HashSet<>(); for (SSTableReader sstable : sstables) if (sstable.isMarkedCompacted()) compactedSSTables.add(sstable); sstables.release(compactedSSTables); modifier = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION); } performAnticompaction(cfs, ranges, sstables, modifier, repairedAt, parentRepairSession); } };
// NOTE(review): fragment — the enclosing method header and the loop producing
// (sstable, ref) pairs are not visible here. Records the acquired ref for this
// sstable, then wraps the accumulated builder (presumably an ImmutableMap.Builder —
// confirm) in a new Refs collection.
references.put(sstable, ref); return new Refs<>(references.build());
/**
 * Build this index from all existing canonical sstable data, blocking until complete.
 * Flushes the base table first so the sstable view is current; if there is no data,
 * the index is immediately marked built.
 */
private void buildBlocking()
{
    baseCfs.forceBlockingFlush();

    try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
         Refs<SSTableReader> sstables = viewFragment.refs)
    {
        // Nothing to index: mark built and bail out early.
        if (sstables.isEmpty())
        {
            logger.info("No SSTable data for {}.{} to build index {} from, marking empty index as built",
                        baseCfs.metadata.ksName, baseCfs.metadata.cfName, metadata.name);
            baseCfs.indexManager.markIndexBuilt(metadata.name);
            return;
        }

        logger.info("Submitting index build of {} for data in {}", metadata.name, getSSTableNames(sstables));

        SecondaryIndexBuilder builder = new CollatedViewIndexBuilder(baseCfs,
                                                                     Collections.singleton(this),
                                                                     new ReducingKeyIterator(sstables));
        // Block until the compaction-manager-driven build finishes.
        FBUtilities.waitOnFuture(CompactionManager.instance.submitIndexBuild(builder));

        indexCfs.forceBlockingFlush();
        baseCfs.indexManager.markIndexBuilt(metadata.name);
    }
    logger.info("Index build of {} complete", metadata.name);
}
// NOTE(review): torn fragment — a later (lambda-based) variant of getSSTableSectionsForRanges;
// the lambda body, interval-tree query, and the tail of new SSTableStreamingSections(...) are
// missing from this view, so this is not compilable as-is. Visible intent mirrors the earlier
// variant: select-and-reference sstables intersecting the ranges via an interval tree over the
// canonical view, build streaming sections per sstable, and release refs on failure before rethrow.
@VisibleForTesting public static List<SSTableStreamingSections> getSSTableSectionsForRanges(Collection<Range<Token>> ranges, Collection<ColumnFamilyStore> stores, long overriddenRepairedAt, final boolean isIncremental) Refs<SSTableReader> refs = new Refs<>(); try for (Range<Token> range : ranges) keyRanges.add(Range.makeRowRange(range)); refs.addAll(cfStore.selectAndReference(view -> { Set<SSTableReader> sstables = Sets.newHashSet(); SSTableIntervalTree intervalTree = SSTableIntervalTree.build(view.select(SSTableSet.CANONICAL)); List<SSTableStreamingSections> sections = new ArrayList<>(refs.size()); for (SSTableReader sstable : refs) if (overriddenRepairedAt == ActiveRepairService.UNREPAIRED_SSTABLE) repairedAt = sstable.getSSTableMetadata().repairedAt; sections.add(new SSTableStreamingSections(refs.get(sstable), sstable.getPositionsForRanges(ranges), sstable.estimatedKeysForRanges(ranges), refs.release(); throw t;
// Doc-only: the statement ordering below is explicitly a point-of-no-return commit
// sequence (log commit first, then irreversible cleanup), so the code is left
// byte-identical. Sequence: assert no staged actions remain -> fail fast on any
// accumulated error -> commit the transaction log (last abortable step) -> then,
// ignoring further errors, mark obsoletions, update size tracking, and release
// self-refs on the obsoleted readers, folding any exceptions into 'accumulate'.
/** * point of no return: commit all changes, but leave all readers marked as compacting */ public Throwable doCommit(Throwable accumulate) { assert staged.isEmpty() : "must be no actions introduced between prepareToCommit and a commit"; if (logger.isTraceEnabled()) logger.trace("Committing transaction over {} staged: {}, logged: {}", originals, staged, logged); // accumulate must be null if we have been used correctly, so fail immediately if it is not maybeFail(accumulate); // transaction log commit failure means we must abort; safe commit is not possible maybeFail(log.commit(null)); // this is now the point of no return; we cannot safely rollback, so we ignore exceptions until we're done // we restore state by obsoleting our obsolete files, releasing our references to them, and updating our size // and notification status for the obsolete and new files accumulate = markObsolete(obsoletions, accumulate); accumulate = tracker.updateSizeTracking(logged.obsolete, logged.update, accumulate); accumulate = release(selfRefs(logged.obsolete), accumulate); //accumulate = tracker.notifySSTablesChanged(originals, logged.update, log.type(), accumulate); return accumulate; }
// NOTE(review): torn fragment — the enclosing method, the loop over snapshot sstables, and the
// catch that rethrows 'e' are not visible in this view; not compilable as-is. Visible intent:
// index live sstables by generation, list the snapshot's components, and acquire refs to each
// snapshot sstable, releasing everything on failure.
// NOTE(review): "if (sstable == null || !refs.tryRef(sstable)) refs.tryRef(sstable);" looks
// wrong — retrying tryRef after it failed (or on null) cannot succeed and would NPE on null;
// the missing interior lines presumably load the sstable from disk between these calls — confirm
// against the full method before changing anything.
active.put(sstable.descriptor.generation, sstable); Map<Descriptor, Set<Component>> snapshots = getDirectories().sstableLister(Directories.OnTxnErr.IGNORE).snapshots(tag).list(); Refs<SSTableReader> refs = new Refs<>(); try if (sstable == null || !refs.tryRef(sstable)) refs.tryRef(sstable); refs.release(); throw e;
/**
 * Releases every sstable reference held by this object.
 */
public void close()
{
    refs.release();
} }
// NOTE(review): torn fragment (duplicate of an earlier chunk) — the try body and surrounding
// method are missing from this view. Visible intent: pin refs to the sstables under compaction,
// open strategy scanners, and drive a CompactionIterator; the trailing statement releases
// self-refs on the newly written sstables (its execution context is not visible here).
try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact); AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact); CompactionIterator ci = new CompactionIterator(compactionType, scanners.scanners, controller, nowInSec, taskId)) Refs.release(Refs.selfRefs(newSStables));
// NOTE(review): torn fragment of performAnticompaction (duplicate of an earlier chunk) — the
// signature head and the logic between these statements are missing; not compilable as-is.
// Visible intent: log anticompaction start for the repair session, snapshot validated sstables,
// release refs for sstables excluded from anticompaction, then release remaining refs and close
// the lifecycle transaction.
UUID parentRepairSession) throws InterruptedException, IOException logger.info("[repair #{}] Starting anticompaction for {}.{} on {}/{} sstables", parentRepairSession, cfs.keyspace.getName(), cfs.getTableName(), validatedForRepair.size(), cfs.getLiveSSTables()); logger.trace("[repair #{}] Starting anticompaction for ranges {}", parentRepairSession, ranges); Set<SSTableReader> sstables = new HashSet<>(validatedForRepair); validatedForRepair.release(Sets.union(nonAnticompacting, mutatedRepairStatuses)); assert txn.originals().equals(sstables); if (!sstables.isEmpty()) validatedForRepair.release(); txn.close();