// Tail of an anonymous per-sstable operation (presumably passed to a
// parallelAllSSTableOperation-style driver — start not visible in this chunk):
// rewrites each sstable group onto the current sstable format (upgradesstables).
@Override public void execute(LifecycleTransaction txn)
{
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap, so exactly the
    // sstables in this transaction are rewritten as-is.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.UPGRADE_SSTABLES);
    task.execute(metrics);
}
}, jobs, OperationType.UPGRADE_SSTABLES); // NOTE(review): fragment — closes the enclosing anonymous class and call; 'jobs' presumably bounds parallelism, confirm against the driver
// Runnable body: executes the prepared compaction task on the calling executor,
// recording stats into 'metrics'. Enclosing anonymous class starts outside this chunk.
protected void runMayThrow()
{
    task.execute(metrics);
}
}; // closes the enclosing anonymous (WrappedRunnable-style — TODO confirm) class
/**
 * Executes the task and unmarks sstables compacting.
 *
 * Delegates the actual work to {@code executeInternal}; the finally block
 * guarantees the sstables are released from the "compacting" set even when
 * compaction throws, so other operations can pick them up again.
 *
 * @param collector sink for per-compaction executor statistics
 * @return value propagated from executeInternal (meaning defined there)
 */
public int execute(CompactionExecutorStatsCollector collector)
{
    try
    {
        return executeInternal(collector);
    }
    finally
    {
        // Always release the compacting marker, success or failure.
        cfs.getDataTracker().unmarkCompacting(sstables);
    }
}
// Runnable body: executes the prepared compaction task on the calling executor,
// recording stats into 'metrics'. Enclosing anonymous class starts outside this chunk.
protected void runMayThrow()
{
    task.execute(metrics);
}
}; // closes the enclosing anonymous (WrappedRunnable-style — TODO confirm) class
/**
 * Executes the task and unmarks sstables compacting.
 *
 * Delegates to {@code executeInternal}. A disk-full error is deliberately
 * re-wrapped as a plain RuntimeException so it does NOT get treated as an
 * FSError by the generic file-system error handler (which could stop the
 * node); the original stack trace is preserved on the synthetic cause.
 * The wrap-without-cause of 'e' itself is intentional here, not an oversight.
 *
 * @param collector sink for per-compaction executor statistics
 * @return value propagated from executeInternal (meaning defined there)
 */
public int execute(CompactionExecutorStatsCollector collector)
{
    try
    {
        return executeInternal(collector);
    }
    catch(FSDiskFullWriteError e)
    {
        RuntimeException cause = new RuntimeException("Converted from FSDiskFullWriteError: " + e.getMessage());
        cause.setStackTrace(e.getStackTrace());
        throw new RuntimeException("Throwing new Runtime to bypass exception handler when disk is full", cause);
    }
    finally
    {
        // Always close the lifecycle transaction — presumably releases the
        // compacting marker and finalizes/aborts the sstable swap; confirm
        // against LifecycleTransaction.close().
        transaction.close();
    }
}

// Subclasses choose how output sstables are written for this compaction
// (writer strategy varies by task type).
public abstract CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, Directories directories, LifecycleTransaction txn, Set<SSTableReader> nonExpiredSSTables);
// Tail of an anonymous per-sstable operation (presumably passed to a
// parallelAllSSTableOperation-style driver — start not visible in this chunk):
// rewrites each sstable group onto the current sstable format (upgradesstables).
@Override public void execute(LifecycleTransaction txn)
{
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap, so exactly the
    // sstables in this transaction are rewritten as-is.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.UPGRADE_SSTABLES);
    task.execute(metrics);
}
}, jobs, OperationType.UPGRADE_SSTABLES); // NOTE(review): fragment — closes the enclosing anonymous class and call; 'jobs' presumably bounds parallelism, confirm against the driver
// Runnable body: executes the prepared compaction task; any IOException
// propagates to the enclosing runnable's handler (outside this chunk).
protected void runMayThrow() throws IOException
{
    task.execute(metrics);
}
}; // closes the enclosing anonymous class
/**
 * Executes the task and unmarks sstables compacting.
 *
 * Delegates to {@code executeInternal}. A disk-full error is deliberately
 * re-wrapped as a plain RuntimeException so it does NOT get treated as an
 * FSError by the generic file-system error handler (which could stop the
 * node); the original stack trace is preserved on the synthetic cause.
 * The wrap-without-cause of 'e' itself is intentional here, not an oversight.
 *
 * @param collector sink for per-compaction executor statistics
 * @return value propagated from executeInternal (meaning defined there)
 */
public int execute(CompactionExecutorStatsCollector collector)
{
    try
    {
        return executeInternal(collector);
    }
    catch(FSDiskFullWriteError e)
    {
        RuntimeException cause = new RuntimeException("Converted from FSDiskFullWriteError: " + e.getMessage());
        cause.setStackTrace(e.getStackTrace());
        throw new RuntimeException("Throwing new Runtime to bypass exception handler when disk is full", cause);
    }
    finally
    {
        // Always close the lifecycle transaction — presumably releases the
        // compacting marker and finalizes/aborts the sstable swap; confirm
        // against LifecycleTransaction.close().
        transaction.close();
    }
}

// Subclasses choose how output sstables are written for this compaction
// (writer strategy varies by task type).
public abstract CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, Directories directories, LifecycleTransaction txn, Set<SSTableReader> nonExpiredSSTables);
// Older, per-sstable variant of the upgrade operation: builds a
// single-sstable user-defined task and runs it with no tombstone GC and
// no size cap, so the input sstable is rewritten exactly.
@Override public void execute(SSTableReader input) throws IOException
{
    AbstractCompactionTask task = cfs.getCompactionStrategy().getCompactionTask(Collections.singleton(input), NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.UPGRADE_SSTABLES);
    task.execute(metrics);
}
}, jobs); // NOTE(review): fragment — closes the enclosing anonymous class and call
// Runnable body: runs each prepared compaction task in sequence, skipping
// slots the strategy returned as null. Enclosing class starts outside this chunk.
protected void runMayThrow()
{
    for (AbstractCompactionTask task : tasks)
        if (task != null)
            task.execute(metrics);
}
}; // closes the enclosing anonymous class
/**
 * Executes the task and unmarks sstables compacting.
 *
 * Delegates to {@code executeInternal}. A disk-full error is deliberately
 * re-wrapped as a plain RuntimeException so it does NOT get treated as an
 * FSError by the generic file-system error handler (which could stop the
 * node); the original stack trace is preserved on the synthetic cause.
 * The wrap-without-cause of 'e' itself is intentional here, not an oversight.
 *
 * @param collector sink for per-compaction executor statistics
 * @return value propagated from executeInternal (meaning defined there)
 */
public int execute(CompactionExecutorStatsCollector collector)
{
    try
    {
        return executeInternal(collector);
    }
    catch(FSDiskFullWriteError e)
    {
        RuntimeException cause = new RuntimeException("Converted from FSDiskFullWriteError: " + e.getMessage());
        cause.setStackTrace(e.getStackTrace());
        throw new RuntimeException("Throwing new Runtime to bypass exception handler when disk is full", cause);
    }
    finally
    {
        // Always close the lifecycle transaction — presumably releases the
        // compacting marker and finalizes/aborts the sstable swap; confirm
        // against LifecycleTransaction.close().
        transaction.close();
    }
}

// Subclasses choose how output sstables are written for this compaction
// (writer strategy varies by task type).
public abstract CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, Directories directories, LifecycleTransaction txn, Set<SSTableReader> nonExpiredSSTables);
// Tail of an anonymous per-sstable operation (presumably passed to a
// parallelAllSSTableOperation-style driver — start not visible in this chunk):
// rewrites each sstable group onto the current sstable format (upgradesstables).
@Override public void execute(LifecycleTransaction txn)
{
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap, so exactly the
    // sstables in this transaction are rewritten as-is.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.UPGRADE_SSTABLES);
    task.execute(metrics);
}
}, jobs, OperationType.UPGRADE_SSTABLES); // NOTE(review): fragment — closes the enclosing anonymous class and call; 'jobs' presumably bounds parallelism, confirm against the driver
// Runnable body: runs each prepared compaction task in sequence, skipping
// slots the strategy returned as null. Enclosing class starts outside this chunk.
protected void runMayThrow()
{
    for (AbstractCompactionTask task : tasks)
        if (task != null)
            task.execute(metrics);
}
}; // closes the enclosing anonymous class
// Tail of an anonymous per-sstable operation for `nodetool relocatesstables`:
// rewrites each sstable group so data lands on its correct disk/location.
@Override public void execute(LifecycleTransaction txn)
{
    logger.debug("Relocating {}", txn.originals());
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap — pure rewrite.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.RELOCATE);
    task.execute(metrics);
}
}, jobs, OperationType.RELOCATE); // NOTE(review): fragment — closes the enclosing anonymous class and call
// Runnable body: executes the prepared compaction task on the calling executor,
// recording stats into 'metrics'. Enclosing anonymous class starts outside this chunk.
protected void runMayThrow()
{
    task.execute(metrics);
}
}; // closes the enclosing anonymous (WrappedRunnable-style — TODO confirm) class
// Tail of an anonymous per-sstable operation for `nodetool relocatesstables`:
// rewrites each sstable group so data lands on its correct disk/location.
@Override public void execute(LifecycleTransaction txn)
{
    logger.debug("Relocating {}", txn.originals());
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap — pure rewrite.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.RELOCATE);
    task.execute(metrics);
}
}, jobs, OperationType.RELOCATE); // NOTE(review): fragment — closes the enclosing anonymous class and call
// Runnable body: runs each prepared compaction task in sequence, skipping
// slots the strategy returned as null. Enclosing class starts outside this chunk.
protected void runMayThrow()
{
    for (AbstractCompactionTask task : tasks)
        if (task != null)
            task.execute(metrics);
}
}; // closes the enclosing anonymous class
// Tail of an anonymous per-sstable operation for `nodetool relocatesstables`:
// rewrites each sstable group so data lands on its correct disk/location.
@Override public void execute(LifecycleTransaction txn)
{
    logger.debug("Relocating {}", txn.originals());
    // NO_GC / Long.MAX_VALUE: no tombstone GC and no size cap — pure rewrite.
    AbstractCompactionTask task = cfs.getCompactionStrategyManager().getCompactionTask(txn, NO_GC, Long.MAX_VALUE);
    task.setUserDefined(true);
    task.setCompactionType(OperationType.RELOCATE);
    task.execute(metrics);
}
}, jobs, OperationType.RELOCATE); // NOTE(review): fragment — closes the enclosing anonymous class and call
protected void runMayThrow() throws IOException { // look up the sstables now that we're on the compaction executor, so we don't try to re-compact // something that was already being compacted earlier. Collection<SSTableReader> sstables = new ArrayList<SSTableReader>(dataFiles.size()); for (Descriptor desc : dataFiles) { // inefficient but not in a performance sensitive path SSTableReader sstable = lookupSSTable(cfs, desc); if (sstable == null) { logger.info("Will not compact {}: it is not an active sstable", desc); } else { sstables.add(sstable); } } if (sstables.isEmpty()) { logger.info("No files to compact for user defined compaction"); } else { AbstractCompactionTask task = cfs.getCompactionStrategy().getUserDefinedTask(sstables, gcBefore); if (task != null) task.execute(metrics); } } };
task.execute(metrics);