/**
 * FOR TESTING PURPOSES.
 * Wipes and recreates the data directories, then resets the CommitLog.
 * cleanup() leaves the CommitLog in an unusable state, so resetUnsafe()
 * is required afterwards to bring it back to a safe state.
 */
public static void cleanupAndLeaveDirs() throws IOException
{
    mkdirs();
    cleanup();
    mkdirs();
    CommitLog.instance.resetUnsafe(); // cleanup screws w/ CommitLog, this brings it back to safe state
}
private void calculateSize() { try { // The Arrays.stream approach is considerably slower on Windows than linux sizeInProgress = 0; Files.walkFileTree(path.toPath(), this); size = sizeInProgress; } catch (IOException ie) { CommitLog.instance.handleCommitError("Failed CDC Size Calculation", ie); } }
/**
 * FOR TESTING PURPOSES.
 * Stops the commit log, re-reads its configuration, and restarts it.
 *
 * @param deleteSegments whether existing segments are deleted while stopping
 * @return the number of files recovered
 * @throws IOException if stopping or restarting the commit log fails
 */
public int resetUnsafe(boolean deleteSegments) throws IOException
{
    stopUnsafe(deleteSegments);
    resetConfiguration();
    return restartUnsafe();
}
/**
 * Test helper: stops the commit log, wipes and recreates all data
 * directories, and restarts the commit log so subsequent tests start clean.
 */
private static void cleanupAndLeaveDirs() throws IOException
{
    CommitLog log = CommitLog.instance;
    // Stop the commit log (discarding segments) before touching directories.
    log.stopUnsafe(true);
    DatabaseDescriptor.createAllDirectories();
    cleanup();
    // cleanup() removes the directories again, so recreate them before restarting.
    DatabaseDescriptor.createAllDirectories();
    log.restartUnsafe();
}
// NOTE(review): this span appears to fuse two variants of the syncer loop —
// a blocking sync(true) path and a non-blocking sync(false) path; verify the
// ordering against the enclosing method before relying on it.
commitLog.sync(true);
lastSyncedAt = pollStarted;
// Wake any threads blocked waiting for this sync to complete.
syncComplete.signalAll();
commitLog.sync(false);
// Stop if the commit log failure policy deems the error fatal.
if (!CommitLog.handleCommitError("Failed to persist commits to disk", t))
    return false;
// Force all active commit log segments to be recycled, then shut the
// commit log down, blocking until shutdown completes.
CommitLog.instance.forceRecycleAllSegments();
CommitLog.instance.shutdownBlocking();
/**
 * Drops a column family (table) from the given keyspace: purges its schema,
 * swaps in a keyspace definition without it, interrupts compactions,
 * optionally snapshots the data, drops the CFS, and recycles commit log
 * segments referencing its cfId.
 *
 * Statement order is significant here (purge before swap, snapshot before
 * drop), so the sequence is left unchanged.
 */
private static void dropColumnFamily(String ksName, String cfName)
{
    KSMetaData ksm = Schema.instance.getKSMetaData(ksName);
    assert ksm != null;
    ColumnFamilyStore cfs = Keyspace.open(ksName).getColumnFamilyStore(cfName);
    assert cfs != null;

    // reinitialize the keyspace.
    CFMetaData cfm = ksm.cfMetaData().get(cfName);

    Schema.instance.purge(cfm);
    Schema.instance.setKeyspaceDefinition(makeNewKeyspaceDefinition(ksm, cfm));

    // Halt any in-flight compactions on the table before removing it.
    CompactionManager.instance.interruptCompactionFor(Arrays.asList(cfm), true);

    if (!StorageService.instance.isClientMode())
    {
        // Snapshot first when auto-snapshot is enabled so data stays recoverable.
        if (DatabaseDescriptor.isAutoSnapshot())
            cfs.snapshot(Keyspace.getTimestampedSnapshotName(cfs.name));
        Keyspace.open(ksm.name).dropCf(cfm.cfId);
        MigrationManager.instance.notifyDropColumnFamily(cfm);
        // Recycle segments referencing the dropped cfId — presumably so replay
        // won't apply mutations for a table that no longer exists; confirm
        // against forceRecycleAllSegments' contract.
        CommitLog.instance.forceRecycleAllSegments(Collections.singleton(cfm.cfId));
    }
}
public CommitLogPosition call() { try { // we wait on the latch for the commitLogUpperBound to be set, and so that waiters // on this task can rely on all prior flushes being complete latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(); } CommitLogPosition commitLogUpperBound = CommitLogPosition.NONE; // If a flush errored out but the error was ignored, make sure we don't discard the commit log. if (flushFailure == null && !memtables.isEmpty()) { Memtable memtable = memtables.get(0); commitLogUpperBound = memtable.getCommitLogUpperBound(); CommitLog.instance.discardCompletedSegments(metadata.cfId, memtable.getCommitLogLowerBound(), commitLogUpperBound); } metric.pendingFlushes.dec(); if (flushFailure != null) throw Throwables.propagate(flushFailure); return commitLogUpperBound; } }
// Append the mutation to the commit log and record the position it was
// written at — presumably used later as the replay position; verify against
// the enclosing write path.
commitLogPosition = CommitLog.instance.add(mutation);
/**
 * FOR TESTING PURPOSES. See CommitLogAllocator.
 * Performs a blocking sync so pending writes are flushed, then resets the
 * allocator's state.
 */
public void resetUnsafe()
{
    sync(true);
    allocator.resetUnsafe();
}
// Shut the commit log down, blocking until shutdown completes.
CommitLog.instance.shutdownBlocking();
// Sync the commit log (final sync when shutting down), record when this sync
// round started, and wake threads waiting for it to complete.
commitLog.sync(shutdown);
lastSyncedAt = syncStarted;
syncComplete.signalAll();
// Exit the sync loop if the commit log failure policy deems the error fatal.
if (!CommitLog.handleCommitError("Failed to persist commits to disk", t))
    break;
// Force all active commit log segments to be recycled, then shut the
// commit log down, blocking until shutdown completes.
CommitLog.instance.forceRecycleAllSegments();
CommitLog.instance.shutdownBlocking();
/**
 * Drops a table from the given keyspace: removes its indexes, unloads its
 * metadata, swaps in keyspace metadata without it, interrupts compactions,
 * optionally snapshots the data, drops the CFS, and recycles commit log
 * segments referencing its cfId.
 *
 * Statement order is significant (indexes removed before the metadata swap,
 * snapshot before drop), so the sequence is left unchanged.
 */
public void dropTable(String ksName, String tableName)
{
    KeyspaceMetadata oldKsm = getKSMetaData(ksName);
    assert oldKsm != null;
    ColumnFamilyStore cfs = Keyspace.open(ksName).getColumnFamilyStore(tableName);
    assert cfs != null;

    // make sure all the indexes are dropped, or else.
    cfs.indexManager.markAllIndexesRemoved();

    // reinitialize the keyspace.
    CFMetaData cfm = oldKsm.tables.get(tableName).get();
    KeyspaceMetadata newKsm = oldKsm.withSwapped(oldKsm.tables.without(tableName));

    unload(cfm);
    setKeyspaceMetadata(newKsm);

    // Halt any in-flight compactions on the table before removing it.
    CompactionManager.instance.interruptCompactionFor(Collections.singleton(cfm), true);

    // Snapshot first when auto-snapshot is enabled so data stays recoverable.
    if (DatabaseDescriptor.isAutoSnapshot())
        cfs.snapshot(Keyspace.getTimestampedSnapshotNameWithPrefix(cfs.name, ColumnFamilyStore.SNAPSHOT_DROP_PREFIX));
    Keyspace.open(ksName).dropCf(cfm.cfId);
    MigrationManager.instance.notifyDropColumnFamily(cfm);
    // Recycle segments referencing the dropped cfId — presumably so replay
    // won't apply mutations for a table that no longer exists; confirm
    // against forceRecycleAllSegments' contract.
    CommitLog.instance.forceRecycleAllSegments(Collections.singleton(cfm.cfId));
}
public CommitLogPosition call() { try { // we wait on the latch for the commitLogUpperBound to be set, and so that waiters // on this task can rely on all prior flushes being complete latch.await(); } catch (InterruptedException e) { throw new IllegalStateException(); } CommitLogPosition commitLogUpperBound = CommitLogPosition.NONE; // If a flush errored out but the error was ignored, make sure we don't discard the commit log. if (flushFailure == null && !memtables.isEmpty()) { Memtable memtable = memtables.get(0); commitLogUpperBound = memtable.getCommitLogUpperBound(); CommitLog.instance.discardCompletedSegments(metadata.cfId, memtable.getCommitLogLowerBound(), commitLogUpperBound); } metric.pendingFlushes.dec(); if (flushFailure != null) throw Throwables.propagate(flushFailure); return commitLogUpperBound; } }
// Ask the commit log's syncer to perform an additional sync pass.
commitLog.requestExtraSync();
// Append the mutation to the commit log and record the position it was
// written at — presumably used later as the replay position; verify against
// the enclosing write path.
commitLogPosition = CommitLog.instance.add(mutation);
/**
 * FOR TESTING PURPOSES.
 * Stops the commit log, re-reads its configuration, and restarts it.
 *
 * @param deleteSegments whether existing segments are deleted while stopping
 * @return the number of files recovered
 * @throws IOException if stopping or restarting the commit log fails
 */
public int resetUnsafe(boolean deleteSegments) throws IOException
{
    stopUnsafe(deleteSegments);
    resetConfiguration();
    return restartUnsafe();
}
private static void cleanupAndLeaveDirs() throws IOException { mkdirs(); cleanup(); mkdirs(); CommitLog commitLog = CommitLog.instance; commitLog.resetUnsafe(true); // cleanup screws w/ CommitLog, this brings it back to safe state }