/**
 * Checks whether the newest reftable on the stack is a small table produced
 * by an earlier INSERT that is cheap to merge with the refs being written.
 *
 * @param cfg
 *            reftable configuration supplying the ref block size.
 * @return {@code true} if the top of the stack may be compacted together
 *         with the table currently being written.
 * @throws IOException
 *             the reftable stack cannot be accessed.
 */
private boolean canCompactTopOfStack(ReftableConfig cfg) throws IOException {
	ReftableStack stack = refdb.stack();
	List<Reftable> tables = stack.readers();
	if (tables.isEmpty()) {
		return false;
	}

	int top = tables.size() - 1;
	DfsReftable newest = stack.files().get(top);
	DfsPackDescription topDesc = newest.getPackDescription();
	boolean insertOnlyReftable = topDesc.getPackSource() == PackSource.INSERT
			&& packOnlyContainsReftable(topDesc);
	if (!insertOnlyReftable) {
		return false;
	}

	Reftable topTable = tables.get(top);
	if (!(topTable instanceof ReftableReader)) {
		return false;
	}
	// Only fold in tables at most a few ref blocks in size; anything
	// larger is left for a background compaction pass.
	return ((ReftableReader) topTable).size() <= 3 * cfg.getRefBlockSize();
}
/**
 * Writes the pending ref updates as a new reftable pack and commits it to
 * the object database, optionally folding the current top of the reftable
 * stack into the same output table.
 *
 * @param rw
 *            walk used to resolve the new ref values.
 * @param pending
 *            commands describing the ref updates to apply.
 * @throws IOException
 *             the new reftable cannot be written or committed.
 */
private void applyUpdates(RevWalk rw, List<ReceiveCommand> pending)
		throws IOException {
	List<Ref> newRefs = toNewRefs(rw, pending);
	long updateIndex = nextUpdateIndex();
	Set<DfsPackDescription> replaced = Collections.emptySet();
	DfsPackDescription newPack = odb.newPack(PackSource.INSERT);
	try (DfsOutputStream out = odb.writeFile(newPack, REFTABLE)) {
		ReftableConfig cfg = DfsPackCompactor
				.configureReftable(reftableConfig, out);

		// Estimate whether the update is small enough to merge with the
		// existing top of the stack in a single pass.
		boolean smallUpdate = newRefs.size() * AVG_BYTES <= cfg
				.getRefBlockSize();

		ReftableWriter.Stats stats;
		if (refdb.compactDuringCommit() && smallUpdate
				&& canCompactTopOfStack(cfg)) {
			// Buffer the new table in memory, then merge it with the
			// current top of stack into one output table, scheduling the
			// old top for pruning.
			ByteArrayOutputStream buf = new ByteArrayOutputStream();
			write(buf, cfg, updateIndex, newRefs, pending);
			stats = compactTopOfStack(out, cfg, buf.toByteArray());
			replaced = toPruneTopOfStack();
		} else {
			stats = write(out, cfg, updateIndex, newRefs, pending);
		}
		newPack.addFileExt(REFTABLE);
		newPack.setReftableStats(stats);
	}

	// Publish the new pack, retire any replaced table, and drop cached
	// state so subsequent reads see the updated stack.
	odb.commitPack(Collections.singleton(newPack), replaced);
	odb.addReftable(newPack, replaced);
	refdb.clearCache();
}
// NOTE(review): fragment — the enclosing constructor/method begins outside
// this chunk. Copies block-layout tuning values (ref block size, log block
// size, restart interval) from a ReftableConfig into local fields;
// presumably part of a writer/config initialization — confirm in full file.
refBlockSize = config.getRefBlockSize(); logBlockSize = config.getLogBlockSize(); restartInterval = config.getRestartInterval();
/**
 * Determines whether the newest reftable on the stack can be compacted
 * together with the table about to be written.
 * <p>
 * Requires the stack to be non-empty, the top table's pack to originate
 * from an INSERT containing only a reftable, and the top table to be a
 * {@code ReftableReader} no larger than three ref blocks.
 *
 * @param cfg
 *            reftable configuration supplying the ref block size.
 * @return {@code true} if the top of the stack may be compacted.
 * @throws IOException
 *             the reftable stack cannot be accessed.
 */
private boolean canCompactTopOfStack(ReftableConfig cfg) throws IOException { ReftableStack stack = refdb.stack(); List<Reftable> readers = stack.readers(); if (readers.isEmpty()) { return false; } int lastIdx = readers.size() - 1; DfsReftable last = stack.files().get(lastIdx); DfsPackDescription desc = last.getPackDescription(); if (desc.getPackSource() != PackSource.INSERT || !packOnlyContainsReftable(desc)) { return false; } Reftable table = readers.get(lastIdx); int bs = cfg.getRefBlockSize(); return table instanceof ReftableReader && ((ReftableReader) table).size() <= 3 * bs; }
/**
 * Writes the pending ref updates as a new reftable pack and commits it to
 * the object database. When the update is small and the top of the stack
 * qualifies, the new refs are merged with the existing top table in one
 * pass and the old top is scheduled for pruning; otherwise the new table
 * is written standalone. Finally the pack is committed, registered as a
 * reftable, and the ref database cache is cleared.
 *
 * @param rw
 *            walk used to resolve the new ref values.
 * @param pending
 *            commands describing the ref updates to apply.
 * @throws IOException
 *             the new reftable cannot be written or committed.
 */
private void applyUpdates(RevWalk rw, List<ReceiveCommand> pending) throws IOException { List<Ref> newRefs = toNewRefs(rw, pending); long updateIndex = nextUpdateIndex(); Set<DfsPackDescription> prune = Collections.emptySet(); DfsPackDescription pack = odb.newPack(PackSource.INSERT); try (DfsOutputStream out = odb.writeFile(pack, REFTABLE)) { ReftableConfig cfg = DfsPackCompactor .configureReftable(reftableConfig, out); ReftableWriter.Stats stats; if (refdb.compactDuringCommit() && newRefs.size() * AVG_BYTES <= cfg.getRefBlockSize() && canCompactTopOfStack(cfg)) { ByteArrayOutputStream tmp = new ByteArrayOutputStream(); write(tmp, cfg, updateIndex, newRefs, pending); stats = compactTopOfStack(out, cfg, tmp.toByteArray()); prune = toPruneTopOfStack(); } else { stats = write(out, cfg, updateIndex, newRefs, pending); } pack.addFileExt(REFTABLE); pack.setReftableStats(stats); } odb.commitPack(Collections.singleton(pack), prune); odb.addReftable(pack, prune); refdb.clearCache(); }
// NOTE(review): fragment — the enclosing constructor/method begins outside
// this chunk (duplicate of the fragment earlier in this file). Copies
// block-layout tuning values from a ReftableConfig into local fields —
// confirm the enclosing definition in the full file.
refBlockSize = config.getRefBlockSize(); logBlockSize = config.getLogBlockSize(); restartInterval = config.getRestartInterval();