/**
 * Merge the newly written table with the table currently at the top of
 * the reftable stack, writing the combined result to {@code out}.
 *
 * @param out
 *            destination stream for the compacted reftable; caller is
 *            responsible for closing it.
 * @param cfg
 *            configuration applied to the compactor.
 * @param newTable
 *            serialized reftable bytes to merge on top of the stack head.
 * @return statistics describing the table that was written.
 * @throws IOException
 *             if the tables cannot be read, or the result cannot be written.
 */
private ReftableWriter.Stats compactTopOfStack(OutputStream out,
		ReftableConfig cfg, byte[] newTable) throws IOException {
	// Only the most recent table on the stack participates; the new
	// table is added last so its entries shadow the older ones.
	List<Reftable> readers = refdb.stack().readers();
	Reftable top = readers.get(readers.size() - 1);

	List<Reftable> toMerge = new ArrayList<>(2);
	toMerge.add(top);
	toMerge.add(new ReftableReader(BlockSource.from(newTable)));

	ReftableCompactor merger = new ReftableCompactor();
	merger.setConfig(cfg);
	// Keep deletion records: the compacted table is not the base of the
	// stack, so deletions must remain visible to shadow lower tables.
	merger.setIncludeDeletes(true);
	merger.addAll(toMerge);
	merger.compact(out);
	return merger.getStats();
}
/**
 * Add all of the tables, in the specified order.
 * <p>
 * All tables are added unconditionally; the
 * {@link #setCompactBytesLimit(long)} setting is ignored by this method.
 *
 * @param readers
 *            tables to compact, ordered oldest first / most recent last,
 *            so that more recent tables shadow results from older ones.
 *            The caller is responsible for closing the readers.
 * @throws java.io.IOException
 *             if the update indexes of a reader cannot be accessed.
 */
public void addAll(List<? extends Reftable> readers) throws IOException {
	tables.addAll(readers);
	// Only ReftableReader instances expose update indexes on disk;
	// widen the compactor's min/max range to cover each of them.
	for (int i = 0; i < readers.size(); i++) {
		Reftable table = readers.get(i);
		if (table instanceof ReftableReader) {
			adjustUpdateIndexes((ReftableReader) table);
		}
	}
}
/**
 * Write a compaction to {@code out}.
 *
 * @param out
 *            stream to write the compacted tables to. Caller is
 *            responsible for closing {@code out}.
 * @throws java.io.IOException
 *             if tables cannot be read, or cannot be written.
 */
public void compact(OutputStream out) throws IOException {
	// Snapshot the table list so the merged view is isolated from any
	// later mutation of the compactor's internal collection.
	List<Reftable> snapshot = new ArrayList<>(tables);
	MergedReftable merged = new MergedReftable(snapshot);
	merged.setIncludeDeletes(includeDeletes);

	// Update indexes may never be negative in the output table.
	long lowerBound = minUpdateIndex < 0 ? 0 : minUpdateIndex;
	writer.setMinUpdateIndex(lowerBound);
	writer.setMaxUpdateIndex(maxUpdateIndex);

	writer.begin(out);
	mergeRefs(merged);
	mergeLogs(merged);
	writer.finish();
	stats = writer.getStats();
}
/**
 * Runs the prepared compactor and stores its output as the REFTABLE
 * file of {@code pack}, recording the writer statistics on the pack.
 *
 * @param pack
 *            pack description that receives the compacted reftable.
 * @param compact
 *            compactor already loaded with the tables to merge.
 * @throws IOException
 *             if the reftable cannot be written.
 */
private void compactReftable(DfsPackDescription pack,
		ReftableCompactor compact) throws IOException {
	// try-with-resources guarantees the DFS output stream is closed
	// even if compaction fails part-way through.
	try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
		// Configure against the live stream (e.g. block sizing) before
		// the merge begins.
		compact.setConfig(configureReftable(reftableConfig, out));
		compact.compact(out);
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(compact.getStats());
	}
}
}
/**
 * Produces the reftable for {@code pack}: either converts the captured
 * refs into a brand new reftable, or compacts the pre-existing reftable
 * stack into a single table.
 *
 * @param pack
 *            pack description that receives the reftable.
 * @throws IOException
 *             if reading the stack or writing the table fails.
 */
private void writeReftable(DfsPackDescription pack) throws IOException {
	if (convertToReftable && !hasGcReftable()) {
		// No GC reftable exists yet; build one from the refs that were
		// captured before the GC pass started.
		writeReftable(pack, refsBefore);
		return;
	}
	try (ReftableStack stack = ReftableStack.open(ctx, reftablesBefore)) {
		ReftableCompactor compactor = new ReftableCompactor();
		compactor.setIncludeDeletes(includeDeletes);
		compactor.addAll(stack.readers());
		compactReftable(pack, compactor);
	}
}
/**
 * Writes the output of {@code compact} as the REFTABLE extension file of
 * {@code pack} in {@code objdb}, and attaches the resulting statistics
 * to the pack description.
 *
 * @param objdb
 *            object database the reftable file is created in.
 * @param pack
 *            pack description that receives the compacted reftable.
 * @param compact
 *            compactor already loaded with the tables to merge.
 * @throws IOException
 *             if the reftable cannot be written.
 */
private void writeReftable(DfsObjDatabase objdb, DfsPackDescription pack,
		ReftableCompactor compact) throws IOException {
	try (DfsOutputStream dst = objdb.writeFile(pack, REFTABLE)) {
		// Derive the effective config from the live stream before merging.
		ReftableConfig cfg = configureReftable(reftableConfig, dst);
		compact.setConfig(cfg);
		compact.compact(dst);
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(compact.getStats());
	}
}
/**
 * Compacts all source reftables into a single output reftable.
 *
 * @param ctx
 *            reader context used to open the source reftable stack.
 * @throws IOException
 *             if the source tables cannot be read or the result cannot
 *             be written.
 */
private void compactReftables(DfsReader ctx) throws IOException {
	DfsObjDatabase objdb = repo.getObjectDatabase();
	// Order the inputs per the database's reftable ordering so newer
	// tables shadow older ones during the merge.
	Collections.sort(srcReftables, objdb.reftableComparator());
	try (ReftableStack stack = ReftableStack.open(ctx, srcReftables)) {
		initOutDesc(objdb);
		ReftableCompactor compact = new ReftableCompactor();
		compact.addAll(stack.readers());
		// Keep deletion records so they continue to shadow entries in
		// any tables below the compacted result.
		compact.setIncludeDeletes(true);
		writeReftable(objdb, outDesc, compact);
	}
}
/**
 * Compacts the table at the top of the reftable stack together with the
 * freshly serialized {@code newTable}, writing the merged table to
 * {@code out}.
 *
 * @param out
 *            destination stream; caller is responsible for closing it.
 * @param cfg
 *            configuration applied to the compactor.
 * @param newTable
 *            serialized reftable bytes to merge on top of the stack head.
 * @return statistics of the written reftable.
 * @throws IOException
 *             if the tables cannot be read, or cannot be written.
 */
private ReftableWriter.Stats compactTopOfStack(OutputStream out,
		ReftableConfig cfg, byte[] newTable) throws IOException {
	List<Reftable> stack = refdb.stack().readers();
	// Only the most recent table participates in this compaction.
	Reftable last = stack.get(stack.size() - 1);

	// newTable is added second so its entries shadow the older table.
	List<Reftable> tables = new ArrayList<>(2);
	tables.add(last);
	tables.add(new ReftableReader(BlockSource.from(newTable)));

	ReftableCompactor compactor = new ReftableCompactor();
	compactor.setConfig(cfg);
	// Preserve deletions: the result is not the base of the stack, so
	// delete records must remain visible to shadow lower tables.
	compactor.setIncludeDeletes(true);
	compactor.addAll(tables);
	compactor.compact(out);
	return compactor.getStats();
}
/**
 * Executes {@code compact} and saves its output as the REFTABLE file of
 * {@code pack}, storing the writer statistics on the pack description.
 *
 * @param pack
 *            pack description that receives the compacted reftable.
 * @param compact
 *            compactor already loaded with the tables to merge.
 * @throws IOException
 *             if the reftable cannot be written.
 */
private void compactReftable(DfsPackDescription pack,
		ReftableCompactor compact) throws IOException {
	try (DfsOutputStream reftableOut = objdb.writeFile(pack, REFTABLE)) {
		// Bind the configuration to the live stream, then merge.
		compact.setConfig(configureReftable(reftableConfig, reftableOut));
		compact.compact(reftableOut);
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(compact.getStats());
	}
}
}
/**
 * Produces the reftable for {@code pack}: converts captured refs into a
 * new reftable when no GC reftable exists yet, otherwise compacts the
 * pre-existing reftable stack.
 *
 * @param pack
 *            pack description that receives the reftable.
 * @throws IOException
 *             if reading the stack or writing the table fails.
 */
private void writeReftable(DfsPackDescription pack) throws IOException {
	if (convertToReftable && !hasGcReftable()) {
		// No GC reftable yet: build one from refs captured earlier.
		writeReftable(pack, refsBefore);
		return;
	}
	// try-with-resources closes every reader in the stack on all paths.
	try (ReftableStack stack = ReftableStack.open(ctx, reftablesBefore)) {
		ReftableCompactor compact = new ReftableCompactor();
		compact.addAll(stack.readers());
		compact.setIncludeDeletes(includeDeletes);
		compactReftable(pack, compact);
	}
}
/**
 * Write a compaction to {@code out}.
 *
 * @param out
 *            stream to write the compacted tables to. Caller is
 *            responsible for closing {@code out}.
 * @throws java.io.IOException
 *             if tables cannot be read, or cannot be written.
 */
public void compact(OutputStream out) throws IOException {
	// Copy the table list so the merged view is independent of later
	// changes to the compactor's internal collection.
	MergedReftable mr = new MergedReftable(new ArrayList<>(tables));
	mr.setIncludeDeletes(includeDeletes);
	// Clamp to 0: update indexes in the output must not be negative.
	writer.setMinUpdateIndex(Math.max(minUpdateIndex, 0));
	writer.setMaxUpdateIndex(maxUpdateIndex);
	writer.begin(out);
	mergeRefs(mr);
	mergeLogs(mr);
	writer.finish();
	// Keep the stats so callers can query them after the write.
	stats = writer.getStats();
}
/**
 * Try to add this reader at the bottom of the stack.
 * <p>
 * A reader may be rejected by returning {@code false} if accepting it
 * would push the compactor past the byte budget configured through
 * {@link #setCompactBytesLimit(long)}. When this happens the caller
 * should stop trying to add tables, and execute the compaction.
 *
 * @param reader
 *            the reader to insert at the bottom of the stack. Caller is
 *            responsible for closing the reader.
 * @return {@code true} if the compactor accepted this table; {@code false}
 *         if the compactor has reached its limit.
 * @throws java.io.IOException
 *             if size of {@code reader}, or its update indexes cannot be read.
 */
public boolean tryAddFirst(ReftableReader reader) throws IOException {
	long sz = reader.size();
	// A limit of 0 (or negative) means "no budget"; otherwise reject the
	// table when it would exceed the remaining budget.
	if (compactBytesLimit > 0 && bytesToCompact + sz > compactBytesLimit) {
		return false;
	}
	bytesToCompact += sz;
	// Widen min/max update indexes to cover this reader's range.
	adjustUpdateIndexes(reader);
	tables.addFirst(reader);
	return true;
}
/**
 * Writes the output of {@code compact} as the REFTABLE extension file of
 * {@code pack} in {@code objdb}, attaching the resulting statistics to
 * the pack description.
 *
 * @param objdb
 *            object database the reftable file is created in.
 * @param pack
 *            pack description that receives the compacted reftable.
 * @param compact
 *            compactor already loaded with the tables to merge.
 * @throws IOException
 *             if the reftable cannot be written.
 */
private void writeReftable(DfsObjDatabase objdb, DfsPackDescription pack,
		ReftableCompactor compact) throws IOException {
	// try-with-resources closes the DFS stream even on failure.
	try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
		// Configure against the live stream before the merge begins.
		compact.setConfig(configureReftable(reftableConfig, out));
		compact.compact(out);
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(compact.getStats());
	}
}
/**
 * Compacts all source reftables into a single output reftable.
 *
 * @param ctx
 *            reader context used to open the source reftable stack.
 * @throws IOException
 *             if the source tables cannot be read or the result cannot
 *             be written.
 */
private void compactReftables(DfsReader ctx) throws IOException {
	DfsObjDatabase database = repo.getObjectDatabase();
	// Sort inputs into the database's canonical reftable order so the
	// most recent tables shadow older ones during the merge.
	Collections.sort(srcReftables, database.reftableComparator());

	try (ReftableStack sourceStack = ReftableStack.open(ctx, srcReftables)) {
		initOutDesc(database);
		ReftableCompactor compactor = new ReftableCompactor();
		compactor.setIncludeDeletes(true);
		compactor.addAll(sourceStack.readers());
		writeReftable(database, outDesc, compactor);
	}
}
/**
 * Add all of the tables, in the specified order.
 * <p>
 * Unconditionally adds all tables, ignoring the
 * {@link #setCompactBytesLimit(long)}.
 *
 * @param readers
 *            tables to compact. Tables should be ordered oldest first/most
 *            recent last so that the more recent tables can shadow the
 *            older results. Caller is responsible for closing the readers.
 * @throws java.io.IOException
 *             if the update indexes of a reader cannot be accessed.
 */
public void addAll(List<? extends Reftable> readers) throws IOException {
	tables.addAll(readers);
	// Only ReftableReader instances carry persisted update indexes;
	// widen the compactor's min/max range to include each of them.
	for (Reftable r : readers) {
		if (r instanceof ReftableReader) {
			adjustUpdateIndexes((ReftableReader) r);
		}
	}
}
/**
 * Try to add this reader at the bottom of the stack.
 * <p>
 * The reader is rejected with a {@code false} return if accepting it
 * would exceed the byte budget configured via
 * {@link #setCompactBytesLimit(long)}. In that case the caller should
 * stop adding tables and execute the compaction.
 *
 * @param reader
 *            the reader to insert at the bottom of the stack. Caller is
 *            responsible for closing the reader.
 * @return {@code true} if the compactor accepted this table; {@code false}
 *         if the compactor has reached its limit.
 * @throws java.io.IOException
 *             if size of {@code reader}, or its update indexes cannot be read.
 */
public boolean tryAddFirst(ReftableReader reader) throws IOException {
	long size = reader.size();
	// A non-positive limit disables the budget check entirely.
	boolean overBudget = compactBytesLimit > 0
			&& bytesToCompact + size > compactBytesLimit;
	if (overBudget) {
		return false;
	}
	bytesToCompact += size;
	adjustUpdateIndexes(reader);
	tables.addFirst(reader);
	return true;
}