/**
 * Write the pack index for {@code pack} and record its metadata on the
 * pack description.
 *
 * @param objdb
 *            database to create the index file in.
 * @param pack
 *            description updated with the INDEX extension, its file
 *            size, block size, and index version.
 * @param pw
 *            writer that produced the pack; emits the index stream.
 * @throws IOException
 *             the index could not be written.
 */
private static void writeIndex(DfsObjDatabase objdb,
		DfsPackDescription pack, PackWriter pw) throws IOException {
	try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
		// Wrap the stream so the number of bytes written can be
		// recorded as the index file size.
		CountingOutputStream counter = new CountingOutputStream(out);
		pw.writeIndex(counter);

		pack.addFileExt(INDEX);
		pack.setFileSize(INDEX, counter.getCount());
		pack.setBlockSize(INDEX, out.blockSize());
		pack.setIndexVersion(pw.getIndexVersion());
	}
}
/** {@inheritDoc} */
@Override
public long getDeltaCount() throws IOException {
	// Delegate to the pack description, which caches the statistic.
	DfsPackDescription desc = getPackDescription();
	return desc.getDeltaCount();
}
/** {@inheritDoc} */
@Override
public String toString() {
	// Identify this pack by its on-disk pack file name.
	String fileName = getFileName(PackExt.PACK);
	return fileName;
}
}
private boolean packIsExpiredGarbage(DfsPackDescription d, long now) { // Consider the garbage pack as expired when it's older than // garbagePackTtl. This check gives concurrent inserter threads // sufficient time to identify an object is not in the graph and should // have a new copy written, rather than relying on something from an // UNREACHABLE_GARBAGE pack. return d.getPackSource() == UNREACHABLE_GARBAGE && garbageTtlMillis > 0 && now - d.getLastModified() >= garbageTtlMillis; }
static Comparator<DfsPackDescription> reuseComparator() { return (a, b) -> { PackSource as = a.getPackSource(); PackSource bs = b.getPackSource(); if (as == bs && DfsPackDescription.isGC(as)) { // Push smaller GC files last; these likely have higher quality // delta compression and the contained representation should be // favored over other files. return Long.signum(b.getFileSize(PACK) - a.getFileSize(PACK)); } // DfsPackDescription.compareTo already did a reasonable sort. // Rely on Arrays.sort being stable, leaving equal elements. return 0; }; }
static Comparator<DfsPackDescription> reftableComparator() { return (a, b) -> { // GC, COMPACT reftables first by reversing default order. int c = PackSource.DEFAULT_COMPARATOR.reversed() .compare(a.getPackSource(), b.getPackSource()); if (c != 0) { return c; } // Lower maxUpdateIndex first. c = Long.signum(a.getMaxUpdateIndex() - b.getMaxUpdateIndex()); if (c != 0) { return c; } // Older reftable first. return Long.signum(a.getLastModified() - b.getLastModified()); }; }
// Fragment of the pack-writing sequence (the enclosing method begins
// outside this view): records metadata for the pack file, its index,
// and its bitmap index, then publishes the new description and stats.
// NOTE(review): `cnt` is declared twice in this excerpt; in the full
// method these declarations presumably live in separate
// try-with-resources scopes — confirm against the complete source.
pack.addFileExt(PACK);
pack.setBlockSize(PACK, out.blockSize());
// Count bytes of the primary index as it is written.
CountingOutputStream cnt = new CountingOutputStream(out);
pw.writeIndex(cnt);
pack.addFileExt(INDEX);
pack.setFileSize(INDEX, cnt.getCount());
pack.setBlockSize(INDEX, out.blockSize());
pack.setIndexVersion(pw.getIndexVersion());
// Count bytes of the bitmap index as it is written.
CountingOutputStream cnt = new CountingOutputStream(out);
pw.writeBitmapIndex(cnt);
pack.addFileExt(BITMAP_INDEX);
pack.setFileSize(BITMAP_INDEX, cnt.getCount());
pack.setBlockSize(BITMAP_INDEX, out.blockSize());
pack.setPackStats(stats);
// Record startTimeMillis as the pack's modification time — presumably
// the time this operation began; confirm against the enclosing method.
pack.setLastModified(startTimeMillis);
newPackDesc.add(pack);
newPackStats.add(stats);
// Fragment of a comparator chain (the Comparator.comparing(...) call
// opens outside this view). Sorts by pack source first, then breaks
// ties: for GC packs, smaller file size first; then newer
// last-modified first; then fewer objects first.
// NOTE(review): several closing braces for the inner if blocks are
// missing from this excerpt — confirm against the complete source.
DfsPackDescription::getPackSource, packSourceComparator)
		.thenComparing((a, b) -> {
			PackSource as = a.getPackSource();
			PackSource bs = b.getPackSource();
			if (as == bs && isGC(as)) {
				// Smaller GC pack files sort first.
				int cmp = Long.signum(
						a.getFileSize(PACK) - b.getFileSize(PACK));
				if (cmp != 0) {
					return cmp;
			// Newer packs sort first.
			int cmp = Long.signum(
					b.getLastModified() - a.getLastModified());
			if (cmp != 0) {
				return cmp;
			// Fewer objects sorts first.
			return Long.signum(a.getObjectCount() - b.getObjectCount());
		});
/**
 * Record pack statistics, copying the totals onto this description.
 *
 * @param stats
 *            statistics produced while writing the pack.
 * @return {@code this}, for call chaining.
 */
DfsPackDescription setPackStats(PackStatistics stats) {
	this.packStats = stats;
	// Mirror the headline totals into the description's own fields.
	long bytes = stats.getTotalBytes();
	long objects = stats.getTotalObjects();
	long deltas = stats.getTotalDeltas();
	setFileSize(PACK, bytes);
	setObjectCount(objects);
	setDeltaCount(deltas);
	return this;
}
private long estimateGcPackSize(PackSource first, PackSource... rest) { EnumSet<PackSource> sourceSet = EnumSet.of(first, rest); // Every pack file contains 12 bytes of header and 20 bytes of trailer. // Include the final pack file header and trailer size here and ignore // the same from individual pack files. long size = 32; for (DfsPackDescription pack : getSourcePacks()) { if (sourceSet.contains(pack.getPackSource())) { size += pack.getFileSize(PACK) - 32; } } return size; }
/**
 * Compact the reftable stack and write the result as the REFTABLE
 * extension of {@code pack}.
 *
 * @param objdb
 *            database to create the reftable file in.
 * @param pack
 *            description updated with the REFTABLE extension and the
 *            compaction statistics.
 * @param compact
 *            compactor holding the tables to merge.
 * @throws IOException
 *             the reftable could not be written.
 */
private void writeReftable(DfsObjDatabase objdb, DfsPackDescription pack,
		ReftableCompactor compact) throws IOException {
	try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
		// Configure the compactor from reftableConfig and the output
		// stream — presumably aligning block sizing with the stream;
		// confirm against configureReftable.
		compact.setConfig(configureReftable(reftableConfig, out));
		compact.compact(out);
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(compact.getStats());
	}
}
// Fragment (the enclosing method begins outside this view): reject
// packs that are not unreachable garbage, or whose pack file is
// already at or over the coalesce size limit.
// NOTE(review): the closing brace of this if block is missing from
// this excerpt — confirm against the complete source.
if (d.getPackSource() != UNREACHABLE_GARBAGE
		|| d.getFileSize(PackExt.PACK) >= coalesceGarbageLimit) {
	return false;
// Compute day boundaries — presumably to group garbage packs by the
// calendar day they were last modified; confirm via dayStartInMillis.
long lastModified = d.getLastModified();
long dayStartLastModified = dayStartInMillis(lastModified);
long dayStartToday = dayStartInMillis(now);
// Fragment of getBitmapIndex (parts of the method body are missing
// from this excerpt; `idxref` is declared outside the visible span).
PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
	// No bitmap for invalid or garbage packs, or when the pack has no
	// bitmap index file at all.
	if (invalid || isGarbage() || !desc.hasFileExt(BITMAP_INDEX))
		return null;
	DfsStreamKey bitmapKey = desc.getStreamKey(BITMAP_INDEX);
	idxref = cache.getRef(bitmapKey);
	if (idxref != null) {
		// NOTE(review): the code between this null check and the
		// catch clauses below is missing from this excerpt — confirm
		// against the complete source.
		// Short read: report the bitmap file name for diagnosis.
		throw new IOException(MessageFormat.format(
				DfsText.get().shortReadOfIndex,
				desc.getFileName(BITMAP_INDEX)), e);
	} catch (IOException e) {
		// Wrap with the pack's file name, preserving the cause.
		throw new IOException(MessageFormat.format(
				DfsText.get().cannotReadIndex,
				desc.getFileName(BITMAP_INDEX)), e);
/**
 * Construct a reader for an existing, packfile.
 *
 * @param cache
 *            cache that owns the pack data.
 * @param desc
 *            description of the pack within the DFS.
 */
DfsPackFile(DfsBlockCache cache, DfsPackDescription desc) {
	super(cache, desc, PACK);

	// Only honor a recorded block size when one is actually known.
	int blockSize = desc.getBlockSize(PACK);
	if (blockSize > 0) {
		setBlockSize(blockSize);
	}

	long size = desc.getFileSize(PACK);
	if (size > 0) {
		length = size;
	} else {
		// Size unknown until the pack is first read.
		length = -1;
	}
}
/** * Sort packs according to the optimal lookup ordering. * <p> * This method tries to position packs in the order readers should examine * them when looking for objects by SHA-1. The default tries to sort packs * with more recent modification dates before older packs, and packs with * fewer objects before packs with more objects. * * @param b * the other pack. */ public int compareTo(DfsPackDescription b) { // Cluster by PackSource, pushing UNREACHABLE_GARBAGE to the end. PackSource as = getPackSource(); PackSource bs = b.getPackSource(); if (as != null && bs != null) { int cmp = as.category - bs.category; if (cmp != 0) return cmp; } // Newer packs should sort first. int cmp = Long.signum(b.getLastModified() - getLastModified()); if (cmp != 0) return cmp; // Break ties on smaller index. Readers may get lucky and find // the object they care about in the smaller index. This also pushes // big historical packs to the end of the list, due to more objects. return Long.signum(getObjectCount() - b.getObjectCount()); }
// Fragment (the enclosing method begins outside this view): drop the
// write buffers and record the finished pack file's metadata.
currBuf = null;
readBlock = null;
packDsc.addFileExt(PACK);
// packEnd — presumably the total bytes written for the pack; confirm
// against the enclosing method.
packDsc.setFileSize(PACK, packEnd);
packDsc.setBlockSize(PACK, blockSize);
/**
 * Write the pack index for {@code pack} and record its metadata on the
 * pack description.
 *
 * @param objdb
 *            database to create the index file in.
 * @param pack
 *            description updated with the INDEX extension, its file
 *            size, and index version.
 * @param pw
 *            writer that produced the pack; emits the index stream.
 * @throws IOException
 *             the index could not be written.
 */
private static void writeIndex(DfsObjDatabase objdb,
		DfsPackDescription pack, PackWriter pw) throws IOException {
	// try-with-resources guarantees the stream is closed on all paths,
	// replacing the manual try/finally of the previous version.
	try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
		// Wrap the stream so the bytes written can be recorded as the
		// index file size.
		CountingOutputStream cnt = new CountingOutputStream(out);
		pw.writeIndex(cnt);
		pack.addFileExt(INDEX);
		pack.setFileSize(INDEX, cnt.getCount());
		// NOTE(review): unlike the sibling writeIndex overload in this
		// file, this version does not call setBlockSize(INDEX, ...) —
		// confirm whether that omission is intentional.
		pack.setIndexVersion(pw.getIndexVersion());
	}
}
/**
 * Whether this pack holds unreachable garbage objects.
 *
 * @return true if the pack's source is UNREACHABLE_GARBAGE.
 */
final boolean isGarbage() {
	PackSource source = desc.getPackSource();
	return UNREACHABLE_GARBAGE == source;
}
/**
 * Sum the object counts of all packs selected for compaction.
 *
 * @return total number of objects across {@code packsBefore}.
 */
private int objectsBefore() {
	int total = 0;
	for (DfsPackFile pack : packsBefore) {
		// NOTE(review): if getObjectCount() returns long, the compound
		// assignment silently narrows to int — confirm counts cannot
		// exceed Integer.MAX_VALUE.
		total += pack.getPackDescription().getObjectCount();
	}
	return total;
}
/**
 * Get cache key for use by the block cache.
 *
 * @param ext
 *            the file extension.
 * @return cache key for use by the block cache.
 */
public DfsStreamKey getStreamKey(PackExt ext) {
	// Keys are derived from the owning repository, the file's name
	// for this extension, and the extension itself.
	String fileName = getFileName(ext);
	return DfsStreamKey.of(getRepositoryDescription(), fileName, ext);
}