	@Override
	public boolean equals(Object o) {
		return o instanceof ForReverseIndex
				&& idxKey.equals(((ForReverseIndex) o).idxKey);
	}
}
/**
 * Get cache key for use by the block cache.
 *
 * @param ext
 *            the file extension.
 * @return cache key for use by the block cache.
 */
public DfsStreamKey getStreamKey(PackExt ext) {
	return DfsStreamKey.of(getRepositoryDescription(), getFileName(ext), ext);
}
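// A minimal standalone sketch (the demo class and variable names are
// hypothetical, not part of JGit) showing why the key is derived from the
// file name and PackExt: the .pack and .idx files of the same pack yield
// distinct stream keys, so their cached blocks can never alias each other.
import org.eclipse.jgit.internal.storage.dfs.DfsRepositoryDescription;
import org.eclipse.jgit.internal.storage.dfs.DfsStreamKey;
import org.eclipse.jgit.internal.storage.pack.PackExt;

class StreamKeyDemo {
	public static void main(String[] args) {
		DfsRepositoryDescription repo = new DfsRepositoryDescription("demo");
		DfsStreamKey pack = DfsStreamKey.of(repo, "pack-1.pack", PackExt.PACK);
		DfsStreamKey idx = DfsStreamKey.of(repo, "pack-1.idx", PackExt.INDEX);
		// Different file name and extension: expected to print false.
		System.out.println(pack.equals(idx));
	}
}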
// True when this block belongs to the wanted stream and the half-open
// byte range [start, end) covers the requested position.
boolean contains(DfsStreamKey want, long pos) {
	return stream.equals(want) && start <= pos && pos < end;
}
@SuppressWarnings("unchecked") private DfsBlockCache(DfsBlockCacheConfig cfg) { tableSize = tableSize(cfg); if (tableSize < 1) throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1); table = new AtomicReferenceArray<>(tableSize); loadLocks = new ReentrantLock[cfg.getConcurrencyLevel()]; for (int i = 0; i < loadLocks.length; i++) loadLocks[i] = new ReentrantLock(true /* fair */); maxBytes = cfg.getBlockLimit(); maxStreamThroughCache = (long) (maxBytes * cfg.getStreamRatio()); blockSize = cfg.getBlockSize(); blockSizeShift = Integer.numberOfTrailingZeros(blockSize); clockLock = new ReentrantLock(true /* fair */); String none = ""; //$NON-NLS-1$ clockHand = new Ref<>( DfsStreamKey.of(new DfsRepositoryDescription(none), none, null), -1, 0, null); clockHand.next = clockHand; statHit = new AtomicReference<>(newCounters()); statMiss = new AtomicReference<>(newCounters()); statEvict = new AtomicReference<>(newCounters()); liveBytes = new AtomicReference<>(newCounters()); }
@SuppressWarnings("unchecked") private <T> Ref<T> scanRef(HashEntry n, DfsStreamKey key, long position) { for (; n != null; n = n.next) { Ref<T> r = n.ref; if (r.position == position && r.key.equals(key)) return r.get() != null ? r : null; } return null; }
Entry get(DfsStreamKey key, long position) {
	Entry e = table[hash(position)];
	for (; e != null; e = e.tableNext) {
		if (e.offset == position && key.equals(e.pack)) {
			// Promote the entry so the eviction scan sees it as
			// recently used.
			moveToHead(e);
			return e;
		}
	}
	return null;
}
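// A self-contained sketch of the same idiom (all names hypothetical, not
// JGit API): hash the position to pick a bucket, walk the collision chain
// comparing both halves of the compound key (stream, offset), and treat a
// hit as a recency signal. Insertion simply prepends to the chain.
import java.util.Objects;

class MiniBlockTable {
	static final class Entry {
		final String pack;   // stream identity
		final long offset;   // block position within the stream
		Entry tableNext;     // collision chain within one bucket
		Entry(String pack, long offset) {
			this.pack = pack;
			this.offset = offset;
		}
	}

	private final Entry[] table = new Entry[64];

	private int hash(long position) {
		return (int) (position >>> 10) & (table.length - 1);
	}

	Entry get(String pack, long position) {
		for (Entry e = table[hash(position)]; e != null; e = e.tableNext) {
			if (e.offset == position && Objects.equals(pack, e.pack)) {
				// A real cache would promote e in its recency list here.
				return e;
			}
		}
		return null; // miss: caller loads the block and inserts it
	}

	void put(Entry e) {
		int b = hash(e.offset);
		e.tableNext = table[b];
		table[b] = e;
	}
}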
@SuppressWarnings("unchecked") private DfsBlockCache(DfsBlockCacheConfig cfg) { tableSize = tableSize(cfg); if (tableSize < 1) throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1); table = new AtomicReferenceArray<>(tableSize); loadLocks = new ReentrantLock[cfg.getConcurrencyLevel()]; for (int i = 0; i < loadLocks.length; i++) loadLocks[i] = new ReentrantLock(true /* fair */); maxBytes = cfg.getBlockLimit(); maxStreamThroughCache = (long) (maxBytes * cfg.getStreamRatio()); blockSize = cfg.getBlockSize(); blockSizeShift = Integer.numberOfTrailingZeros(blockSize); clockLock = new ReentrantLock(true /* fair */); String none = ""; //$NON-NLS-1$ clockHand = new Ref<>( DfsStreamKey.of(new DfsRepositoryDescription(none), none, null), -1, 0, null); clockHand.next = clockHand; statHit = new AtomicReference<>(newCounters()); statMiss = new AtomicReference<>(newCounters()); statEvict = new AtomicReference<>(newCounters()); liveBytes = new AtomicReference<>(newCounters()); }
void addPack(DfsPackFile newPack) throws IOException {
	PackList o, n;
	do {
		o = packList.get();
		if (o == NO_PACKS) {
			// The repository may not have needed any existing objects to
			// complete the current task of creating a pack (e.g. push of a
			// pack with no external deltas). Because we don't scan for
			// newly added packs on missed object lookups, scan now to
			// make sure all older packs are available in the packList.
			o = scanPacks(o);

			// It's possible the scan identified the pack we were asked to
			// add, as the pack was already committed via commitPack().
			// If this is the case, return without changing the list.
			for (DfsPackFile p : o.packs) {
				if (p.key.equals(newPack.key)) {
					return;
				}
			}
		}

		DfsPackFile[] packs = new DfsPackFile[1 + o.packs.length];
		packs[0] = newPack;
		System.arraycopy(o.packs, 0, packs, 1, o.packs.length);
		n = new PackListImpl(packs, o.reftables);
	} while (!packList.compareAndSet(o, n));
}
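// A self-contained sketch (hypothetical names, pure JDK, not JGit API) of
// the lock-free copy-on-write pattern addPack uses: read the current
// immutable snapshot, build a new array with the element prepended, and
// publish it with compareAndSet, retrying if another thread raced us.
import java.util.concurrent.atomic.AtomicReference;

class CopyOnWriteList {
	private final AtomicReference<String[]> items =
			new AtomicReference<>(new String[0]);

	void prepend(String item) {
		String[] o, n;
		do {
			o = items.get();              // snapshot the current list
			n = new String[o.length + 1];
			n[0] = item;                  // newest entry goes first
			System.arraycopy(o, 0, n, 1, o.length);
		} while (!items.compareAndSet(o, n)); // retry on concurrent update
	}

	String[] snapshot() {
		return items.get().clone();
	}
}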