@Override
public ObjectReader newReader() {
	// Hand out a fresh reader bound to this object database; readers
	// share the database connection and configured options.
	final DhtReader r = new DhtReader(this);
	return r;
}
@Override public void walkAdviceBeginCommits(RevWalk rw, Collection<RevCommit> roots) throws IOException { endPrefetch(); // Don't assign the prefetcher right away. Delay until its // configured as push might invoke our own methods that may // try to call back into the active prefetcher. // Prefetcher p = prefetch(OBJ_COMMIT, readerOptions.getWalkCommitsPrefetchRatio()); p.push(this, roots); prefetcher = p; }
QueueObjectLookup(DhtReader reader, boolean reportMissing) {
	// Cache the reader and its frequently used collaborators.
	this.reader = reader;
	this.repo = reader.getRepositoryKey();
	this.db = reader.getDatabase();
	this.options = reader.getOptions();
	this.reportMissing = reportMissing;

	// Working state for batching object index lookups.
	this.context = Context.FAST_MISSING_OK;
	this.tmp = new ArrayList<ObjectInfo>(4);
	this.toRetry = new ArrayList<T>();
	this.concurrentBatches = options.getObjectIndexConcurrentBatches();
}
RecentChunks(DhtReader reader) {
	// Size-bounded cache of chunks this reader touched recently.
	this.reader = reader;
	this.stats = reader.getStatistics();
	this.maxBytes = reader.getOptions().getChunkLimit();
	this.byKey = new HashMap<ChunkKey, Node>();
}
Prefetcher(DhtReader reader, int objectType, int prefetchLimitInBytes) { this.db = reader.getDatabase(); this.stats = reader.getStatistics(); this.objectType = objectType; this.ready = new HashMap<ChunkKey, PackChunk>(); this.status = new HashMap<ChunkKey, Status>(); this.queue = new LinkedList<ChunkKey>(); this.followEdgeHints = reader.getOptions().isPrefetchFollowEdgeHints(); this.averageChunkSize = reader.getInserterOptions().getChunkSize(); this.highWaterMark = prefetchLimitInBytes; int lwm = (highWaterMark / averageChunkSize) - 4; if (lwm <= 0) lwm = (highWaterMark / averageChunkSize) / 2; lowWaterMark = lwm * averageChunkSize; }
@Override
public void release() {
	// Restore the recent-chunk cache to its configured byte limit and
	// drop references held only for the duration of the walk.
	final int limit = reader.getOptions().getChunkLimit();
	reader.getRecentChunks().setMaxBytes(limit);
	currChunk = null;
	prefetcher = null;
}
// NOTE(review): this span is a truncated fragment — the enclosing
// method signature, the opening of the try block, and the closing
// braces are not visible here. TODO recover the original structure.
// Queue both tree ids for a single batched object-index lookup.
toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), start));
toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), end));
db.objectIndex().get(Context.READ_REPAIR, toFind, sync);
trees = sync.get(ctx.getOptions().getTimeout());
} catch (InterruptedException e) {
	// Interrupted while waiting on the DHT; surface as a timeout.
	throw new DhtTimeoutException(e);
	// NOTE(review): the two throws below are unreachable after the
	// throw above — most likely they belonged to separate branches
	// (e.g. missing-tree checks) that were lost when this file was
	// flattened. Verify against the original before relying on this.
	throw DhtReader.missing(start, OBJ_TREE);
	throw DhtReader.missing(end, OBJ_TREE);
// NOTE(review): this span is a garbled fragment — 'chunk' is
// redeclared inside the loop (invalid Java), the load/continue pair
// is duplicated, and the closing braces are missing. The original
// likely tried the prefetcher first, then fell back to loading the
// chunk directly, throwing MissingObjectException when no link
// resolved. TODO recover the original method structure.
PackChunk chunk = load(key);
if (chunk != null && chunk.hasIndex()) {
	int pos = chunk.findOffset(repo, objId);
	for (ObjectInfo link : find(objId)) {
		PackChunk chunk;
		chunk = prefetcher.get(link.getChunkKey());
		if (chunk == null) {
			chunk = load(link.getChunkKey());
			if (chunk == null)
				continue;
			chunk = load(link.getChunkKey());
			if (chunk == null)
				continue;
			throw missing(objId, typeHint);
@Override
public ObjectLoader open(AnyObjectId objId, int typeHint)
		throws MissingObjectException, IncorrectObjectTypeException,
		IOException {
	// Fast path: the object may still be inside a recently used chunk.
	final ObjectLoader cached = recentChunks.open(repo, objId, typeHint);
	if (cached != null)
		return cached;

	// Slow path: locate the chunk holding the object, decode it, and
	// remember the chunk so nearby objects hit the fast path next time.
	final ChunkAndOffset at = getChunk(objId, typeHint, false);
	final ObjectLoader ldr = PackChunk.read(at.chunk, at.offset, this,
			typeHint);
	recentChunk(at.chunk);
	return ldr;
}
ObjectWriter(DhtReader ctx, Prefetcher prefetch) {
	this.ctx = ctx;
	this.prefetch = prefetch;

	// Cap concurrent chunk-meta batches at the configured batch size.
	batchSize = ctx.getOptions().getObjectIndexBatchSize();
	metaBatches = new Semaphore(batchSize);
	metaError = new AtomicReference<DhtException>();

	// Bookkeeping for chunk visitation and metadata resolution.
	allVisits = new LinkedHashMap<ChunkKey, Integer>();
	allMeta = new HashMap<ChunkKey, ChunkMeta>();
	metaMissing = new HashSet<ChunkKey>();
	metaToRead = new HashSet<ChunkKey>();
	curVisit = 1;
}
/**
 * Dispatch the pending set of chunk-meta keys as one async batch.
 * <p>
 * Blocks until a batch slot is free (bounded by the semaphore sized
 * from the object index batch size), then hands the accumulated
 * {@code metaToRead} keys to the database and starts a fresh set.
 *
 * @param context
 *            read consistency context passed through to the database.
 * @throws InterruptedException
 *             current thread was interrupted while waiting for a slot.
 * @throws DhtTimeoutException
 *             no batch slot became free within the configured timeout.
 */
private void startBatch(Context context) throws InterruptedException,
		DhtTimeoutException {
	Timeout to = ctx.getOptions().getTimeout();
	// Bound the number of in-flight meta batches; give up after the
	// configured timeout rather than queueing unboundedly.
	if (!metaBatches.tryAcquire(1, to.getTime(), to.getUnit()))
		throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta);
	// Capture the current key set before resetting it; the async
	// MetaLoader callback operates on this snapshot.
	Set<ChunkKey> keys = metaToRead;
	ctx.getDatabase().chunk().getMeta(
			context, keys, new MetaLoader(context, keys));
	metaToRead = new HashSet<ChunkKey>();
}
@Override
public ObjectStream openStream() throws MissingObjectException, IOException {
	// Consume the cached first fragment if one is available; otherwise
	// fetch it from the database by the first fragment key.
	PackChunk first = firstChunk;
	if (first == null)
		first = ctx.getChunk(ChunkKey.fromString(meta.getFragment(0)));
	else
		firstChunk = null;

	// Chain fragments together, inflate, and buffer the result.
	InputStream in = new ChunkInputStream(meta, ctx, pos, first);
	in = new BufferedInputStream(new InflaterInputStream(in), 8192);
	return new ObjectStream.Filter(type, sz, in);
}
/**
 * Publish an inflated delta base into the reader's base cache.
 * <p>
 * Later deltas resolving against the same base position can then
 * reuse the materialized bytes instead of re-inflating the chunk.
 *
 * @param ctx
 *            reader whose delta base cache receives the entry.
 * @param type
 *            object type of the base, e.g. {@code OBJ_BLOB}.
 * @param data
 *            fully inflated content of the base object.
 */
void putBase(DhtReader ctx, int type, byte[] data) {
	ctx.getDeltaBaseCache().put(baseChunk, basePos, type, data);
}
// Closes the enclosing class (its declaration is outside this view).
}
@Override
public boolean has(AnyObjectId objId, int typeHint) throws IOException {
	// Cheapest checks first, preserving short-circuit order: an id
	// already carrying its chunk, the recent-chunk cache, the ref
	// database's chunk hints, and finally the object index lookup.
	return objId instanceof RefDataUtil.IdWithChunk
			|| recentChunks.has(repo, objId)
			|| repository.getRefDatabase().findChunk(objId) != null
			|| !find(objId).isEmpty();
}
@Override
public void walkAdviceEnd() {
	// Walk is complete; stop and release the active prefetcher.
	endPrefetch();
}
/**
 * Build the replacement {@code RefData} record for this ref update.
 * <p>
 * Copies the old data, clears stale fields, bumps the sequence, and
 * points the target at the new object id. Chunk-key hints and tag
 * peeling are best-effort: if the object cannot be parsed, the hints
 * are simply omitted and resolved lazily at read time.
 *
 * @return the new immutable ref data to store.
 * @throws IOException
 *             reading the object to locate its chunk failed for a
 *             reason other than the object being missing.
 */
private RefData newData() throws IOException {
	RefData.Builder d = RefData.newBuilder(oldData);
	clearRefData(d);
	updateSequence(d);

	ObjectId newId = getNewObjectId();
	d.getTargetBuilder().setObjectName(newId.name());
	try {
		DhtReader ctx = (DhtReader) rw.getObjectReader();
		RevObject obj = rw.parseAny(newId);

		// Record which chunk holds the target, when known, so readers
		// can skip an object index lookup.
		ChunkKey oKey = ctx.findChunk(newId);
		if (oKey != null)
			d.getTargetBuilder().setChunkKey(oKey.asString());

		// For annotated tags also record the peeled object and, when
		// known, its chunk.
		if (obj instanceof RevTag) {
			ObjectId pId = rw.peel(obj);
			ChunkKey pKey = ctx.findChunk(pId);
			if (pKey != null)
				d.getPeeledBuilder().setChunkKey(pKey.asString());
			d.getPeeledBuilder().setObjectName(pId.name());
		}
	} catch (MissingObjectException e) {
		// Automatic peeling failed. Ignore the problem and deal with it
		// during reading later, this is the classical Git behavior on disk.
	}
	return d.build();
}
void push(DhtReader ctx, Collection<RevCommit> roots) { // Approximate walk by using hints from the most recent commit. // Since the commits were recently parsed by the reader, we can // ask the reader for their chunk locations and most likely get // cache hits. int time = -1; PackChunk chunk = null; for (RevCommit cmit : roots) { if (time < cmit.getCommitTime()) { ChunkAndOffset p = ctx.getChunkGently(cmit); if (p != null && p.chunk.getMeta() != null) { time = cmit.getCommitTime(); chunk = p.chunk; } } } if (chunk != null) { synchronized (this) { status.put(chunk.getChunkKey(), Status.DONE); push(chunk.getMeta()); } } }
QueueObjectLookup(DhtReader reader, boolean reportMissing) {
	// Cache the reader and its frequently used collaborators.
	this.reader = reader;
	this.repo = reader.getRepositoryKey();
	this.db = reader.getDatabase();
	this.options = reader.getOptions();
	this.reportMissing = reportMissing;

	// Working state for batching object index lookups.
	this.context = Context.FAST_MISSING_OK;
	this.tmp = new ArrayList<ObjectInfo>(4);
	this.toRetry = new ArrayList<T>();
	this.concurrentBatches = options.getObjectIndexConcurrentBatches();
}