/**
 * Assemble the chunk from the data accumulated so far.
 *
 * @return the PackChunk instance.
 * @throws DhtException
 *             if early validation indicates the chunk data is corrupt
 *             or not recognized by this version of the library.
 */
public PackChunk build() throws DhtException {
	// The index is optional; a chunk read back without its index
	// simply carries a null ChunkIndex.
	final ChunkIndex index = indexBuf != null
			? ChunkIndex.fromBytes(chunkKey, indexBuf, indexPtr, indexLen)
			: null;
	return new PackChunk(chunkKey, dataBuf, dataPtr, dataLen, index, meta);
}
}
/**
 * Search every chunk currently sitting in the ready pool for an object.
 *
 * @param repo
 *            repository the lookup is scoped to.
 * @param objId
 *            object to locate.
 * @return the chunk and offset of the object, or null if no ready chunk
 *         contains it.
 */
synchronized ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) {
	for (PackChunk candidate : ready.values()) {
		final int offset = candidate.findOffset(repo, objId);
		if (offset < 0)
			continue;
		// Found it; hand the chunk out and mark it consumed.
		return new ChunkAndOffset(
				useReadyChunk(candidate.getChunkKey()), offset);
	}
	return null;
}
/**
 * Remove a chunk from the ready pool and hand it to the caller.
 *
 * @param key
 *            key of the chunk to consume.
 * @return the chunk formerly held in the ready pool.
 */
private PackChunk useReadyChunk(ChunkKey key) {
	final PackChunk taken = ready.remove(key);

	status.put(taken.getChunkKey(), Status.DONE);
	bytesReady -= taken.getTotalSize();

	// Consuming a chunk frees space; optionally queue up more work.
	if (automaticallyPushHints) {
		push(taken.getMeta());
		maybeStartGet();
	}
	return taken;
}
/**
 * Record a chunk as loaded and available to readers.
 *
 * @param chunk
 *            the chunk that finished loading.
 */
private void chunkIsReady(PackChunk chunk) {
	final ChunkKey key = chunk.getChunkKey();

	ready.put(key, chunk);
	bytesReady += chunk.getTotalSize();

	// If a reader blocked waiting on this chunk, wake it up now.
	final Status prior = status.put(key, Status.READY);
	if (prior == Status.WAITING)
		notifyAll();
}
@Override protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException { int n = dbChunk.read(dbPtr, dst, pos, cnt); if (0 < n) { dbPtr += n; return n; } // ChunkMeta for fragments is delayed writing, so it isn't available // on the chunk if the chunk was read-back from the database. Use // our copy of ChunkMeta instead of the PackChunk's copy. ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey()); if (meta == null) return 0; ChunkKey next = ChunkMetaUtil.getNextFragment(meta, dbChunk.getChunkKey()); if (next == null) return 0; seekChunk(next, false); n = dbChunk.read(0, dst, pos, cnt); dbPtr = n; return n; }
case OBJ_TAG: { if (delta != null) { data = inflate(sz, pc, pos + p, ctx); type = typeCode; break SEARCH; if (sz < Integer.MAX_VALUE && !pc.isFragment()) { try { data = pc.inflateOne(sz, pos + p, ctx); return new ObjectLoader.SmallObject(typeCode, data); } catch (LargeObjectException tooBig) { baseChunkKey = pc.getChunkKey(); basePosInChunk = pos - (int) base; } else { if (baseChunkKey != pc.getChunkKey()) pc = ctx.getChunk(baseChunkKey); pos = basePosInChunk; checkCycle(delta, pc.key, pos); delta = new Delta(delta, // pc.key, pos, (int) sz, p + 20, // nc.getChunkKey(), base); if (sz != delta.deltaSize) break SEARCH; pc.getChunkKey(), // Integer.valueOf(pos)));
void copyObjectAsIs(PackOutputStream out, DhtObjectToPack obj, boolean validate, DhtReader ctx) throws IOException, StoredObjectRepresentationNotAvailableException { if (validate && !isValid()) { StoredObjectRepresentationNotAvailableException gone; DhtText.get().corruptChunk, getChunkKey()))); throw gone; if (isFragment()) { int cnt = meta.getFragmentCount(); for (int fragId = 1; fragId < cnt; fragId++) { PackChunk pc = ctx.getChunk(ChunkKey.fromString( meta.getFragment(fragId))); pc.copyEntireChunkAsIs(out, obj, validate);
public ObjectLoader open() throws IOException { ChunkKey chunkKey = current.chunkKey; // Objects returned by the queue are clustered by chunk. This object // is either in the current chunk, or are the next chunk ready on the // prefetcher. Anything else is a programming error. // PackChunk chunk; if (currChunk != null && chunkKey.equals(currChunk.getChunkKey())) chunk = currChunk; else { chunk = prefetcher.get(chunkKey); if (chunk == null) throw new DhtMissingChunkException(chunkKey); currChunk = chunk; reader.recentChunk(chunk); } if (current.info != null) { int ptr = current.info.getOffset(); int type = current.info.getType(); return PackChunk.read(chunk, ptr, reader, type); } else { int ptr = chunk.findOffset(repo, current.object); if (ptr < 0) throw DhtReader.missing(current.object, ObjectReader.OBJ_ANY); return PackChunk.read(chunk, ptr, reader, ObjectReader.OBJ_ANY); } }
if (chunk != null && chunk.hasIndex()) { int pos = chunk.findOffset(repo, objId); if (0 <= pos) return new ChunkAndOffset(chunk, pos); continue; if (prefetcher.isType(typeHint)) prefetcher.push(chunk.getMeta());
void copyEntireChunkAsIs(PackOutputStream out, DhtObjectToPack obj, boolean validate) throws IOException { if (validate && !isValid()) { if (obj != null) throw new CorruptObjectException(obj, MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); else throw new DhtException(MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); } // Do not copy the trailer onto the output stream. out.write(dataBuf, dataPtr, dataLen - TRAILER_SIZE); }
public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp, boolean validate) throws IOException, StoredObjectRepresentationNotAvailableException { DhtObjectToPack obj = (DhtObjectToPack) otp; try { PackChunk chunk = recentChunks.get(obj.chunk); if (chunk == null) { chunk = prefetcher.get(obj.chunk); if (chunk == null) { // This should never happen during packing, it implies // the fetch plan was incorrect. Unfortunately that can // occur if objects need to be recompressed on the fly. // stats.access(obj.chunk).cntCopyObjectAsIs_PrefetchMiss++; chunk = getChunk(obj.chunk); } if (!chunk.isFragment()) recentChunk(chunk); } chunk.copyObjectAsIs(out, obj, validate, this); } catch (DhtMissingChunkException missingChunk) { stats.access(missingChunk.getChunkKey()).cntCopyObjectAsIs_InvalidChunk++; throw new StoredObjectRepresentationNotAvailableException(otp); } }
ObjectLoader open(RepositoryKey repo, AnyObjectId objId, int typeHint) throws IOException { if (objId instanceof IdWithChunk) { PackChunk chunk = get(((IdWithChunk) objId).getChunkKey()); if (chunk != null) { int pos = chunk.findOffset(repo, objId); if (0 <= pos) return PackChunk.read(chunk, pos, reader, typeHint); } // IdWithChunk is only a hint, and can be wrong. Locally // searching is faster than looking in the Database. } for (Node n = lruHead; n != null; n = n.next) { int pos = n.chunk.findOffset(repo, objId); if (0 <= pos) { hit(n); stats.recentChunks_Hits++; return PackChunk.read(n.chunk, pos, reader, typeHint); } } return null; }
void push(DhtReader ctx, Collection<RevCommit> roots) { // Approximate walk by using hints from the most recent commit. // Since the commits were recently parsed by the reader, we can // ask the reader for their chunk locations and most likely get // cache hits. int time = -1; PackChunk chunk = null; for (RevCommit cmit : roots) { if (time < cmit.getCommitTime()) { ChunkAndOffset p = ctx.getChunkGently(cmit); if (p != null && p.chunk.getMeta() != null) { time = cmit.getCommitTime(); chunk = p.chunk; } } } if (chunk != null) { synchronized (this) { status.put(chunk.getChunkKey(), Status.DONE); push(chunk.getMeta()); } } }
static ObjectLoader read(PackChunk pc, int pos, final DhtReader ctx, final int typeHint) throws IOException { try { return read1(pc, pos, ctx, typeHint, true /* use recentChunks */); } catch (DeltaChainCycleException cycleFound) { // A cycle can occur if recentChunks cache was used by the reader // to satisfy an OBJ_REF_DELTA, but the chunk that was chosen has // a reverse delta back onto an object already being read during // this invocation. Its not as uncommon as it sounds, as the Git // wire protocol can sometimes copy an object the repository already // has when dealing with reverts or cherry-picks. // // Work around the cycle by disabling the recentChunks cache for // this resolution only. This will force the DhtReader to re-read // OBJECT_INDEX and consider only the oldest chunk for any given // object. There cannot be a cycle if the method only walks along // the oldest chunks. try { ctx.getStatistics().deltaChainCycles++; return read1(pc, pos, ctx, typeHint, false /* no recentChunks */); } catch (DeltaChainCycleException cannotRecover) { throw new DhtException(MessageFormat.format( DhtText.get().cycleInDeltaChain, pc.getChunkKey(), Integer.valueOf(pos))); } } }
ChunkMeta meta = chunk.getMeta(); if (meta != null && meta.getBaseChunkCount() != 0) { for (ChunkMeta.BaseChunk base : meta.getBaseChunkList()) { chunk.copyEntireChunkAsIs(out, null, validate);
final byte[] dstbuf = newResult(sz); final Inflater inf = reader.inflater(); final int offset = pos; throw new DataFormatException(MessageFormat.format( DhtText.get().shortCompressedObject, getChunkKey(), Integer.valueOf(offset)));
/**
 * Check whether this chunk stores the given object.
 *
 * @param repo
 *            repository the lookup is scoped to.
 * @param objId
 *            object to look for.
 * @return true if the object is present in this chunk.
 */
boolean contains(RepositoryKey repo, AnyObjectId objId) {
	return findOffset(repo, objId) >= 0;
}
/** @return a brief description of this chunk, for debugging. */
@Override
public String toString() {
	final StringBuilder b = new StringBuilder();
	b.append("PackChunk[");
	b.append(getChunkKey());
	b.append(']');
	return b.toString();
}
/**
 * Capture the state needed to stream a large non-delta object later.
 *
 * @param type
 *            object type code.
 * @param sz
 *            inflated size of the object.
 * @param pc
 *            chunk holding the start of the object's data.
 * @param pos
 *            offset of the object's data within {@code pc}.
 * @param ctx
 *            reader used to page in subsequent fragments.
 */
LargeNonDeltaObject(int type, long sz, PackChunk pc, int pos, DhtReader ctx) {
	// Keep both the chunk and its meta; the meta describes any
	// additional fragments the object spills into.
	firstChunk = pc;
	this.meta = pc.getMeta();
	this.type = type;
	this.sz = sz;
	this.pos = pos;
	this.ctx = ctx;
}
case OBJ_TAG: { if (delta != null) { data = inflate(sz, pc, pos + p, ctx); type = typeCode; break SEARCH; if (sz < Integer.MAX_VALUE && !pc.isFragment()) { try { data = pc.inflateOne(sz, pos + p, ctx); return new ObjectLoader.SmallObject(typeCode, data); } catch (LargeObjectException tooBig) { baseChunkKey = pc.getChunkKey(); basePosInChunk = pos - (int) base; } else { if (baseChunkKey != pc.getChunkKey()) pc = ctx.getChunk(baseChunkKey); pos = basePosInChunk; checkCycle(delta, pc.key, pos); delta = new Delta(delta, // pc.key, pos, (int) sz, p + 20, // nc.getChunkKey(), base); if (sz != delta.deltaSize) break SEARCH; pc.getChunkKey(), // Integer.valueOf(pos)));