final PackWriter pw = new PackWriter(cfg, walk.getObjectReader(), accumulator); try { pw.setIndexDisabled(true); if (req.getFilterBlobLimit() >= 0) { pw.setFilterBlobLimit(req.getFilterBlobLimit()); pw.setUseCachedPacks(false); } else { pw.setUseCachedPacks(true); pw.setUseBitmaps( req.getDepth() == 0 && req.getClientShallowCommits().isEmpty()); pw.setClientShallowCommits(req.getClientShallowCommits()); pw.setReuseDeltaCommits(true); pw.setDeltaBaseAsOffset( req.getClientCapabilities().contains(OPTION_OFS_DELTA)); pw.setThin(req.getClientCapabilities().contains(OPTION_THIN_PACK)); pw.setReuseValidatingObjects(false); pw.setTagTargets(tagTargets); int walkDepth = req.getDepth() == 0 ? Integer.MAX_VALUE : req.getDepth() - 1; pw.setShallowPack(req.getDepth(), unshallowCommits); pw.preparePack(pm, wantIds, commonBase, req.getClientShallowCommits()); } else {
try (PackWriter pw = new PackWriter( (pconfig == null) ? new PackConfig(repo) : pconfig, repo.newObjectReader())) { pw.setDeltaBaseAsOffset(true); pw.setReuseDeltaCommits(false); if (tagTargets != null) { pw.setTagTargets(tagTargets); pw.excludeObjects(idx); pw.preparePack(pm, want, have, PackWriter.NONE, tags); if (pw.getObjectCount() == 0) return null; checkCancelled(); String id = pw.computeName().getName(); File packdir = repo.getObjectDatabase().getPackDirectory(); tmpPack = File.createTempFile("gc_", ".pack_tmp", packdir); //$NON-NLS-1$ //$NON-NLS-2$ OutputStream channelStream = Channels .newOutputStream(channel)) { pw.writePack(pm, pm, channelStream); channel.force(true); OutputStream idxStream = Channels .newOutputStream(idxChannel)) { pw.writeIndex(idxStream); idxChannel.force(true);
searchForReuse(compressMonitor); if (config.isDeltaCompress()) searchForDeltas(compressMonitor); isIndexDisabled() ? packStream : new CheckedOutputStream(packStream, crc32), this); long objCnt = getObjectCount(); stats.totalObjects = objCnt; if (callback != null) callback.setObjectCount(objCnt); beginPhase(PackingPhase.WRITING, writeMonitor, objCnt); long writeStart = System.currentTimeMillis(); try { out.flush(); writeObjects(out); if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) { for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) { writeChecksum(out); out.flush(); } finally { endPhase(writeMonitor);
/**
 * Construct a {@link PackWriter} pre-configured for this collector's packs.
 * <p>
 * Delta bases are emitted as offsets and existing delta commits are not
 * reused, matching how the other pack phases in this class expect the
 * writer to behave.
 *
 * @return a new writer; the caller owns it and must close it.
 */
private PackWriter newPackWriter() {
	PackWriter writer = new PackWriter(packConfig, ctx);
	writer.setDeltaBaseAsOffset(true);
	writer.setReuseDeltaCommits(false);
	return writer;
}
/**
 * Include one object to the output file.
 * <p>
 * Objects are written in the order they are added. Adding the same object
 * twice may cause it to be written twice, producing a larger file than
 * necessary.
 *
 * @param object
 *            the object to add.
 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
 *             the object is an unsupported type.
 */
public void addObject(RevObject object) throws IncorrectObjectTypeException {
	if (exclude(object)) {
		// Object is covered by an excluded (already packed) set; skip it.
		return;
	}
	addObject(object, 0);
}
/**
 * Pack the objects reachable from the txn heads into a GC_TXN pack.
 * <p>
 * Objects contained in packs written earlier in this GC run are excluded,
 * so the txn pack only holds what is not already packed. Nothing is
 * written when there are no txn heads or the prepared pack is empty.
 *
 * @param pm
 *            progress monitor for the pack preparation phases.
 * @throws IOException
 *             the pack could not be prepared or written.
 */
private void packRefTreeGraph(ProgressMonitor pm) throws IOException {
	if (txnHeads.isEmpty()) {
		return;
	}
	try (PackWriter pw = newPackWriter()) {
		for (ObjectIdSet alreadyPacked : newPackObj) {
			pw.excludeObjects(alreadyPacked);
		}
		pw.preparePack(pm, txnHeads, NONE);
		if (pw.getObjectCount() > 0) {
			// Pack size cannot be estimated up front; pass 0 as unknown.
			writePack(GC_TXN, pw, pm, 0);
		}
	}
}
String pathIdx = null; try (PackWriter writer = new PackWriter(transport.getPackConfig(), local.newObjectReader())) { have.add(r.getPeeledObjectId()); writer.preparePack(monitor, need, have); if (writer.getObjectCount() == 0) return; packNames.put(n, n); final String base = "pack-" + writer.computeName().name(); //$NON-NLS-1$ final String packName = base + ".pack"; //$NON-NLS-1$ pathPack = "pack/" + packName; //$NON-NLS-1$ try (OutputStream os = new BufferedOutputStream( dest.writeFile(pathPack, monitor, wt + "..pack"))) { //$NON-NLS-1$ writer.writePack(monitor, monitor, os); writer.writeIndex(os);
Set<ObjectId> newObjects = new HashSet<>(); try (PackWriter writer = new PackWriter(transport.getPackConfig(), local.newObjectReader())) { writer.setIndexDisabled(true); writer.setUseCachedPacks(true); writer.setUseBitmaps(true); writer.setThin(thinPack); writer.setReuseValidatingObjects(false); writer.setDeltaBaseAsOffset(capableOfsDelta); writer.preparePack(monitor, newObjects, remoteObjects); packOut = new CheckingSideBandOutputStream(in, out); writer.writePack(monitor, monitor, packOut); packTransferTime = writer.getStatistics().getTimeWriting();
throws IOException { try (PackWriter packWriter = newPackWriter()) { packWriter.setObjectCountCallback(callback); for (RevCommit r : assume) exc.add(r.getId()); packWriter.setIndexDisabled(true); packWriter.setDeltaBaseAsOffset(true); packWriter.setThin(exc.size() > 0); packWriter.setReuseValidatingObjects(false); if (exc.size() == 0) packWriter.setTagTargets(tagTargets); packWriter.preparePack(monitor, inc, exc); packWriter.writePack(monitor, monitor, os);
pw.writePack(pm, pm, out); pack.addFileExt(PACK); pack.setBlockSize(PACK, out.blockSize()); pw.writeIndex(cnt); pack.addFileExt(INDEX); pack.setFileSize(INDEX, cnt.getCount()); pack.setBlockSize(INDEX, out.blockSize()); pack.setIndexVersion(pw.getIndexVersion()); if (pw.prepareBitmapIndex(pm)) { try (DfsOutputStream out = objdb.writeFile(pack, BITMAP_INDEX)) { CountingOutputStream cnt = new CountingOutputStream(out); pw.writeBitmapIndex(cnt); pack.addFileExt(BITMAP_INDEX); pack.setFileSize(BITMAP_INDEX, cnt.getCount()); PackStatistics stats = pw.getStatistics(); pack.setPackStats(stats); pack.setLastModified(startTimeMillis); newPackDesc.add(pack); newPackStats.add(stats); newPackObj.add(pw.getObjectSet()); return pack;
pc.setReuseObjects(true); try (PackWriter pw = new PackWriter(pc, ctx)) { pw.setDeltaBaseAsOffset(true); pw.setReuseDeltaCommits(false); if (pw.getObjectCount() == 0) { return; writeIndex(objdb, outDesc, pw); PackStatistics stats = pw.getStatistics();
cfg.setBuildBitmaps(false); try (PackWriter pw = new PackWriter(cfg, ctx); RevWalk pool = new RevWalk(ctx)) { pw.setDeltaBaseAsOffset(true); pw.setReuseDeltaCommits(true); pm.beginTask(JGitText.get().findingGarbage, objectsBefore()); long estimatedPackSize = 12 + 20; // header and trailer sizes. pw.addObject(pool.lookupAny(id, type)); long objSize = oldRevIdx.findNextOffset(offset, maxOffset) - offset; if (0 < pw.getObjectCount()) writePack(UNREACHABLE_GARBAGE, pw, pm, estimatedPackSize);
@NonNull Set<? extends ObjectId> noBitmaps) throws IOException { final long countingStart = System.currentTimeMillis(); beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN); BitmapWalker bitmapWalker = new BitmapWalker( walker, bitmapIndex, countingMonitor); findObjectsToPackUsingBitmaps(bitmapWalker, want, have); endPhase(countingMonitor); stats.timeCounting = System.currentTimeMillis() - countingStart; stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses(); RevCommit c; while ((c = walker.next()) != null) { if (exclude(c)) continue; if (c.has(RevFlag.UNINTERESTING)) { addObject(cmit, 0); if (!cmit.has(added)) { cmit.add(added); addObject(cmit, 0); commitCnt++; RevCommit p = cmit.getParent(i); if (!p.has(added) && !p.has(RevFlag.UNINTERESTING) && !exclude(p)) { p.add(added); addObject(p, 0);
beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt); if (cnt <= 4096) { tmp.addAll(objectsLists[OBJ_TREE]); tmp.addAll(objectsLists[OBJ_BLOB]); searchForReuse(monitor, tmp); if (pruneCurrentObjectList) { pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]); pruneEdgesFromObjectList(objectsLists[OBJ_TREE]); pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]); pruneEdgesFromObjectList(objectsLists[OBJ_TAG]); searchForReuse(monitor, objectsLists[OBJ_TAG]); searchForReuse(monitor, objectsLists[OBJ_COMMIT]); searchForReuse(monitor, objectsLists[OBJ_TREE]); searchForReuse(monitor, objectsLists[OBJ_BLOB]); endPhase(monitor); stats.timeSearchingForReuse = System.currentTimeMillis() - start; cutDeltaChains(objectsLists[OBJ_TREE]); cutDeltaChains(objectsLists[OBJ_BLOB]);
/**
 * Pack everything reachable from the heads and tags into the GC pack.
 * <p>
 * When there is nothing to pack, or the prepared pack turns out to be
 * empty, only the reftable is written.
 *
 * @param pm
 *            progress monitor for the pack preparation phases.
 * @throws IOException
 *             the pack or reftable could not be written.
 */
private void packHeads(ProgressMonitor pm) throws IOException {
	if (allHeadsAndTags.isEmpty()) {
		writeReftable();
		return;
	}
	try (PackWriter pw = newPackWriter()) {
		pw.setTagTargets(tagTargets);
		pw.preparePack(pm, allHeadsAndTags, NONE, NONE, allTags);
		if (pw.getObjectCount() > 0) {
			// Size the output using data from earlier pack sources.
			writePack(GC, pw, pm,
					estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC));
		} else {
			writeReftable();
		}
	}
}
if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE || !cachedPacks.isEmpty()) return false; List<ObjectToPack> byName = sortByName(); sortedByName = null; objectsLists = null; .selectCommits(numCommits, excludeFromBitmapSelection); beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size()); endPhase(pm); return true;
+ edgeObjects.size()]; int cnt = 0; cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE); cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB); if (cnt == 0) return; beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt); AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize( Arrays.<ObjectToPack> asList(list).subList(0, cnt), false); sizeQueue.release(); endPhase(monitor); stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart; searchForDeltas(monitor, list, cnt); stats.deltaSearchNonEdgeObjects = nonEdgeCnt; stats.timeCompressing = System.currentTimeMillis() - searchStart;
/**
 * Write the pack index for {@code pack} into the object database.
 * <p>
 * Records the resulting file size, the stream block size and the index
 * version on the pack description so readers can locate and parse it.
 *
 * @param objdb
 *            database that stores the index file.
 * @param pack
 *            description of the pack whose index is written.
 * @param pw
 *            writer holding the object list to index.
 * @throws IOException
 *             the index could not be written.
 */
private static void writeIndex(DfsObjDatabase objdb, DfsPackDescription pack,
		PackWriter pw) throws IOException {
	try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
		// Count bytes as they stream out so the final size can be recorded.
		CountingOutputStream counted = new CountingOutputStream(out);
		pw.writeIndex(counted);
		pack.addFileExt(INDEX);
		pack.setFileSize(INDEX, counted.getCount());
		pack.setBlockSize(INDEX, out.blockSize());
		pack.setIndexVersion(pw.getIndexVersion());
	}
}
list, 0, cnt); taskBlock.partitionTasks(); beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost()); pm.startWorkers(taskBlock.tasks.size()); if (executor instanceof ExecutorService) { runTasks((ExecutorService) executor, pm, taskBlock, errors); } else if (executor == null) { runTasks(pool, pm, taskBlock, errors); } finally { pool.shutdown(); endPhase(monitor);
/**
 * Prepare the list of objects to be written to the pack stream.
 * <p>
 * From the two input sets a third set is derived: every object reachable
 * from the interesting objects, minus the uninteresting objects and their
 * ancestors. The traversal relies on
 * {@link org.eclipse.jgit.revwalk.ObjectWalk} to enumerate that set and to
 * order it following standard git in-pack conventions: by object type,
 * recency, path and delta-base first.
 * </p>
 *
 * @param countingMonitor
 *            progress during object enumeration.
 * @param want
 *            collection of objects to be marked as interesting (start
 *            points of graph traversal). Must not be {@code null}.
 * @param have
 *            collection of objects to be marked as uninteresting (end
 *            points of graph traversal). Pass {@link #NONE} if all objects
 *            reachable from {@code want} are desired, such as when serving
 *            a clone.
 * @throws java.io.IOException
 *             when some I/O problem occur during reading objects.
 */
public void preparePack(ProgressMonitor countingMonitor,
		@NonNull Set<? extends ObjectId> want,
		@NonNull Set<? extends ObjectId> have) throws IOException {
	// Delegate to the full overload with no shallow/tag refinements.
	this.preparePack(countingMonitor, want, have, NONE, NONE);
}