private void singleThreadDeltaSearch(ProgressMonitor monitor,
		ObjectToPack[] list, int cnt) throws IOException {
	long totalWeight = 0;
	for (int i = 0; i < cnt; i++) {
		ObjectToPack o = list[i];
		totalWeight += DeltaTask.getAdjustedWeight(o);
	}

	// Scale the progress meter so the total fits below MAX_METER,
	// growing the unit size in powers of 1024 and rounding up.
	long bytesPerUnit = 1;
	while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
		bytesPerUnit <<= 10;
	int cost = (int) (totalWeight / bytesPerUnit);
	if (totalWeight % bytesPerUnit != 0)
		cost++;

	beginPhase(PackingPhase.COMPRESSING, monitor, cost);
	new DeltaWindow(config, new DeltaCache(config), reader, monitor,
			bytesPerUnit, list, 0, cnt).search();
	endPhase(monitor);
}
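// Illustration (not part of the sources above): how the COMPRESSING phase
// derives its progress-meter size. The constant below stands in for
// DeltaTask.MAX_METER and the sample weight is made up; only the scaling
// arithmetic mirrors singleThreadDeltaSearch.
class ProgressScalingSketch {
	static final long MAX_METER = 9 << 20; // assumed cap on meter units

	static int scaledCost(long totalWeight) {
		long bytesPerUnit = 1;
		// Grow the unit in steps of 1024 until the meter fits under the cap.
		while (MAX_METER <= (totalWeight / bytesPerUnit))
			bytesPerUnit <<= 10;
		int cost = (int) (totalWeight / bytesPerUnit);
		if (totalWeight % bytesPerUnit != 0)
			cost++; // round up so a partial unit still counts as one tick
		return cost;
	}

	public static void main(String[] args) {
		// 40 GiB of adjusted object weight -> a meter counted in 1 MiB units.
		System.out.println(scaledCost(40L << 30)); // prints 40960
	}
}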
private void cacheDelta(ObjectToPack srcObj, ObjectToPack resObj) {
	if (deltaCache.canCache(deltaLen, srcObj, resObj)) {
		try {
			byte[] zbuf = new byte[deflateBound(deltaLen)];
			ZipStream zs = new ZipStream(deflater(), zbuf);
			if (deltaBuf instanceof byte[])
				zs.write((byte[]) deltaBuf, 0, deltaLen);
			else
				((TemporaryBuffer.Heap) deltaBuf).writeTo(zs, null);
			deltaBuf = null;
			int len = zs.finish();

			resObj.setCachedDelta(deltaCache.cache(zbuf, len, deltaLen));
			resObj.setCachedSize(deltaLen);
		} catch (IOException err) {
			// Compression failed; return the space reserved by canCache.
			deltaCache.credit(deltaLen);
		} catch (OutOfMemoryError err) {
			deltaCache.credit(deltaLen);
		}
	}
}
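// Sketch (plain java.util.zip, not the JGit ZipStream helper): deflating a
// delta into a caller-supplied buffer, the same shape of work cacheDelta
// performs above. The bound used for zbuf is a conservative assumption,
// not JGit's deflateBound().
import java.util.zip.Deflater;

class DeflateSketch {
	static int deflateInto(byte[] delta, byte[] zbuf) {
		Deflater d = new Deflater(Deflater.BEST_COMPRESSION);
		d.setInput(delta);
		d.finish();
		int len = 0;
		while (!d.finished() && len < zbuf.length)
			len += d.deflate(zbuf, len, zbuf.length - len);
		d.end();
		return len; // number of compressed bytes written into zbuf
	}

	public static void main(String[] args) {
		byte[] delta = new byte[4096]; // pretend this is a computed delta
		byte[] zbuf = new byte[delta.length + (delta.length >> 3) + 64]; // assumed bound
		System.out.println(deflateInto(delta, zbuf));
	}
}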
boolean canCache(int length, ObjectToPack src, ObjectToPack res) {
	// If the cache would overflow, don't store.
	//
	if (0 < size && size < used + length) {
		checkForGarbageCollectedObjects();
		if (0 < size && size < used + length)
			return false;
	}

	if (length < entryLimit) {
		used += length;
		return true;
	}

	// If the combined source files are multiple megabytes but the delta
	// is on the order of a kilobyte or two, this was likely costly to
	// construct. Cache it anyway, even though it's over the limit.
	//
	if (length >> 10 < (src.getWeight() >> 20) + (res.getWeight() >> 21)) {
		used += length;
		return true;
	}

	return false;
}
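// Worked example for the weight heuristic above (illustrative numbers, not
// from the sources): with a 12 MiB source object and an 8 MiB result object,
// the threshold is (12 MiB >> 20) + (8 MiB >> 21) = 12 + 4 = 16, so any delta
// under 16 KiB (length >> 10 < 16) is cached even when it exceeds entryLimit.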
@Override
void credit(int reservedSize) {
	lock.lock();
	try {
		super.credit(reservedSize);
	} finally {
		lock.unlock();
	}
}
@Override
Ref cache(byte[] data, int actLen, int reservedSize) {
	// Shrink the buffer before taking the lock; the base class then
	// sees an exact-length array and its own resize is a no-op.
	data = resize(data, actLen);
	lock.lock();
	try {
		return super.cache(data, actLen, reservedSize);
	} finally {
		lock.unlock();
	}
}
@Override
boolean canCache(int length, ObjectToPack src, ObjectToPack res) {
	lock.lock();
	try {
		return super.canCache(length, src, res);
	} finally {
		lock.unlock();
	}
}
Ref cache(byte[] data, int actLen, int reservedSize) {
	// The caller may have had to allocate more space than is
	// required. If we are about to waste anything, shrink it.
	//
	data = resize(data, actLen);

	// When we reserved space for this item we did it for the
	// inflated size of the delta, but we were just given the
	// compressed version. Adjust the cache cost to match.
	//
	if (reservedSize != data.length) {
		used -= reservedSize;
		used += data.length;
	}
	return new Ref(data, queue);
}
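// Accounting sketch (standalone model, not the JGit API): canCache() reserves
// the inflated delta size, cache() swaps that reservation for the deflated
// footprint, and credit() releases it when compression fails, so `used`
// tracks bytes the cache actually holds.
class CacheBudgetSketch {
	long used;

	boolean reserve(long inflated, long limit) {
		if (0 < limit && limit < used + inflated)
			return false; // would overflow the configured cache size
		used += inflated;
		return true;
	}

	void store(long inflated, long deflated) {
		used += deflated - inflated; // replace the reservation with the real cost
	}

	void credit(long inflated) {
		used -= inflated; // reservation returned, nothing was stored
	}
}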