/** Renders this chunk's range and currently cached buffer for diagnostics. */
@Override public String toString() {
  StringBuilder sb = new StringBuilder("start: ");
  sb.append(offset).append(" end: ").append(end).append(" cache buffer: ").append(getBuffer());
  return sb.toString();
}
/**
 * Returns a human-readable summary of this chunk: its [offset, end) range and the
 * currently cached buffer, if any (other code in this file sets the buffer to null
 * after release, so this may print "null").
 */
@Override public String toString() { return "start: " + offset + " end: " + end + " cache buffer: " + getBuffer(); }
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
/**
 * Releases the initial refcount the fetching thread holds on {@code cc}'s buffer,
 * then nulls out the chunk's buffer reference so a later pass cannot release it twice.
 *
 * @param cc the cache chunk whose buffer is being unlocked
 * @param isBacktracking whether this release happens while backtracking; only changes
 *        the trace message, not the release behavior
 */
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) {
  // This is the last RG for which this buffer will be used. Remove the initial refcount
  if (isTracingEnabled) {
    LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread"
        + (isBacktracking ? "; backtracking" : ""));
  }
  cacheWrapper.releaseBuffer(cc.getBuffer());
  cc.setBuffer(null);
}
/**
 * Walks the disk-range list starting at {@code current} and releases the initial
 * refcount on every cache chunk that still holds a buffer, clearing each reference.
 * Non-cache ranges and chunks whose buffer was already released are skipped.
 */
private void releaseInitialRefcounts(DiskRangeList current) {
  for (DiskRangeList node = current; node != null; ) {
    DiskRangeList toFree = node;
    node = node.next; // advance before touching toFree
    if (toFree instanceof CacheChunk) {
      CacheChunk chunk = (CacheChunk) toFree;
      MemoryBuffer held = chunk.getBuffer();
      if (held != null) {
        cacheWrapper.releaseBuffer(held);
        chunk.setBuffer(null);
      }
    }
  }
}
private void processCacheCollisions(long[] collisionMask, List<? extends CacheChunk> toDecompress, MemoryBuffer[] targetBuffers, List<MemoryBuffer> cacheBuffers) { if (collisionMask == null) return; assert collisionMask.length >= (toDecompress.size() >>> 6); // There are some elements that were cached in parallel, take care of them. long maskVal = -1; for (int i = 0; i < toDecompress.size(); ++i) { if ((i & 63) == 0) { maskVal = collisionMask[i >>> 6]; } if ((maskVal & 1) == 1) { // Cache has found an old buffer for the key and put it into array instead of our new one. CacheChunk replacedChunk = toDecompress.get(i); MemoryBuffer replacementBuffer = targetBuffers[i]; if (isTracingEnabled) { LOG.trace("Discarding data due to cache collision: " + replacedChunk.getBuffer() + " replaced with " + replacementBuffer); } trace.logCacheCollision(replacedChunk, replacementBuffer); assert replacedChunk.getBuffer() != replacementBuffer : i + " was not replaced in the results " + "even though mask is [" + Long.toBinaryString(maskVal) + "]"; replacedChunk.handleCacheCollision(cacheWrapper, replacementBuffer, cacheBuffers); } maskVal >>= 1; } }
private void processCacheCollisions(long[] collisionMask, List<? extends CacheChunk> toDecompress, MemoryBuffer[] targetBuffers, List<MemoryBuffer> cacheBuffers) { if (collisionMask == null) return; assert collisionMask.length >= (toDecompress.size() >>> 6); // There are some elements that were cached in parallel, take care of them. long maskVal = -1; for (int i = 0; i < toDecompress.size(); ++i) { if ((i & 63) == 0) { maskVal = collisionMask[i >>> 6]; } if ((maskVal & 1) == 1) { // Cache has found an old buffer for the key and put it into array instead of our new one. CacheChunk replacedChunk = toDecompress.get(i); MemoryBuffer replacementBuffer = targetBuffers[i]; if (isTracingEnabled) { LOG.trace("Discarding data due to cache collision: " + replacedChunk.getBuffer() + " replaced with " + replacementBuffer); } assert replacedChunk.getBuffer() != replacementBuffer : i + " was not replaced in the results " + "even though mask is [" + Long.toBinaryString(maskVal) + "]"; replacedChunk.handleCacheCollision(cacheWrapper, replacementBuffer, cacheBuffers); } maskVal >>= 1; } }
// NOTE(review): incomplete excerpt — braces are unbalanced in this fragment (three opens,
// no closes), so the closing braces live outside this view. As written on one line,
// reuseBuffer() and the result bookkeeping would sit inside the isTracingEnabled guard;
// TODO confirm against the full source that buffer locking does not depend on tracing.
lastUncompressed = (CacheChunk)current; if (isTracingEnabled) { LOG.trace("Locking " + lastUncompressed.getBuffer() + " due to reuse"); cacheWrapper.reuseBuffer(lastUncompressed.getBuffer()); if (isFirst) { columnStreamData.setIndexBaseOffset((int)(lastUncompressed.getOffset() - streamOffset)); isFirst = false; columnStreamData.getCacheBuffers().add(lastUncompressed.getBuffer()); currentOffset = lastUncompressed.getEnd(); if (isTracingEnabled) { LOG.trace("Adding an uncompressed buffer " + lastUncompressed.getBuffer());
// NOTE(review): incomplete excerpt — the try block is cut off; the catch/finally that
// pairs with this releaseInitialRefcount(cc, false) call is not visible here.
assert cc.getBuffer() != null; try { releaseInitialRefcount(cc, false);
// NOTE(review): incomplete excerpt — braces are unbalanced in this fragment (three opens,
// no closes), so the closing braces live outside this view. As written on one line,
// reuseBuffer() and the result bookkeeping would sit inside the isTracingEnabled guard;
// TODO confirm against the full source that buffer locking does not depend on tracing.
lastUncompressed = (CacheChunk)current; if (isTracingEnabled) { LOG.trace("Locking " + lastUncompressed.getBuffer() + " due to reuse"); cacheWrapper.reuseBuffer(lastUncompressed.getBuffer()); if (isFirst) { columnStreamData.setIndexBaseOffset((int)(lastUncompressed.getOffset() - streamOffset)); isFirst = false; columnStreamData.getCacheBuffers().add(lastUncompressed.getBuffer()); currentOffset = lastUncompressed.getEnd(); if (isTracingEnabled) { LOG.trace("Adding an uncompressed buffer " + lastUncompressed.getBuffer());
// NOTE(review): incomplete excerpt — the try block is cut off; the catch/finally that
// pairs with this releaseInitialRefcount(cc, false) call is not visible here.
assert cc.getBuffer() != null; try { releaseInitialRefcount(cc, false);
private void releaseInitialRefcounts(DiskRangeList current) { while (current != null) { DiskRangeList toFree = current; current = current.next; if (toFree instanceof ProcCacheChunk) { ProcCacheChunk pcc = (ProcCacheChunk)toFree; if (pcc.originalData != null) { // TODO: can this still happen? we now clean these up explicitly to avoid other issues. // This can only happen in case of failure - we read some data, but didn't decompress // it. Deallocate the buffer directly, do not decref. if (pcc.getBuffer() != null) { cacheWrapper.getAllocator().deallocate(pcc.getBuffer()); } continue; } } if (!(toFree instanceof CacheChunk)) continue; CacheChunk cc = (CacheChunk)toFree; if (cc.getBuffer() == null) continue; MemoryBuffer buffer = cc.getBuffer(); cacheWrapper.releaseBuffer(buffer); cc.setBuffer(null); } }
// NOTE(review): incomplete excerpt — decrefs the cached buffer for a CacheChunk result
// and, when no victim has been chosen yet and invalidate() returns INVALIDATE_OK,
// enters a branch whose body is outside this view; enclosing loop is not visible.
DiskRange r = results[index]; if (r instanceof CacheChunk) { LlapDataBuffer result = (LlapDataBuffer)((CacheChunk)r).getBuffer(); cache.decRefBuffer(result); if (victim == null && result.invalidate() == LlapCacheableBuffer.INVALIDATE_OK) {
// NOTE(review): incomplete excerpt — copies candidateSize bytes from a duplicated
// cached ByteBuffer into the target array and releases each chunk's buffer after use.
// The two near-identical copy sequences (drl vs candidate) suggest a loop body outside
// this view; confirm each buffer is released exactly once across both paths.
ByteBuffer data = drl.getData().duplicate(); data.get(array, arrayOffset + offsetFromReadStart, candidateSize); cache.releaseBuffer(((CacheChunk)drl).getBuffer()); sizeRead += candidateSize; drl = drl.next; ByteBuffer data = candidate.getData().duplicate(); data.get(array, arrayOffset + offsetFromReadStart, candidateSize); cache.releaseBuffer(((CacheChunk)candidate).getBuffer()); sizeRead += candidateSize; continue;
// NOTE(review): incomplete test-code excerpt — checks the returned buffer's arena index
// against a fake index built from fileIndex and offsets[j], then decrefs the buffer;
// the enclosing test method and loop are not visible here.
LlapAllocatorBuffer result = (LlapAllocatorBuffer)((CacheChunk)iter).getBuffer(); assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), result.getArenaIndex()); cache.decRefBuffer(result);
// NOTE(review): incomplete test-code excerpt — when the expected object is a MemoryBuffer,
// the result must be a CacheChunk holding that exact buffer (identity via assertSame);
// otherwise plain equality is asserted. Enclosing test method is not visible here.
if (obj instanceof MemoryBuffer) { assertTrue(iter instanceof CacheChunk); assertSame(obj, ((CacheChunk)iter).getBuffer()); } else { assertTrue(iter.equals(obj));