// Factory callback used by the disk-range reader: wraps an already-cached
// MemoryBuffer covering bytes [offset, end) into a CacheChunk list node.
// NOTE(review): the trailing "};" closes an enclosing anonymous class whose
// declaration is outside this fragment.
@Override
public DiskRangeList createCacheChunk(MemoryBuffer buffer, long offset, long end) {
  return new CacheChunk(buffer, offset, end);
}
};
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
/** Extends the parent's description with whether original data is retained
 * and whether the backing buffer has been replaced (originalCbIndex == -1). */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder(super.toString());
  sb.append(", original is set ").append(this.originalData != null);
  sb.append(", buffer was replaced ").append(originalCbIndex == -1);
  return sb.toString();
}
// Fragment (enclosing method not visible): reuses an already-cached
// uncompressed buffer for the current stream read.
// NOTE(review): closing braces appear to have been lost when this fragment was
// extracted — the "if" blocks below most likely closed right after each
// LOG.trace / setIndexBaseOffset in the original; token order is preserved
// untouched here. Confirm against the full file before editing.
lastUncompressed = (CacheChunk)current;
if (isTracingEnabled) {
LOG.trace("Locking " + lastUncompressed.getBuffer() + " due to reuse");
// Take an extra ref so the cached buffer survives while this read uses it.
cacheWrapper.reuseBuffer(lastUncompressed.getBuffer());
if (isFirst) {
// Record where this stream's data starts relative to the stream base offset.
columnStreamData.setIndexBaseOffset((int)(lastUncompressed.getOffset() - streamOffset));
isFirst = false;
columnStreamData.getCacheBuffers().add(lastUncompressed.getBuffer());
currentOffset = lastUncompressed.getEnd();
if (isTracingEnabled) {
LOG.trace("Adding an uncompressed buffer " + lastUncompressed.getBuffer());
/** Describes this chunk's byte range and the cache buffer backing it. */
@Override
public String toString() {
  return String.format("start: %d end: %d cache buffer: %s", offset, end, getBuffer());
}
private void processCacheCollisions(long[] collisionMask, List<? extends CacheChunk> toDecompress, MemoryBuffer[] targetBuffers, List<MemoryBuffer> cacheBuffers) { if (collisionMask == null) return; assert collisionMask.length >= (toDecompress.size() >>> 6); // There are some elements that were cached in parallel, take care of them. long maskVal = -1; for (int i = 0; i < toDecompress.size(); ++i) { if ((i & 63) == 0) { maskVal = collisionMask[i >>> 6]; } if ((maskVal & 1) == 1) { // Cache has found an old buffer for the key and put it into array instead of our new one. CacheChunk replacedChunk = toDecompress.get(i); MemoryBuffer replacementBuffer = targetBuffers[i]; if (isTracingEnabled) { LOG.trace("Discarding data due to cache collision: " + replacedChunk.getBuffer() + " replaced with " + replacementBuffer); } assert replacedChunk.getBuffer() != replacementBuffer : i + " was not replaced in the results " + "even though mask is [" + Long.toBinaryString(maskVal) + "]"; replacedChunk.handleCacheCollision(cacheWrapper, replacementBuffer, cacheBuffers); } maskVal >>= 1; } }
// Fragment (enclosing method not visible): locks and reuses a cached
// uncompressed buffer for the current stream, recording its range.
// NOTE(review): closing braces seem to have been dropped during extraction;
// the "if" blocks likely ended after each trace/setIndexBaseOffset line in
// the original source. Tokens are preserved exactly as found.
lastUncompressed = (CacheChunk)current;
if (isTracingEnabled) {
LOG.trace("Locking " + lastUncompressed.getBuffer() + " due to reuse");
// Bump the refcount so the cached buffer stays alive for this reader.
cacheWrapper.reuseBuffer(lastUncompressed.getBuffer());
if (isFirst) {
// First buffer of the stream: remember the offset of its data vs stream start.
columnStreamData.setIndexBaseOffset((int)(lastUncompressed.getOffset() - streamOffset));
isFirst = false;
columnStreamData.getCacheBuffers().add(lastUncompressed.getBuffer());
currentOffset = lastUncompressed.getEnd();
if (isTracingEnabled) {
LOG.trace("Adding an uncompressed buffer " + lastUncompressed.getBuffer());
/** Renders this chunk's start/end offsets and its backing cache buffer. */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder();
  sb.append("start: ").append(offset);
  sb.append(" end: ").append(end);
  sb.append(" cache buffer: ").append(getBuffer());
  return sb.toString();
}
private void processCacheCollisions(long[] collisionMask, List<? extends CacheChunk> toDecompress, MemoryBuffer[] targetBuffers, List<MemoryBuffer> cacheBuffers) { if (collisionMask == null) return; assert collisionMask.length >= (toDecompress.size() >>> 6); // There are some elements that were cached in parallel, take care of them. long maskVal = -1; for (int i = 0; i < toDecompress.size(); ++i) { if ((i & 63) == 0) { maskVal = collisionMask[i >>> 6]; } if ((maskVal & 1) == 1) { // Cache has found an old buffer for the key and put it into array instead of our new one. CacheChunk replacedChunk = toDecompress.get(i); MemoryBuffer replacementBuffer = targetBuffers[i]; if (isTracingEnabled) { LOG.trace("Discarding data due to cache collision: " + replacedChunk.getBuffer() + " replaced with " + replacementBuffer); } trace.logCacheCollision(replacedChunk, replacementBuffer); assert replacedChunk.getBuffer() != replacementBuffer : i + " was not replaced in the results " + "even though mask is [" + Long.toBinaryString(maskVal) + "]"; replacedChunk.handleCacheCollision(cacheWrapper, replacementBuffer, cacheBuffers); } maskVal >>= 1; } }
// Factory callback: builds a CacheChunk disk-range node around a cached
// MemoryBuffer spanning bytes [offset, end).
// NOTE(review): the trailing "};" terminates an enclosing anonymous class
// declared outside this fragment.
@Override
public DiskRangeList createCacheChunk(MemoryBuffer buffer, long offset, long end) {
  return new CacheChunk(buffer, offset, end);
}
};
/**
 * Walks the range list starting at {@code current} and drops the initial
 * (fetch-time) refcount on every cache-backed chunk, clearing each chunk's
 * buffer reference. Non-cache ranges and already-cleared chunks are skipped.
 *
 * @param current head of the list to release; may be null.
 */
private void releaseInitialRefcounts(DiskRangeList current) {
  for (DiskRangeList node = current; node != null; ) {
    // Capture next before releasing, in case the node is unlinked elsewhere.
    DiskRangeList toFree = node;
    node = node.next;
    if (toFree instanceof CacheChunk) {
      CacheChunk cc = (CacheChunk) toFree;
      MemoryBuffer buffer = cc.getBuffer();
      if (buffer != null) {
        cacheWrapper.releaseBuffer(buffer);
        cc.setBuffer(null);
      }
    }
  }
}
// Fragment (enclosing method not visible): release the cached buffer behind
// this result range and, if no victim has been picked yet, try to invalidate it.
DiskRange r = results[index];
if (r instanceof CacheChunk) {
// NOTE(review): this cast assumes the cached buffer is an LlapDataBuffer —
// confirm this path only runs against the LLAP cache implementation.
LlapDataBuffer result = (LlapDataBuffer)((CacheChunk)r).getBuffer();
cache.decRefBuffer(result);
// The branch below opens a block that closes outside this fragment.
if (victim == null && result.invalidate() == LlapCacheableBuffer.INVALIDATE_OK) {
/** Appends to the parent's description whether the original data is still
 * held and whether the buffer was swapped out (originalCbIndex == -1). */
@Override
public String toString() {
  boolean hasOriginal = this.originalData != null;
  boolean wasReplaced = originalCbIndex == -1;
  return super.toString()
      + ", original is set " + hasOriginal
      + ", buffer was replaced " + wasReplaced;
}
// Factory callback: wraps a cached MemoryBuffer covering
// [startOffset, endOffset) into a CacheChunk disk-range node.
// NOTE(review): the trailing "}, gotAllData);" closes an anonymous class that
// is being passed as an argument to a call outside this fragment.
@Override
public DiskRangeList createCacheChunk(
    MemoryBuffer buffer, long startOffset, long endOffset) {
  return new CacheChunk(buffer, startOffset, endOffset);
}
}, gotAllData);
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
// NOTE(review): this fragment contains two variants of the same
// copy-and-release step concatenated ("drl" vs "candidate"); the duplicate
// declaration of "data" could not compile in a single scope, so this is
// almost certainly an extraction artifact. Tokens are preserved untouched.
// Each variant copies one chunk's bytes into the caller's array and releases
// the cached buffer backing it.
ByteBuffer data = drl.getData().duplicate();
// duplicate() keeps the cached buffer's own position/limit unchanged.
data.get(array, arrayOffset + offsetFromReadStart, candidateSize);
cache.releaseBuffer(((CacheChunk)drl).getBuffer()); // done with this chunk
sizeRead += candidateSize;
drl = drl.next;
ByteBuffer data = candidate.getData().duplicate();
data.get(array, arrayOffset + offsetFromReadStart, candidateSize);
cache.releaseBuffer(((CacheChunk)candidate).getBuffer());
sizeRead += candidateSize;
continue;