@Override public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer, List<MemoryBuffer> cacheBuffers) { assert originalCbIndex >= 0; // Had the put succeeded for our new buffer, it would have refcount of 2 - 1 from put, // and 1 from notifyReused call above. "Old" buffer now has the 1 from put; new buffer // is not in cache. releaseBuffer will decref the buffer, and also deallocate. cacheWrapper.releaseBuffer(this.buffer); cacheWrapper.reuseBuffer(replacementBuffer); // Replace the buffer in our big range list, as well as in current results. this.buffer = replacementBuffer; cacheBuffers.set(originalCbIndex, replacementBuffer); originalCbIndex = -1; // This can only happen once at decompress time. }
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
private void releaseInitialRefcounts(DiskRangeList current) {
  // Walk the whole range list and drop the fetcher's initial refcount on every cached
  // buffer; non-cache ranges and already-detached chunks are skipped.
  for (DiskRangeList node = current; node != null; ) {
    DiskRangeList toFree = node;
    node = node.next; // advance before touching toFree
    if (toFree instanceof CacheChunk) {
      CacheChunk cc = (CacheChunk) toFree;
      MemoryBuffer buffer = cc.getBuffer();
      if (buffer != null) {
        cacheWrapper.releaseBuffer(buffer);
        cc.setBuffer(null);
      }
    }
  }
}
private void releaseEcbRefCountsOnError(OrcEncodedColumnBatch ecb) { try { if (isTracingEnabled) { LOG.trace("Unlocking the batch not sent to consumer, on error"); } // We cannot send the ecb to consumer. Discard whatever is already there. for (int colIx = 0; colIx < ecb.getTotalColCount(); ++colIx) { if (!ecb.hasData(colIx)) continue; ColumnStreamData[] datas = ecb.getColumnData(colIx); for (ColumnStreamData data : datas) { if (data == null || data.decRef() != 0) continue; for (MemoryBuffer buf : data.getCacheBuffers()) { if (buf == null) continue; cacheWrapper.releaseBuffer(buf); } } } } catch (Throwable t) { LOG.error("Error during the cleanup of an error; ignoring", t); } }
// NOTE(review): this is a fragment stitched from several scopes of a larger copy-out
// method; 'drl', 'candidate', 'newCacheData', 'array', 'arrayOffset',
// 'offsetFromReadStart', 'candidateSize', 'sizeRead' and the enclosing loop(s) are
// declared outside this view - the duplicate 'data' declarations and the unclosed 'for'
// confirm these lines come from separate blocks.
// Copy the cached range's bytes into the caller's array via duplicate() so the shared
// buffer's position/limit are not disturbed, then release the cache's refcount on it.
ByteBuffer data = drl.getData().duplicate();
data.get(array, arrayOffset + offsetFromReadStart, candidateSize);
cache.releaseBuffer(((CacheChunk)drl).getBuffer());
sizeRead += candidateSize;
drl = drl.next;
// Same copy-out pattern for a 'candidate' range (a separate scope in the original).
ByteBuffer data = candidate.getData().duplicate();
data.get(array, arrayOffset + offsetFromReadStart, candidateSize);
cache.releaseBuffer(((CacheChunk)candidate).getBuffer());
sizeRead += candidateSize;
continue;
// Cleanup: release every buffer already handed to the cache for the new data
// (presumably an error/unwind path - TODO confirm against the full method).
for (MemoryBuffer buffer : newCacheData) {
  if (buffer == null) continue;
  cache.releaseBuffer(buffer);
private void releaseInitialRefcounts(DiskRangeList current) { while (current != null) { DiskRangeList toFree = current; current = current.next; if (toFree instanceof ProcCacheChunk) { ProcCacheChunk pcc = (ProcCacheChunk)toFree; if (pcc.originalData != null) { // TODO: can this still happen? we now clean these up explicitly to avoid other issues. // This can only happen in case of failure - we read some data, but didn't decompress // it. Deallocate the buffer directly, do not decref. if (pcc.getBuffer() != null) { cacheWrapper.getAllocator().deallocate(pcc.getBuffer()); } continue; } } if (!(toFree instanceof CacheChunk)) continue; CacheChunk cc = (CacheChunk)toFree; if (cc.getBuffer() == null) continue; MemoryBuffer buffer = cc.getBuffer(); cacheWrapper.releaseBuffer(buffer); cc.setBuffer(null); } }
// NOTE(review): single-statement fragment; 'buf' and the enclosing method are outside
// this view. Drops one cache refcount on the buffer.
cacheWrapper.releaseBuffer(buf);
// NOTE(review): fragment; 'buf' and the enclosing loop/method are outside this view.
// Trace then drop the refcount held on this buffer once processing finishes.
LOG.trace("Unlocking {} at the end of processing", buf);
cacheWrapper.releaseBuffer(buf);
// NOTE(review): fragment identical to the previous one; context is outside this view.
// Trace then release the per-buffer refcount at the end of processing.
LOG.trace("Unlocking {} at the end of processing", buf);
cacheWrapper.releaseBuffer(buf);