@Override public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer, List<MemoryBuffer> cacheBuffers) { assert originalCbIndex >= 0; // Had the put succeeded for our new buffer, it would have refcount of 2 - 1 from put, // and 1 from notifyReused call above. "Old" buffer now has the 1 from put; new buffer // is not in cache. cacheWrapper.getAllocator().deallocate(getBuffer()); cacheWrapper.reuseBuffer(replacementBuffer); // Replace the buffer in our big range list, as well as in current results. this.buffer = replacementBuffer; cacheBuffers.set(originalCbIndex, replacementBuffer); originalCbIndex = -1; // This can only happen once at decompress time. }
@Override public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer, List<MemoryBuffer> cacheBuffers) { assert originalCbIndex >= 0; // Had the put succeeded for our new buffer, it would have refcount of 2 - 1 from put, // and 1 from notifyReused call above. "Old" buffer now has the 1 from put; new buffer // is not in cache. releaseBuffer will decref the buffer, and also deallocate. cacheWrapper.releaseBuffer(this.buffer); cacheWrapper.reuseBuffer(replacementBuffer); // Replace the buffer in our big range list, as well as in current results. this.buffer = replacementBuffer; cacheBuffers.set(originalCbIndex, replacementBuffer); originalCbIndex = -1; // This can only happen once at decompress time. }
private void allocateMultiple(MemoryBuffer[] dest, int size) {
  // Prefer the stream-level allocator when present (it accepts the isStopped flag);
  // otherwise fall back to the cache-wide allocator.
  if (allocator == null) {
    cacheWrapper.getAllocator().allocateMultiple(
        dest, size, cacheWrapper.getDataBufferFactory());
  } else {
    allocator.allocateMultiple(dest, size, cacheWrapper.getDataBufferFactory(), isStopped);
  }
}
DiskRangeList drl = new DiskRangeList(readStartPos, readStartPos + len); DataCache.BooleanRef gotAllData = new DataCache.BooleanRef(); drl = cache.getFileData(fileKey, drl, 0, new DataCache.DiskRangeListFactory() { @Override public DiskRangeList createCacheChunk( ByteBuffer data = drl.getData().duplicate(); data.get(array, arrayOffset + offsetFromReadStart, candidateSize); cache.releaseBuffer(((CacheChunk)drl).getBuffer()); sizeRead += candidateSize; drl = drl.next; int maxAlloc = cache.getAllocator().getMaxAllocation(); FileSystem fs = path.getFileSystem(conf); FSDataInputStream is = fs.open(path, bufferSize); Allocator allocator = cache.getAllocator(); long sizeRead = 0; while (current != null) { ByteBuffer data = candidate.getData().duplicate(); data.get(array, arrayOffset + offsetFromReadStart, candidateSize); cache.releaseBuffer(((CacheChunk)candidate).getBuffer()); sizeRead += candidateSize; continue; allocator.allocateMultiple(largeBuffers, maxAlloc, cache.getDataBufferFactory()); for (int i = 0; i < largeBuffers.length; ++i) {
long[] result = cacheWrapper.putFileData(fileKey, cacheKeys, null, baseOffset); assert result == null; // We don't expect conflicts from bad estimates. ++ix; cacheWrapper.getAllocator().allocateMultiple(targetBuffers, bufferSize); LOG.trace("Locking " + chunk.getBuffer() + " due to reuse (after decompression)"); cacheWrapper.reuseBuffer(chunk.getBuffer()); long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset); processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache); if (/*isTracingEnabled && */LOG.isInfoEnabled()) { LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file " isDataReaderOpen = true; dataReader.readFileData(toRead.next, stripeOffset, cacheWrapper.getAllocator().isDirectAlloc()); toRelease = new IdentityHashMap<>(); DiskRangeList drl = toRead.next; LOG.trace("Unlocking {} at the end of processing", buf); cacheWrapper.releaseBuffer(buf);
private int determineUncompressedPartSize() { // We will break the uncompressed data in the cache in the chunks that are the size // of the prevalent ORC compression buffer (the default), or maximum allocation (since we // cannot allocate bigger chunks), whichever is less. long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue(); int maxAllocSize = cacheWrapper.getAllocator().getMaxAllocation(); return (int)Math.min(maxAllocSize, orcCbSizeDefault); }
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
private CacheChunk copyAndReplaceCandidateToNonCached( UncompressedCacheChunk candidateCached, long partOffset, long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) { // We thought we had the entire part to cache, but we don't; convert start to // non-cached. Since we are at the first gap, the previous stuff must be contiguous. singleAlloc[0] = null; trace.logPartialUncompressedData(partOffset, candidateEnd, true); allocateMultiple(singleAlloc, (int)(candidateEnd - partOffset)); MemoryBuffer buffer = singleAlloc[0]; cacheWrapper.reuseBuffer(buffer); ByteBuffer dest = buffer.getByteBufferRaw(); CacheChunk tcc = new CacheChunk(buffer, partOffset, candidateEnd); copyAndReplaceUncompressedChunks(candidateCached, dest, tcc, false); return tcc; }
private void releaseInitialRefcounts(DiskRangeList current) { while (current != null) { DiskRangeList toFree = current; current = current.next; if (toFree instanceof ProcCacheChunk) { ProcCacheChunk pcc = (ProcCacheChunk)toFree; if (pcc.originalData != null) { // TODO: can this still happen? we now clean these up explicitly to avoid other issues. // This can only happen in case of failure - we read some data, but didn't decompress // it. Deallocate the buffer directly, do not decref. if (pcc.getBuffer() != null) { cacheWrapper.getAllocator().deallocate(pcc.getBuffer()); } continue; } } if (!(toFree instanceof CacheChunk)) continue; CacheChunk cc = (CacheChunk)toFree; if (cc.getBuffer() == null) continue; MemoryBuffer buffer = cc.getBuffer(); cacheWrapper.releaseBuffer(buffer); cc.setBuffer(null); } }
cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache); if (LOG.isInfoEnabled()) { LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file " cacheWrapper.getAllocator().isDirectAlloc()); toRelease = new IdentityHashMap<>(); DiskRangeList drl = toRead.next;
cacheWrapper.getAllocator().allocateMultiple( targetBuffers, (int)(partCount == 1 ? streamLen : partSize)); long[] collisionMask = cacheWrapper.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset); processCacheCollisions(collisionMask, toCache, targetBuffers, null);
long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset, tag); processCacheCollisions(collisionMask, toCache, targetBuffers, null);
boolean doTrace) { MemoryBuffer futureAlloc = cacheWrapper.getDataBufferFactory().create();
long[] result = cacheWrapper.putFileData(fileKey, cacheKeys, null, baseOffset, tag); assert result == null; // We don't expect conflicts from bad estimates. cacheWrapper.reuseBuffer(chunk.getBuffer()); } finally { chunk.originalData = null; csd.getCacheBuffers().remove(chunk.getBuffer()); try { cacheWrapper.getAllocator().deallocate(chunk.getBuffer()); } catch (Throwable t) { LOG.error("Ignoring the cleanup error after another error", t); long[] collisionMask = cacheWrapper.putFileData( fileKey, cacheKeys, targetBuffers, baseOffset, tag); processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
private int determineUncompressedPartSize() { // We will break the uncompressed data in the cache in the chunks that are the size // of the prevalent ORC compression buffer (the default), or maximum allocation (since we // cannot allocate bigger chunks), whichever is less. long orcCbSizeDefault = ((Number)OrcConf.BUFFER_SIZE.getDefaultValue()).longValue(); int maxAllocSize = cacheWrapper.getAllocator().getMaxAllocation(); return (int)Math.min(maxAllocSize, orcCbSizeDefault); }
private void releaseInitialRefcount(CacheChunk cc, boolean isBacktracking) { // This is the last RG for which this buffer will be used. Remove the initial refcount if (isTracingEnabled) { LOG.trace("Unlocking " + cc.getBuffer() + " for the fetching thread" + (isBacktracking ? "; backtracking" : "")); } cacheWrapper.releaseBuffer(cc.getBuffer()); cc.setBuffer(null); }
LOG.trace("Locking " + cc.getBuffer() + " due to reuse"); cacheWrapper.reuseBuffer(cc.getBuffer()); columnStreamData.getCacheBuffers().add(cc.getBuffer()); currentOffset = cc.getEnd();
private static CacheChunk copyAndReplaceCandidateToNonCached( UncompressedCacheChunk candidateCached, long partOffset, long candidateEnd, DataCache cacheWrapper, MemoryBuffer[] singleAlloc) { // We thought we had the entire part to cache, but we don't; convert start to // non-cached. Since we are at the first gap, the previous stuff must be contiguous. singleAlloc[0] = null; cacheWrapper.getAllocator().allocateMultiple(singleAlloc, (int)(candidateEnd - partOffset)); MemoryBuffer buffer = singleAlloc[0]; cacheWrapper.reuseBuffer(buffer); ByteBuffer dest = buffer.getByteBufferRaw(); CacheChunk tcc = POOLS.tccPool.take(); tcc.init(buffer, partOffset, candidateEnd); copyAndReplaceUncompressedChunks(candidateCached, dest, tcc); return tcc; }
@Override public void handleCacheCollision(DataCache cacheWrapper, MemoryBuffer replacementBuffer, List<MemoryBuffer> cacheBuffers) { assert cacheBuffers == null; // This is done at pre-read stage where there's nothing special w/refcounts. Just release. cacheWrapper.getAllocator().deallocate(getBuffer()); // Replace the buffer in our big range list, as well as in current results. this.setBuffer(replacementBuffer); }