/**
 * Frees the underlying bucket exactly once; safe to call repeatedly.
 * Synchronized so two concurrent callers cannot both observe
 * {@code isFreed == false} and double-free the underlying bucket —
 * matches the locked check-then-set pattern used by the other
 * free() implementations in this codebase.
 */
@Override
public synchronized void free() {
    if(isFreed) return; // already released by an earlier call
    isFreed = true;
    underlying.free();
}
/**
 * Frees the backing storage at most once; subsequent calls are no-ops.
 * The freed/null bookkeeping runs under the lock, but the actual
 * {@code data.free()} is deliberately performed after releasing it so the
 * (potentially slow) free does not block other callers.
 */
public void free(){
    synchronized (this) {
        if(isFreed) return; // a previous call already won the race
        isFreed = true;
        if(data == null) return; // nothing was ever allocated
    }
    // NOTE(review): data is re-read outside the lock here; this assumes data
    // is never reassigned once isFreed has been set — confirm against writers.
    data.free();
}
/** Releases the backing bucket's storage, if any has been allocated. */
public void freeData() {
    if (data == null) {
        return; // nothing allocated, nothing to release
    }
    data.free();
}
/** Disposes of this bucket by delegating to the wrapped one. */
@Override
public void free() {
    // No state of our own to release; forward straight to the delegate.
    underlying.free();
}
/** Performs the actual release by freeing the wrapped bucket. */
@Override
public void realFree() {
    bucket.free(); // nothing else to clean up at this layer
}
/**
 * Serializes this object into a freshly allocated bucket via {@link #writeTo}.
 * On success the returned bucket is sealed read-only; on any failure the
 * bucket is freed so it cannot leak.
 *
 * @param bf factory used to allocate the destination bucket
 * @return the read-only bucket containing the serialized form
 * @throws MetadataUnresolvedException propagated from writeTo()
 * @throws IOException on allocation or write failure
 */
public RandomAccessBucket toBucket(BucketFactory bf) throws MetadataUnresolvedException, IOException {
    RandomAccessBucket b = bf.makeBucket(-1); // -1: size not known up front
    DataOutputStream dos = null;
    boolean success = false;
    try {
        dos = new DataOutputStream(b.getOutputStream());
        writeTo(dos);
        dos.close();
        dos = null; // cleared so the Closer.close() in finally is a no-op after a clean close
        b.setReadOnly(); // Must be after dos.close()
        success = true;
        return b;
    } finally {
        Closer.close(dos); // only closes if writeTo()/close() threw
        if(!success) b.free(); // don't leak the bucket on any failure path
    }
}
@Override public synchronized void free() { Bucket cur; synchronized(this) { if(hasBeenFreed) return; hasBeenFreed = true; Closer.close(os); closeInputStreams(true); if(isRAMBucket()) { // If it's in memory we must free before removing from the queue. currentBucket.free(); _hasFreed(currentSize); synchronized(ramBucketQueue) { ramBucketQueue.remove(getReference()); } return; } else { // Better to free outside the lock if it's not in-memory. cur = currentBucket; } } cur.free(); }
data.free();
data.free();
data.free();
/**
 * Writes 64KiB+1 of random data in fixed 4095-byte chunks, then reads it
 * back in chunks of the same size and verifies every byte round-trips.
 */
public void testIrregularWrites() throws IOException {
    final Random rng = new Random(6032405);
    final int length = 1024*64+1;
    final byte[] expected = new byte[length];
    RandomAccessBucket bucket = (RandomAccessBucket) makeBucket(length);
    OutputStream out = bucket.getOutputStream();
    rng.nextBytes(expected);
    // Write in chunks that don't divide the total length evenly.
    int pos = 0;
    while(pos < length) {
        int chunk = Math.min(length - pos, 4095);
        out.write(expected, pos, chunk);
        pos += chunk;
    }
    out.close();
    // Read back and compare; read() may return fewer bytes than requested.
    InputStream in = bucket.getInputStream();
    int offset = 0;
    while(offset < length) {
        byte[] buf = new byte[Math.min(length - offset, 4095)];
        int got = in.read(buf);
        assertTrue(got > 0);
        assertTrue(Arrays.equals(Arrays.copyOfRange(buf, 0, got),
                Arrays.copyOfRange(expected, offset, offset + got)));
        offset += got;
    }
    in.close();
    bucket.free();
}
public void testIrregularWritesNotOverlapping() throws IOException { Random r = new Random(6032405); int length = 1024*64+1; byte[] data = new byte[length]; RandomAccessBucket bucket = (RandomAccessBucket) makeBucket(length); OutputStream os = bucket.getOutputStream(); r.nextBytes(data); for(int written=0;written<length;) { int toWrite = Math.min(length - written, 4095); os.write(data, written, toWrite); written += toWrite; } os.close(); InputStream is = bucket.getInputStream(); for(int moved=0;moved<length;) { int readBytes = Math.min(length - moved, 4093); // Co-prime with 4095 byte[] buf = new byte[readBytes]; readBytes = is.read(buf); assertTrue(readBytes > 0); assertTrue(Arrays.equals(Arrays.copyOfRange(buf, 0, readBytes), Arrays.copyOfRange(data, moved, moved+readBytes))); moved += readBytes; } is.close(); bucket.free(); }
private RandomAccessBucket fixNotPersistent(RandomAccessBucket data, ClientContext context) throws InsertException { boolean skip = false; try { if(!skip) { if(logMINOR) Logger.minor(this, "Copying data from "+data+" length "+data.size()); RandomAccessBucket newData = context.persistentBucketFactory.makeBucket(data.size()); BucketTools.copy(data, newData); data.free(); data = newData; } } catch (IOException e) { Logger.error(this, "Caught "+e+" while copying non-persistent data", e); throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null); } // Note that SegmentedBCB *does* support splitting, so we don't need to do anything to the data // if it doesn't fit in a single block. return data; }
if(bestCompressedData != null && bestCompressedData != origData) bestCompressedData.free(); bestCompressedData = result; bestCompressedDataSize = resultSize; Logger.minor(this, "New size "+resultSize+" ("+resultNumberOfBlocks+" blocks) better than old best "+bestCompressedDataSize+ " ("+bestNumberOfBlocks+" blocks)"); if(bestCompressedData != null && bestCompressedData != origData) bestCompressedData.free(); bestCompressedData = result; bestCompressedDataSize = resultSize; shouldFreeOnFinally = true; if(bestCompressedData != null && bestCompressedData != origData && bestCompressedData != result) bestCompressedData.free(); } finally { if(shouldFreeOnFinally && (result != null) && result != origData) result.free(); Logger.error(this, "Database disabled compressing data", new Exception("error")); if(bestCompressedData != null && bestCompressedData != origData) bestCompressedData.free(); } catch (InvalidCompressionCodecException e) { fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, e, null), context, bestCompressedData);
bucket.free(); return; } else if (request.isPartSet("key")) {
shouldFreeData = true; // must be freed regardless of whether the original data was to be freed if(freeData) { block.getData().free();