/**
 * Opens a read stream on the underlying bucket.
 *
 * @return an InputStream from the wrapped bucket.
 * @throws IOException if this wrapper has already been freed.
 */
@Override public InputStream getInputStream() throws IOException {
    synchronized(this) {
        // Fail fast if free() has already run; checked under the lock.
        if(freed) throw new IOException("Already freed");
    }
    // NOTE(review): the delegate call happens outside the lock, so a
    // concurrent free() between the check and this call is not excluded here.
    return bucket.getInputStream();
}
/**
 * Releases the underlying resource exactly once; repeated calls are no-ops.
 */
@Override public void free() {
    if(!isFreed) {
        // Mark before delegating so a re-entrant call cannot free twice.
        isFreed = true;
        underlying.free();
    }
}
/**
 * Opens a write stream on the underlying bucket.
 *
 * @return an OutputStream from the wrapped bucket.
 * @throws IOException if this wrapper has already been freed.
 */
@Override public OutputStream getOutputStream() throws IOException {
    synchronized(this) {
        // Fail fast if free() has already run; checked under the lock.
        if(freed) throw new IOException("Already freed");
    }
    // NOTE(review): delegate call is outside the lock, mirroring getInputStream().
    return bucket.getOutputStream();
}
/**
 * Converts this padded bucket into a random-access view.
 * Marks both this wrapper and the underlying bucket read-only first.
 *
 * @return a PaddedRandomAccessBuffer over the underlying buffer, trimmed to size.
 * @throws IOException if an output stream is still open on this bucket.
 */
@Override public LockableRandomAccessBuffer toRandomAccessBuffer() throws IOException {
    synchronized(this) {
        if(outputStreamOpen) throw new IOException("Must close first");
        // Freeze this wrapper before converting.
        readOnly = true;
    }
    underlying.setReadOnly();
    return new PaddedRandomAccessBuffer(underlying.toRandomAccessBuffer(), size);
}
/**
 * Creates a new read-only bucket containing a copy of the given byte range.
 *
 * @param bucketFactory factory used to allocate the bucket.
 * @param data source array to copy from.
 * @param offset start offset within {@code data}.
 * @param length number of bytes to copy.
 * @return a read-only RandomAccessBucket holding the copied bytes.
 * @throws IOException if allocating or writing the bucket fails; the
 *         partially-written bucket is freed before the exception propagates.
 */
public static RandomAccessBucket makeImmutableBucket(BucketFactory bucketFactory, byte[] data, int offset, int length) throws IOException {
    RandomAccessBucket bucket = bucketFactory.makeBucket(length);
    boolean success = false;
    try {
        OutputStream os = bucket.getOutputStreamUnbuffered();
        try {
            os.write(data, offset, length);
        } finally {
            os.close();
        }
        bucket.setReadOnly();
        success = true;
        return bucket;
    } finally {
        // Fix: previously the bucket leaked if write() or close() threw.
        if(!success) bucket.free();
    }
}
/**
 * Converts this encrypted bucket into an encrypted random-access view.
 *
 * @return an EncryptedRandomAccessBuffer wrapping the underlying buffer.
 * @throws IOException if the bucket is smaller than the header (nothing was
 *         written), or if the crypto layer rejects the conversion.
 */
@Override public LockableRandomAccessBuffer toRandomAccessBuffer() throws IOException {
    // An encrypted bucket must contain at least its header to be convertible.
    if(underlying.size() < type.headerLen)
        throw new IOException("Converting empty bucket");
    underlying.setReadOnly();
    LockableRandomAccessBuffer raw = underlying.toRandomAccessBuffer();
    try {
        return new EncryptedRandomAccessBuffer(type, raw, masterKey, false);
    } catch (GeneralSecurityException e) {
        // Preserve the cause: log it and rewrap as an I/O failure for callers.
        Logger.error(this, "Unable to convert encrypted bucket: "+e, e);
        throw new IOException(e);
    }
}
/**
 * Writes 64 KiB + 1 of pseudo-random data in 4095-byte slices, then reads it
 * back in 4095-byte slices and verifies every chunk matches.
 */
public void testIrregularWrites() throws IOException {
    final int length = 1024*64+1;
    final int chunk = 4095;
    Random rng = new Random(6032405);
    byte[] expected = new byte[length];
    RandomAccessBucket bucket = (RandomAccessBucket) makeBucket(length);
    OutputStream out = bucket.getOutputStream();
    rng.nextBytes(expected);
    int pos = 0;
    while(pos < length) {
        int n = Math.min(length - pos, chunk);
        out.write(expected, pos, n);
        pos += n;
    }
    out.close();
    InputStream in = bucket.getInputStream();
    int offset = 0;
    while(offset < length) {
        byte[] buf = new byte[Math.min(length - offset, chunk)];
        int got = in.read(buf);
        assertTrue(got > 0);
        // Compare exactly the bytes the read returned, which may be a short read.
        assertTrue(Arrays.equals(Arrays.copyOfRange(buf, 0, got), Arrays.copyOfRange(expected, offset, offset+got)));
        offset += got;
    }
    in.close();
    bucket.free();
}
@Override public void tryCompress(final ClientContext context) throws InsertException { long origSize = origData.size(); long origNumberOfBlocks = origSize/CHKBlock.DATA_LENGTH; COMPRESSOR_TYPE bestCodec = null; MultiHashInputStream hasher = null; try { is = origData.getInputStream(); result = bucketFactory.makeBucket(-1); os = result.getOutputStream(); if(first && generateHashes != 0) { if(logMINOR) Logger.minor(this, "Generating hashes: "+generateHashes); long resultSize = result.size(); long resultNumberOfBlocks = resultSize/CHKBlock.DATA_LENGTH; bestCompressedData.free(); bestCompressedData = result; bestCompressedDataSize = resultSize; Logger.minor(this, "New size "+resultSize+" ("+resultNumberOfBlocks+" blocks) better than old best "+bestCompressedDataSize+ " ("+bestNumberOfBlocks+" blocks)"); if(bestCompressedData != null && bestCompressedData != origData) bestCompressedData.free(); bestCompressedData = result; bestCompressedDataSize = resultSize; shouldFreeOnFinally = true; if(bestCompressedData != null && bestCompressedData != origData && bestCompressedData != result) bestCompressedData.free();
byte[] data = new byte[length]; RandomAccessBucket bucket = (RandomAccessBucket) makeBucket(length); OutputStream os = bucket.getOutputStream(); r.nextBytes(data); for(int written=0;written<length;) { InputStream is = bucket.getInputStream(); for(int moved=0;moved<length;) { int readBytes = Math.min(length - moved, 4095); moved += readBytes; LockableRandomAccessBuffer raf = bucket.toRandomAccessBuffer(); assertEquals(length, raf.size()); RAFBucket wrapped = new RAFBucket(raf);
/**
 * Returns the payload size: the live bucket's size while data is present,
 * otherwise the size recorded when the data was finished/released.
 */
public long getDataSize() {
    return (data == null) ? finishedSize : data.size();
}
public RandomAccessBucket toBucket(BucketFactory bf) throws MetadataUnresolvedException, IOException { RandomAccessBucket b = bf.makeBucket(-1); DataOutputStream dos = null; boolean success = false; try { dos = new DataOutputStream(b.getOutputStream()); writeTo(dos); dos.close(); dos = null; b.setReadOnly(); // Must be after dos.close() success = true; return b; } finally { Closer.close(dos); if(!success) b.free(); } }
void onCompressedInner(CompressionOutput output, ClientContext context) throws InsertException { HashResult[] hashes = output.hashes; long origSize = block.getData().size(); byte[] hashThisLayerOnly = null; if(hashes != null && metadata) { long bestCompressedDataSize = bestCompressedData.size(); RandomAccessBucket data = bestCompressedData; COMPRESSOR_TYPE bestCodec = output.bestCodec; shouldFreeData = true; // must be freed regardless of whether the original data was to be freed if(freeData) { block.getData().free(); short codecID = bestCodec == null ? -1 : bestCodec.metadataID; ctx.eventProducer.produceEvent(new FinishedCompressionEvent(codecID, origSize, bestCompressedDataSize), context); if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this+" data = "+data); long compressedDataSize = data.size(); boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize <= blockSize : compressedDataSize <= oneBlockCompressedSize; boolean fitsInOneCHK = bestCodec == null ? compressedDataSize <= CHKBlock.DATA_LENGTH : compressedDataSize <= CHKBlock.MAX_COMPRESSED_DATA_LENGTH; dataRAF = data.toRandomAccessBuffer(); } catch (IOException e) { throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
bucketos = filedata.getOutputStream(); Logger.minor(this, "Name = " + name + " length = " + filedata.size() + " filename = " + filename); if(filename != null) uploadedFiles.put(name, new HTTPUploadedFileImpl(filename, contentType, filedata));
private RandomAccessBucket fixNotPersistent(RandomAccessBucket data, ClientContext context) throws InsertException { boolean skip = false; try { if(!skip) { if(logMINOR) Logger.minor(this, "Copying data from "+data+" length "+data.size()); RandomAccessBucket newData = context.persistentBucketFactory.makeBucket(data.size()); BucketTools.copy(data, newData); data.free(); data = newData; } } catch (IOException e) { Logger.error(this, "Caught "+e+" while copying non-persistent data", e); throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null); } // Note that SegmentedBCB *does* support splitting, so we don't need to do anything to the data // if it doesn't fit in a single block. return data; }
/**
 * Converts this delayed-free bucket into a delayed-free random-access view.
 * Makes the bucket read-only first.
 *
 * @return a DelayedFreeRandomAccessBuffer sharing this wrapper's factory.
 * @throws IOException if this bucket has already been freed.
 */
@Override public LockableRandomAccessBuffer toRandomAccessBuffer() throws IOException {
    synchronized(this) {
        if(freed) throw new IOException("Already freed");
    }
    setReadOnly();
    LockableRandomAccessBuffer converted = bucket.toRandomAccessBuffer();
    return new DelayedFreeRandomAccessBuffer(converted, factory);
}
/**
 * Opens an unbuffered read stream over the current backing bucket.
 *
 * @param idx stream index recorded on this reader.
 * @throws IOException if the backing bucket cannot be opened for reading.
 */
TempBucketInputStream(short idx) throws IOException {
    this.idx = idx;
    this.currentIS = currentBucket.getInputStreamUnbuffered();
}
/**
 * Opens (lazily reuses) the unbuffered write stream on the current backing bucket.
 *
 * @param idx stream index — NOTE(review): unlike TempBucketInputStream, this
 *        parameter is never stored here; confirm whether it should be assigned
 *        to a field or is intentionally unused.
 * @throws IOException if the backing bucket cannot be opened for writing.
 */
TempBucketOutputStream(short idx) throws IOException {
    // Only open a fresh stream if one isn't already cached on the field.
    if(os == null) os = currentBucket.getOutputStreamUnbuffered();
}
/** Delegates shadow creation to the current backing bucket. */
@Override public RandomAccessBucket createShadow() { return currentBucket.createShadow(); }
/** Returns the current backing bucket's name; synchronized because the backing bucket can be swapped. */
@Override public synchronized String getName() { return currentBucket.getName(); }
public void testIrregularWritesNotOverlapping() throws IOException { Random r = new Random(6032405); int length = 1024*64+1; byte[] data = new byte[length]; RandomAccessBucket bucket = (RandomAccessBucket) makeBucket(length); OutputStream os = bucket.getOutputStream(); r.nextBytes(data); for(int written=0;written<length;) { int toWrite = Math.min(length - written, 4095); os.write(data, written, toWrite); written += toWrite; } os.close(); InputStream is = bucket.getInputStream(); for(int moved=0;moved<length;) { int readBytes = Math.min(length - moved, 4093); // Co-prime with 4095 byte[] buf = new byte[readBytes]; readBytes = is.read(buf); assertTrue(readBytes > 0); assertTrue(Arrays.equals(Arrays.copyOfRange(buf, 0, readBytes), Arrays.copyOfRange(data, moved, moved+readBytes))); moved += readBytes; } is.close(); bucket.free(); }