/**
 * Creates an array of {@code count} buckets, each allocated with capacity {@code size}.
 *
 * @param bf factory used to allocate each bucket
 * @param count number of buckets to create
 * @param size size passed to {@link BucketFactory#makeBucket(long)} for every bucket
 * @return an array of {@code count} freshly created buckets
 * @throws IOException if any allocation fails; buckets created so far are freed
 *         first so that none are leaked
 */
public static Bucket[] makeBuckets(BucketFactory bf, int count, int size) throws IOException {
    Bucket[] ret = new Bucket[count];
    for (int i = 0; i < count; i++) {
        try {
            ret[i] = bf.makeBucket(size);
        } catch (IOException e) {
            // Don't leak the buckets we already allocated on partial failure.
            for (int j = 0; j < i; j++) {
                ret[j].free();
            }
            throw e;
        }
    }
    return ret;
}
/**
 * Allocates a random-access bucket of the requested length via the supplied factory.
 * The {@code server} argument is unused by this implementation.
 *
 * @param bf factory that performs the allocation
 * @param length requested bucket capacity in bytes
 * @param server unused here; part of the shared signature
 * @return the newly created bucket
 * @throws IOException if the factory cannot allocate the bucket
 * @throws PersistenceDisabledException declared for signature compatibility
 */
RandomAccessBucket createBucket(BucketFactory bf, long length, FCPServer server) throws IOException, PersistenceDisabledException {
    final RandomAccessBucket bucket = bf.makeBucket(length);
    return bucket;
}
@Override public void readFrom(InputStream is, BucketFactory bf, FCPServer server) throws IOException, MessageInvalidException { if(initialMetadataLength == 0) return; Bucket data; data = bf.makeBucket(initialMetadataLength); BucketTools.copyFrom(data, is, initialMetadataLength); // No need for synchronization here. initialMetadata = data; }
/**
 * Copies a region of a byte array into a freshly allocated bucket and marks it
 * read-only before returning it.
 *
 * @param bucketFactory factory used to allocate the bucket
 * @param data source array
 * @param offset start offset within {@code data}
 * @param length number of bytes to copy
 * @return a read-only bucket containing exactly the requested bytes
 * @throws IOException if allocation or the write fails
 */
public static RandomAccessBucket makeImmutableBucket(BucketFactory bucketFactory, byte[] data, int offset, int length) throws IOException {
    final RandomAccessBucket bucket = bucketFactory.makeBucket(length);
    final OutputStream out = bucket.getOutputStreamUnbuffered();
    try {
        out.write(data, offset, length);
    } finally {
        // Always release the stream, even if the write fails.
        out.close();
    }
    bucket.setReadOnly();
    return bucket;
}
public InputStream checksumReaderWithLength(InputStream dis, BucketFactory bf, long maxLength) throws IOException, ChecksumFailedException { // IMHO it is better to implement this with copying, because then we don't start // constructing objects from bad data... long length = new DataInputStream(dis).readLong(); if(length < 0 || length > maxLength) throw new IOException("Bad length"); final Bucket bucket = bf.makeBucket(-1); OutputStream os = bucket.getOutputStream(); copyAndStripChecksum(dis, os, length); os.close(); return ReadBucketAndFreeInputStream.create(bucket); }
/** Create a stream which writes to temporary space and then on a non-aborted close() will
 * write the length (minus the offset) followed by the data.
 *
 * @param out underlying stream that receives length + data on close()
 * @param bf factory for the temporary buffer bucket
 * @param offset number of bytes subtracted from the written length
 * @param closeUnderlying whether close() should also close {@code out}
 * @throws IOException if the temporary bucket cannot be created or opened;
 *         the bucket is freed before the exception propagates
 */
public static PrependLengthOutputStream create(OutputStream out, BucketFactory bf, int offset, boolean closeUnderlying) throws IOException {
    Bucket temp = bf.makeBucket(-1);
    boolean success = false;
    try {
        OutputStream os = temp.getOutputStream();
        success = true;
        return new PrependLengthOutputStream(os, temp, out, offset, closeUnderlying);
    } finally {
        // Don't leak the temp bucket if opening its stream failed.
        if (!success) temp.free();
    }
}
/**
 * Returns the (lazily created, cached) stream this blob writer appends to,
 * writing the binary-blob header on first use.
 *
 * <p>For a single-bucket writer the stream wraps {@code _out}; otherwise a new
 * bucket is allocated, added to {@code _buckets}, and wrapped.
 *
 * @return the cached {@link DataOutputStream} for this writer
 * @throws IOException if a bucket or stream cannot be created
 * @throws BinaryBlobAlreadyClosedException if the blob was already finalized
 */
private DataOutputStream getOutputStream() throws IOException, BinaryBlobAlreadyClosedException {
    if (_finalized) {
        throw new BinaryBlobAlreadyClosedException("Already finalized (getting final data) on "+this);
    }
    // Lazily create the stream on first call; subsequent calls reuse the cache.
    if (_stream_cache==null) {
        if (_isSingleBucket) {
            _stream_cache = new DataOutputStream(_out.getOutputStream());
        } else {
            // Multi-bucket mode: each new stream gets its own freshly tracked bucket.
            Bucket newBucket = _bf.makeBucket(-1);
            _buckets.add(newBucket);
            _stream_cache = new DataOutputStream(newBucket.getOutputStream());
        }
    }
    // Write the blob header exactly once, the first time the stream is handed out.
    if (!_started) {
        BinaryBlob.writeBinaryBlobHeader(_stream_cache);
        _started = true;
    }
    return _stream_cache;
}
/**
 * Creates a bucket of the given size filled with data from the shared
 * {@code random} source.
 *
 * @param size number of bytes of random data to write
 * @return the filled bucket
 * @throws IOException if allocation or filling fails
 */
public static Bucket makeRandomBucket(long size) throws IOException {
    final Bucket bucket = bf.makeBucket(size);
    BucketTools.fill(bucket, random, size);
    return bucket;
}
/**
 * This one could actually be rather large, since it includes the listing of
 * which blocks go in which cross-segments ...
 *
 * @param bf factory for the settings bucket
 * @return a bucket holding the checksummed, length-prefixed cross-segment
 *         settings, or a {@link NullBucket} when there are no cross-segments
 * @throws IOException if writing fails; the bucket is freed before the
 *         exception propagates so it is not leaked
 */
private Bucket encodeCrossSegmentSettings(BucketFactory bf) throws IOException {
    if (crossSegments == null) return new NullBucket();
    Bucket bucket = bf.makeBucket(-1);
    boolean success = false;
    try {
        OutputStream os = bucket.getOutputStream();
        // Wrap with a checksumming, length-prefixing writer.
        OutputStream cos = checker.checksumWriterWithLength(os, new ArrayBucketFactory());
        DataOutputStream dos = new DataOutputStream(cos);
        for (SplitFileInserterCrossSegmentStorage segment : crossSegments) {
            segment.writeFixedSettings(dos);
        }
        dos.close();
        os.close();
        success = true;
    } finally {
        // Don't leak the bucket if any write or the checksum flush fails.
        if (!success) bucket.free();
    }
    return bucket;
}
@Override public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength) throws IOException, CompressionOutputSizeException { Bucket output = bf.makeBucket(maxWriteLength); InputStream is = null; OutputStream os = null; try { is = data.getInputStream(); os = output.getOutputStream(); compress(is, os, maxReadLength, maxWriteLength); // It is essential that the close()'s throw if there is any problem. is.close(); is = null; os.close(); os = null; } finally { Closer.close(is); Closer.close(os); } return output; }
/**
 * Finalizes the blob data held by this writer.
 *
 * <p>In multi-bucket mode, collapses all accumulated buckets into a single
 * snapshot bucket (freeing the originals). In single-bucket mode with
 * {@code mark} set, appends the end-of-blob marker.
 *
 * @param mark when true, also write the end marker / set the result read-only
 *        and mark this writer as finalized
 * @throws IOException if snapshotting or writing the end marker fails
 * @throws BinaryBlobAlreadyClosedException if already finalized
 */
private void finalizeBucket(boolean mark) throws IOException, BinaryBlobAlreadyClosedException {
    if (_finalized)
        throw new BinaryBlobAlreadyClosedException("Already finalized (closing blob - 2).");
    if(logMINOR) Logger.minor(this, "Finalizing binary blob "+this, new Exception("debug"));
    if (!_isSingleBucket) {
        // Already collapsed to one bucket and not marking: nothing to do.
        if (!mark && (_buckets.size()==1)) {
            return;
        }
        // Collapse all accumulated buckets into one snapshot, then free the originals.
        Bucket out = _bf.makeBucket(-1);
        getSnapshot(out, mark);
        for (int i=0,n=_buckets.size(); i<n;i++) {
            _buckets.get(i).free();
        }
        if (mark) {
            out.setReadOnly();
        }
        _buckets.clear();
        _buckets.add(0, out);
    } else if (mark){
        // Single-bucket mode: append the end-of-blob marker directly.
        DataOutputStream out = new DataOutputStream(getOutputStream());
        try {
            BinaryBlob.writeEndBlob(out);
        } finally {
            out.close();
        }
    }
    if (mark) {
        _finalized = true;
    }
}
@Override public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength) throws IOException, CompressionOutputSizeException { Bucket output = bf.makeBucket(maxWriteLength); InputStream is = null; OutputStream os = null; try { is = data.getInputStream(); os = output.getOutputStream(); compress(is, os, maxReadLength, maxWriteLength); // It is essential that the close()'s throw if there is any problem. is.close(); is = null; os.close(); os = null; } finally { Closer.close(is); Closer.close(os); } return output; }
public RandomAccessBucket toBucket(BucketFactory bf) throws MetadataUnresolvedException, IOException { RandomAccessBucket b = bf.makeBucket(-1); DataOutputStream dos = null; boolean success = false; try { dos = new DataOutputStream(b.getOutputStream()); writeTo(dos); dos.close(); dos = null; b.setReadOnly(); // Must be after dos.close() success = true; return b; } finally { Closer.close(dos); if(!success) b.free(); } }
/**
 * Converts an arbitrary bucket into a {@link RandomAccessBucket}.
 *
 * <p>Returns the bucket itself (or its delayed-free wrapper's conversion) when
 * possible; otherwise copies the contents into a new random-access bucket and
 * frees the original.
 *
 * @param bucket source bucket; freed if its data had to be copied
 * @param bf factory for the replacement bucket when a copy is needed
 * @return a random-access view or copy of the data
 * @throws IOException if allocation or the copy fails; the new bucket is freed
 *         first so it is not leaked
 */
public static RandomAccessBucket toRandomAccessBucket(Bucket bucket, BucketFactory bf) throws IOException {
    if(bucket instanceof RandomAccessBucket) return (RandomAccessBucket)bucket;
    if(bucket instanceof DelayedFreeBucket) {
        RandomAccessBucket ret = ((DelayedFreeBucket)bucket).toRandomAccessBucket();
        if(ret != null) return ret;
    }
    RandomAccessBucket ret = bf.makeBucket(bucket.size());
    boolean success = false;
    try {
        BucketTools.copy(bucket, ret);
        success = true;
    } finally {
        // Don't leak the new bucket if the copy fails.
        if(!success) ret.free();
    }
    bucket.free();
    return ret;
}
// Allocate a temporary bucket of unlimited size (-1) and open a stream to fill it.
// NOTE(review): fragment — the matching catch/finally for this try lies outside this chunk.
Bucket data = ctx.getBucketFactory().makeBucket(-1);
OutputStream os = data.getOutputStream();
try {
// Split the incoming data into bucketCount buckets of at most splitSize bytes each.
// NOTE(review): fragment — the loop body continues past this chunk; remainingLength is
// presumably decremented there. TODO confirm in the full file.
for(int i=0;i<bucketCount;i++) {
    // The final bucket may be shorter than splitSize.
    int len = (int) Math.min(splitSize, remainingLength);
    Bucket bucket = bf.makeBucket(len);
    buckets[i] = bucket;
    dis.readFully(buf, 0, len);
// NOTE(review): is.available() is only an estimate of the remaining stream length,
// not a guarantee — TODO confirm callers ensure the whole payload fits this size hint.
filedata = this.bucketfactory.makeBucket(is.available());
bucketos = filedata.getOutputStream();
@Override public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength) throws IOException, CompressionOutputSizeException { Bucket output; InputStream is = null; OutputStream os = null; try { output = bf.makeBucket(maxWriteLength); is = data.getInputStream(); os = output.getOutputStream(); if(logMINOR) Logger.minor(this, "Compressing "+data+" size "+data.size()+" to new bucket "+output); compress(is, os, maxReadLength, maxWriteLength); // It is essential that the close()'s throw if there is any problem. is.close(); is = null; os.close(); os = null; } finally { Closer.close(is); Closer.close(os); } return output; }
// Build a bucket of blockLength bytes of deterministic pseudo-random data,
// seeded from hash so the same hash reproduces the same block.
// NOTE(review): fragment — the fill/close of os happens past this chunk.
Bucket b = bf.makeBucket(blockLength);
MersenneTwister mt = new MersenneTwister(hash);
OutputStream os = b.getOutputStreamUnbuffered();
@Override public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength) throws IOException, CompressionOutputSizeException { Bucket output; InputStream is = null; OutputStream os = null; try { output = bf.makeBucket(maxWriteLength); is = data.getInputStream(); os = output.getOutputStream(); if(logMINOR) Logger.minor(this, "Compressing "+data+" size "+data.size()+" to new bucket "+output); compress(is, os, maxReadLength, maxWriteLength); // It is essential that the close()'s throw if there is any problem. is.close(); is = null; os.close(); os = null; } finally { Closer.close(is); Closer.close(os); } return output; }