public static Bucket[] makeBuckets(BucketFactory bf, int count, int size) throws IOException {
    Bucket[] ret = new Bucket[count];
    for (int i = 0; i < count; i++) {
        ret[i] = bf.makeBucket(size);
    }
    return ret;
}
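// Minimal usage sketch for makeBuckets (hypothetical caller): assumes a
// BucketFactory `bf` is in scope. Buckets are explicitly managed storage,
// so the caller is responsible for freeing them.
Bucket[] workspace = makeBuckets(bf, 16, 32768); // 16 buckets of 32 KiB each
try {
    // ... fill and read the buckets ...
} finally {
    for (Bucket b : workspace) {
        b.free();
    }
}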
RandomAccessBucket createBucket(BucketFactory bf, long length, FCPServer server)
        throws IOException, PersistenceDisabledException {
    return bf.makeBucket(length);
}
@Override
public void readFrom(InputStream is, BucketFactory bf, FCPServer server)
        throws IOException, MessageInvalidException {
    if (initialMetadataLength == 0) return;
    Bucket data = bf.makeBucket(initialMetadataLength);
    BucketTools.copyFrom(data, is, initialMetadataLength);
    // No need for synchronization here.
    initialMetadata = data;
}
public static RandomAccessBucket makeImmutableBucket(BucketFactory bucketFactory, byte[] data,
        int offset, int length) throws IOException {
    RandomAccessBucket bucket = bucketFactory.makeBucket(length);
    OutputStream os = bucket.getOutputStreamUnbuffered();
    try {
        os.write(data, offset, length);
    } finally {
        os.close();
    }
    bucket.setReadOnly();
    return bucket;
}
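// Hypothetical usage sketch for makeImmutableBucket: wrap a byte-array slice in a
// read-only bucket. `bucketFactory` is assumed to be any available BucketFactory.
byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
RandomAccessBucket immutable = makeImmutableBucket(bucketFactory, payload, 0, payload.length);
// Further writes should fail: the bucket was marked read-only before being returned.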
public InputStream checksumReaderWithLength(InputStream dis, BucketFactory bf, long maxLength)
        throws IOException, ChecksumFailedException {
    // Implemented by copying: the checksum is verified into a temporary bucket first,
    // so we never start constructing objects from bad data.
    long length = new DataInputStream(dis).readLong();
    if (length < 0 || length > maxLength) throw new IOException("Bad length");
    final Bucket bucket = bf.makeBucket(-1);
    OutputStream os = bucket.getOutputStream();
    copyAndStripChecksum(dis, os, length);
    os.close();
    return ReadBucketAndFreeInputStream.create(bucket);
}
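// Hypothetical decode-side sketch for checksumReaderWithLength: corrupt input fails
// here, before any deserialization begins. `checker`, `rawIn` and MAX_SETTINGS_LENGTH
// are illustrative names, not part of the API above.
InputStream verified = checker.checksumReaderWithLength(rawIn, bf, MAX_SETTINGS_LENGTH);
// Read settings from `verified`; the backing bucket is freed when the stream is closed.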
/**
 * Create a stream which writes to temporary space and then, on a non-aborted close(),
 * will write the length (minus the offset) followed by the data.
 */
public static PrependLengthOutputStream create(OutputStream out, BucketFactory bf, int offset,
        boolean closeUnderlying) throws IOException {
    Bucket temp = bf.makeBucket(-1);
    OutputStream os = temp.getOutputStream();
    return new PrependLengthOutputStream(os, temp, out, offset, closeUnderlying);
}
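// Sketch of the intended write pattern (variable names hypothetical): data written
// before close() goes to temporary space; a successful close() then emits the length
// (minus the offset) followed by the buffered bytes to the underlying stream.
OutputStream plos = PrependLengthOutputStream.create(out, bf, 0, true);
plos.write(settingsBytes); // buffered in the temp bucket, not yet visible on `out`
plos.close();              // writes length, then data, to `out`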
/**
 * Lazily create the cached output stream, writing either to the single output bucket
 * or to a fresh bucket appended to the list, and emit the binary blob header on first use.
 */
private DataOutputStream getOutputStream() throws IOException, BinaryBlobAlreadyClosedException {
    if (_finalized) {
        throw new BinaryBlobAlreadyClosedException("Already finalized (getting final data) on " + this);
    }
    if (_stream_cache == null) {
        if (_isSingleBucket) {
            _stream_cache = new DataOutputStream(_out.getOutputStream());
        } else {
            Bucket newBucket = _bf.makeBucket(-1);
            _buckets.add(newBucket);
            _stream_cache = new DataOutputStream(newBucket.getOutputStream());
        }
    }
    if (!_started) {
        BinaryBlob.writeBinaryBlobHeader(_stream_cache);
        _started = true;
    }
    return _stream_cache;
}
/** Create a bucket of the given size and fill it with random data from the shared RNG. */
public static Bucket makeRandomBucket(long size) throws IOException {
    Bucket b = bf.makeBucket(size);
    BucketTools.fill(b, random, size);
    return b;
}
/**
 * This one could actually be rather large, since it includes the listing of
 * which blocks go in which cross-segments...
 */
private Bucket encodeCrossSegmentSettings(BucketFactory bf) throws IOException {
    if (crossSegments == null) return new NullBucket();
    Bucket bucket = bf.makeBucket(-1);
    OutputStream os = bucket.getOutputStream();
    OutputStream cos = checker.checksumWriterWithLength(os, new ArrayBucketFactory());
    DataOutputStream dos = new DataOutputStream(cos);
    for (SplitFileInserterCrossSegmentStorage segment : crossSegments) {
        segment.writeFixedSettings(dos);
    }
    dos.close();
    os.close();
    return bucket;
}
@Override
public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength)
        throws IOException, CompressionOutputSizeException {
    Bucket output = bf.makeBucket(maxWriteLength);
    InputStream is = null;
    OutputStream os = null;
    try {
        is = data.getInputStream();
        os = output.getOutputStream();
        compress(is, os, maxReadLength, maxWriteLength);
        // It is essential that the close()'s throw if there is any problem.
        is.close();
        is = null;
        os.close();
        os = null;
    } finally {
        Closer.close(is);
        Closer.close(os);
    }
    return output;
}
/** Serialize this metadata into a fresh read-only bucket; the bucket is freed on failure. */
public RandomAccessBucket toBucket(BucketFactory bf) throws MetadataUnresolvedException, IOException {
    RandomAccessBucket b = bf.makeBucket(-1);
    DataOutputStream dos = null;
    boolean success = false;
    try {
        dos = new DataOutputStream(b.getOutputStream());
        writeTo(dos);
        dos.close();
        dos = null;
        b.setReadOnly(); // Must be after dos.close()
        success = true;
        return b;
    } finally {
        Closer.close(dos);
        if (!success) b.free();
    }
}
/**
 * Convert a Bucket to a RandomAccessBucket without copying where possible.
 * If a copy is needed, the original bucket is freed afterwards.
 */
public static RandomAccessBucket toRandomAccessBucket(Bucket bucket, BucketFactory bf) throws IOException {
    if (bucket instanceof RandomAccessBucket)
        return (RandomAccessBucket) bucket;
    if (bucket instanceof DelayedFreeBucket) {
        RandomAccessBucket ret = ((DelayedFreeBucket) bucket).toRandomAccessBucket();
        if (ret != null) return ret;
    }
    RandomAccessBucket ret = bf.makeBucket(bucket.size());
    BucketTools.copy(bucket, ret);
    bucket.free();
    return ret;
}
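// Hypothetical usage sketch: upgrade an arbitrary Bucket to random access. Note the
// ownership transfer above: when a copy is made the input bucket is freed, so callers
// must use only the returned bucket afterwards.
RandomAccessBucket rab = toRandomAccessBucket(bucket, bf);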
@Override
public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength)
        throws IOException, CompressionOutputSizeException {
    Bucket output;
    InputStream is = null;
    OutputStream os = null;
    try {
        output = bf.makeBucket(maxWriteLength);
        is = data.getInputStream();
        os = output.getOutputStream();
        if (logMINOR)
            Logger.minor(this, "Compressing " + data + " size " + data.size() + " to new bucket " + output);
        compress(is, os, maxReadLength, maxWriteLength);
        // It is essential that the close()'s throw if there is any problem.
        is.close();
        is = null;
        os.close();
        os = null;
    } finally {
        Closer.close(is);
        Closer.close(os);
    }
    return output;
}
private ClientGetter makeGetter(NodeClientCore core, Bucket ret) throws IOException {
    if (binaryBlob && ret == null) {
        ret = core.clientContext.getBucketFactory(persistence == Persistence.FOREVER)
                .makeBucket(fctx.maxOutputLength);
    }
    return new ClientGetter(this, uri, fctx, priorityClass,
            binaryBlob ? new NullBucket() : ret,
            binaryBlob ? new BinaryBlobWriter(ret) : null,
            false, initialMetadata, extensionCheck);
}
public void verifyOutput(SplitFileFetcherStorage storage) throws IOException {
    StreamGenerator g = storage.streamGenerator();
    Bucket out = bf.makeBucket(-1);
    OutputStream os = out.getOutputStream();
    g.writeTo(os, null);
    os.close();
    assertTrue(BucketTools.equalBuckets(originalData, out));
    out.free();
}
private void verifyOutput(SplitFileFetcherStorage storage, Bucket originalData) throws IOException {
    StreamGenerator g = storage.streamGenerator();
    Bucket out = smallBucketFactory.makeBucket(-1);
    OutputStream os = out.getOutputStream();
    g.writeTo(os, null);
    os.close();
    assertTrue(BucketTools.equalBuckets(originalData, out));
    out.free();
}
public Bucket decompress(Bucket data, BucketFactory bf, long maxLength, long maxCheckSizeLength,
        Bucket preferred) throws IOException, CompressionOutputSizeException {
    Bucket output;
    if (preferred != null)
        output = preferred;
    else
        output = bf.makeBucket(maxLength);
    if (logMINOR)
        Logger.minor(this, "Decompressing " + data + " size " + data.size() + " to new bucket " + output);
    CountedInputStream is = null;
    OutputStream os = null;
    try {
        is = new CountedInputStream(data.getInputStream());
        os = output.getOutputStream();
        decompress(is, os, maxLength, maxCheckSizeLength);
        if (logMINOR)
            Logger.minor(this, "Output: " + output + " size " + output.size() + " read " + is.count());
        // It is essential that the close()'s throw if there is any problem.
        is.close();
        is = null;
        os.close();
        os = null;
    } finally {
        Closer.close(is);
        Closer.close(os);
    }
    return output;
}
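// Hypothetical round-trip sketch, assuming `compressor` is an instance of one of the
// compressor implementations above and the data fits within the given limits:
Bucket compressed = compressor.compress(original, bf, maxReadLength, maxWriteLength);
Bucket restored = compressor.decompress(compressed, bf, maxLength, maxCheckSizeLength, null);
// `restored` should now contain the same bytes as `original`; passing a non-null
// `preferred` bucket instead would reuse existing storage for the output.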