/**
 * Closes the wrapped input stream and frees the backing bucket.
 *
 * @throws IOException if closing the underlying stream fails; the bucket
 *         is still freed in that case.
 */
@Override
public void close() throws IOException {
    try {
        in.close();
    } finally {
        // Free the bucket even when in.close() throws — the original
        // leaked the bucket on a failed close.
        data.free();
    }
}
/**
 * Parse a bucket of data into a Metadata structure.
 *
 * @param data the bucket containing the serialized metadata
 * @return the parsed Metadata
 * @throws MetadataParseException If the parsing failed because of invalid metadata.
 * @throws IOException If we could not read the metadata from the bucket.
 */
public static Metadata construct(Bucket data) throws MetadataParseException, IOException {
    // try-with-resources replaces the manual try/finally close; the stream
    // is closed on every exit path, including when the Metadata constructor throws.
    try (InputStream is = data.getInputStream()) {
        DataInputStream dis = new DataInputStream(is);
        return new Metadata(dis, data.size());
    }
}
/** Delegates to the wrapped bucket's output stream. */
@Override
public OutputStream getOutputStream() throws IOException {
    final OutputStream stream = proxy.getOutputStream();
    return stream;
}
/**
 * Opens an input stream over the enclosing bucket.
 *
 * @param buffer if true, use the buffered stream variant; otherwise the
 *        unbuffered one
 * @throws IOException if the stream cannot be opened
 */
ReaderBucketInputStream(boolean buffer) throws IOException {
    if (buffer) {
        is = bucket.getInputStream();
    } else {
        is = bucket.getInputStreamUnbuffered();
    }
}
/**
 * Compares the contents of two buckets for byte-for-byte equality.
 *
 * @param a the first bucket
 * @param b the second bucket
 * @return true if both buckets have the same size and identical contents
 * @throws IOException if either bucket cannot be read
 */
public static boolean equalBuckets(Bucket a, Bucket b) throws IOException {
    // Cheap size check first: different sizes can never be equal.
    if (a.size() != b.size())
        return false;
    long size = a.size();
    // try-with-resources closes both streams on every path. The original
    // could NPE in its finally block when the first getInputStreamUnbuffered()
    // threw (aIn still null), and leaked bIn when aIn.close() threw.
    try (InputStream aIn = a.getInputStreamUnbuffered();
         InputStream bIn = b.getInputStreamUnbuffered()) {
        return FileUtil.equalStreams(aIn, bIn, size);
    }
}
/**
 * Compresses the contents of {@code data} into a freshly allocated bucket.
 *
 * @param data the bucket to read uncompressed data from
 * @param bf factory used to allocate the output bucket
 * @param maxReadLength maximum number of bytes to read from {@code data}
 * @param maxWriteLength maximum number of bytes to write to the output
 * @return the new bucket containing the compressed data
 * @throws IOException if reading, writing, or closing fails
 * @throws CompressionOutputSizeException if the compressed output would exceed the limit
 */
@Override
public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength)
        throws IOException, CompressionOutputSizeException {
    Bucket output;
    InputStream is = null;
    OutputStream os = null;
    try {
        output = bf.makeBucket(maxWriteLength);
        is = data.getInputStream();
        os = output.getOutputStream();
        if(logMINOR)
            Logger.minor(this, "Compressing "+data+" size "+data.size()+" to new bucket "+output);
        compress(is, os, maxReadLength, maxWriteLength);
        // It is essential that the close()'s throw if there is any problem.
        // Nulling the references afterwards makes the finally-block cleanup a
        // no-op on the success path, so close failures are not swallowed.
        is.close();
        is = null;
        os.close();
        os = null;
    } finally {
        // Failure path only: best-effort, null-safe close of whatever is
        // still open (Closer swallows close exceptions).
        Closer.close(is);
        Closer.close(os);
    }
    // NOTE(review): if compress() or a close() throws, the allocated output
    // bucket is not freed here — confirm whether callers rely on that.
    return output;
}
/**
 * Runs the content filter over the data in {@code input}, writing the
 * filtered result into {@code output}.
 *
 * @param input bucket holding the unfiltered data
 * @param output bucket the filtered data is written to
 * @param mimeType MIME type used to select the filter
 * @param operation which filter operation to perform
 * @param core the node client core passed through to the filter
 * @return the status reported by the stream-based filter
 * @throws UnsafeContentTypeException if the content type is rejected
 * @throws IOException if reading or writing fails
 */
private FilterStatus applyFilter(Bucket input, Bucket output, String mimeType, FilterOperation operation, NodeClientCore core) throws UnsafeContentTypeException, IOException {
    InputStream in = null;
    OutputStream out = null;
    try {
        in = input.getInputStream();
        out = output.getOutputStream();
        // Delegate to the stream-based overload.
        return applyFilter(in, out, mimeType, operation, core);
    } finally {
        // Null-safe, exception-swallowing cleanup of both streams.
        Closer.close(in);
        Closer.close(out);
    }
}
/** Delegates to the wrapped bucket's input stream. */
@Override
public InputStream getInputStream() throws IOException {
    final InputStream stream = proxy.getInputStream();
    return stream;
}
/**
 * Regenerates the fetched data from {@code storage} and asserts that it is
 * byte-for-byte equal to the original data.
 *
 * @param storage the fetcher storage to regenerate the stream from
 * @throws IOException if writing or comparing the data fails
 */
public void verifyOutput(SplitFileFetcherStorage storage) throws IOException {
    StreamGenerator g = storage.streamGenerator();
    Bucket out = bf.makeBucket(-1);
    try {
        OutputStream os = out.getOutputStream();
        try {
            g.writeTo(os, null);
        } finally {
            // Close even if writeTo throws; the original leaked the stream
            // and the bucket on any failure before the final free().
            os.close();
        }
        assertTrue(BucketTools.equalBuckets(originalData, out));
    } finally {
        out.free();
    }
}
/**
 * Streams the bucket's contents to {@code os}, then closes the stream and
 * frees the bucket. This generator is single-use: the bucket is consumed.
 *
 * @param os the destination stream; closed by this method on every path
 * @param context client context (unused here)
 * @throws IOException if reading the bucket or writing to {@code os} fails
 */
@Override
public void writeTo(OutputStream os, ClientContext context) throws IOException {
    try{
        if(logMINOR)
            Logger.minor(this, "Generating Stream", new Exception("debug"));
        InputStream data = bucket.getInputStream();
        try {
            // -1 = copy until EOF.
            FileUtil.copy(data, os, -1);
        } finally {
            data.close();
        }
        // Explicit close so any flush failure is reported to the caller;
        // the Closer calls below swallow exceptions.
        os.close();
        bucket.free();
        if(logMINOR)
            Logger.minor(this, "Stream completely generated", new Exception("debug"));
    } finally {
        // Failure-path cleanup. On success these are redundant with the
        // explicit close()/free() above; Closer tolerates double-close.
        Closer.close(bucket);
        Closer.close(os);
    }
}
return buckets; long length = origData.size(); if(length > ((long)Integer.MAX_VALUE) * splitSize) throw new IllegalArgumentException("Way too big!: "+length+" for "+splitSize); Logger.minor(BucketTools.class, "Splitting bucket "+origData+" of size "+length+" into "+bucketCount+" buckets"); Bucket[] buckets = new Bucket[bucketCount]; InputStream is = origData.getInputStreamUnbuffered(); DataInputStream dis = null; try { dis.readFully(buf, 0, len); remainingLength -= len; OutputStream os = bucket.getOutputStreamUnbuffered(); try { os.write(buf, 0, len); origData.free(); return buckets;
/**
 * Creates a message describing the outcome of a content-filter run.
 *
 * @param identifier request identifier this result belongs to
 * @param charset character set reported by the filter
 * @param mimeType MIME type of the content
 * @param unsafeContentType true if the content type was rejected as unsafe
 * @param bucket the filtered data; only retained when the content is safe
 */
public FilterResultMessage(String identifier, String charset, String mimeType, boolean unsafeContentType, Bucket bucket) {
    this.identifier = identifier;
    this.charset = charset;
    this.mimeType = mimeType;
    this.unsafeContentType = unsafeContentType;
    if (!unsafeContentType) {
        // Safe content: expose the data and its length.
        this.dataLength = bucket.size();
        this.bucket = bucket;
    } else {
        // Unsafe content: no data is carried.
        this.dataLength = -1;
    }
}
/**
 * Builds a self-contained BlockItem snapshot of this insert's data for the
 * given key, or null if the insert has already finished or cannot proceed.
 *
 * @param key identity of the block being requested
 * @param context client context used for failure reporting and temp buckets
 * @return a BlockItem wrapping a shadow (or temp copy) of the source data,
 *         or null if finished or if persistent source data is missing
 * @throws InsertException with BUCKET_ERROR if copying the data fails
 */
private BlockItem getBlockItem(BlockItemKey key, ClientContext context) throws InsertException {
    try {
        synchronized(this) {
            // Once finished there is nothing to hand out.
            if(finished) return null;
        }
        if(persistent) {
            // Persistent inserts must still have their source data; if it is
            // gone the insert is failed rather than silently skipped.
            if(sourceData == null) {
                Logger.error(this, "getBlockItem(): sourceData = null", new Exception("error"));
                fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR), context);
                return null;
            }
        }
        // Prefer a lightweight shadow of the source bucket.
        Bucket data = sourceData.createShadow();
        FreenetURI u = uri;
        if(u.getKeyType().equals("CHK")) u = FreenetURI.EMPTY_CHK_URI;
        if(data == null) {
            // Shadowing unsupported for this bucket type: fall back to a
            // full copy into a temp bucket.
            data = context.tempBucketFactory.makeBucket(sourceData.size());
            BucketTools.copy(sourceData, data);
        }
        CompatibilityMode cmode = ctx.getCompatibilityMode();
        // pre1254: legacy key derivation for compatibility modes older than 1255.
        boolean pre1254 = !(cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal());
        return new BlockItem(key, data, isMetadata, compressionCodec, sourceLength, u, persistent, pre1254, cryptoAlgorithm, cryptoKey);
    } catch (IOException e) {
        throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
    }
}
/**
 * Converts this persisted element into a current-API ManifestElement,
 * either as a pure redirect (no data) or by resuming and wrapping the
 * stored data bucket.
 *
 * @param bf factory used when converting the bucket to random-access form
 * @param context client context used to resume the bucket
 * @return the migrated manifest element
 * @throws ResumeFailedException if neither data nor a target URI is present,
 *         or if the stored bucket's size no longer matches
 * @throws IOException if the bucket conversion fails
 */
public freenet.support.api.ManifestElement migrate(BucketFactory bf, ClientContext context) throws ResumeFailedException, IOException {
    if (data == null) {
        // Redirect-only element: a target URI is mandatory.
        if (targetURI == null)
            throw new ResumeFailedException("Must have either a URI or a redirect");
        return new freenet.support.api.ManifestElement(name, fullName, mimeOverride, targetURI);
    }
    // Data-carrying element: verify the bucket was not altered while persisted.
    if (data.size() != dataSize)
        throw new ResumeFailedException("Bucket in site insert changed size from "+dataSize+" to "+data.size());
    data.onResume(context);
    RandomAccessBucket convertedData = BucketTools.toRandomAccessBucket(data, bf);
    return new freenet.support.api.ManifestElement(name, fullName, convertedData, mimeOverride, dataSize);
}
/** * Copy from the input stream of <code>src</code> to the output stream of * <code>dest</code>. * * @param src * @param dst * @throws IOException */ public static void copy(Bucket src, Bucket dst) throws IOException { OutputStream out = dst.getOutputStreamUnbuffered(); InputStream in = src.getInputStreamUnbuffered(); ReadableByteChannel readChannel = Channels.newChannel(in); WritableByteChannel writeChannel = Channels.newChannel(out); try { // No benefit to allocateDirect() as we're wrapping streams anyway, and worse, it'd be a memory leak. ByteBuffer buffer = ByteBuffer.allocate(BUFFER_SIZE); while (readChannel.read(buffer) != -1) { buffer.flip(); while(buffer.hasRemaining()) writeChannel.write(buffer); buffer.clear(); } } finally { writeChannel.close(); readChannel.close(); } }
/** Delegates to the wrapped bucket's unbuffered input stream. */
@Override
public InputStream getInputStreamUnbuffered() throws IOException {
    final InputStream stream = proxy.getInputStreamUnbuffered();
    return stream;
}
/** Delegates shadow creation to the wrapped bucket. */
@Override
public Bucket createShadow() {
    final Bucket shadow = proxy.createShadow();
    return shadow;
}
/** Delegates to the wrapped bucket's name. */
@Override
public String getName() {
    final String name = bucket.getName();
    return name;
}
/**
 * Compresses the contents of {@code data} into a freshly allocated bucket.
 *
 * @param data the bucket to read uncompressed data from
 * @param bf factory used to allocate the output bucket
 * @param maxReadLength maximum number of bytes to read from {@code data}
 * @param maxWriteLength maximum number of bytes to write to the output
 * @return the new bucket containing the compressed data
 * @throws IOException if reading, writing, or closing fails
 * @throws CompressionOutputSizeException if the compressed output would exceed the limit
 */
@Override
public Bucket compress(Bucket data, BucketFactory bf, long maxReadLength, long maxWriteLength)
        throws IOException, CompressionOutputSizeException {
    Bucket output;
    InputStream is = null;
    OutputStream os = null;
    try {
        output = bf.makeBucket(maxWriteLength);
        is = data.getInputStream();
        os = output.getOutputStream();
        if(logMINOR)
            Logger.minor(this, "Compressing "+data+" size "+data.size()+" to new bucket "+output);
        compress(is, os, maxReadLength, maxWriteLength);
        // It is essential that the close()'s throw if there is any problem.
        // Nulling the references afterwards makes the finally-block cleanup a
        // no-op on the success path, so close failures are not swallowed.
        is.close();
        is = null;
        os.close();
        os = null;
    } finally {
        // Failure path only: best-effort, null-safe close of whatever is
        // still open (Closer swallows close exceptions).
        Closer.close(is);
        Closer.close(os);
    }
    // NOTE(review): if compress() or a close() throws, the allocated output
    // bucket is not freed here — confirm whether callers rely on that.
    return output;
}