/**
 * Closes the currently open S3 object input stream, if one is open.
 *
 * @throws KettleException if the underlying stream cannot be closed
 */
public void closeFile() throws KettleException {
  try {
    if ( data.s3ObjectInputStream != null ) {
      data.s3ObjectInputStream.close();
    }
  } catch ( IOException e ) {
    // filenr has already been advanced past the open file, hence the -1.
    // The trailing "'" balances the opening quote around the file name.
    throw new KettleException(
        "Unable to close file channel for file '" + data.filenames[data.filenr - 1] + "'", e );
  }
}
}
fos.write(read_buf, 0, read_len); s3is.close(); fos.close(); } catch (AmazonServiceException e) {
/**
 * Downloads this object's content into the given output stream and returns the
 * object's metadata.
 *
 * <p>On a read/write failure the underlying connection is aborted (so the
 * remaining payload is not drained from the socket) and a
 * {@code SdkClientException} wrapping the cause is thrown.
 *
 * @param output the destination stream; not closed by this method
 * @param requestMetricCollector metric collector attached to the GET request
 * @return the metadata of the downloaded object
 */
private ObjectMetadata downloadTo0(final OutputStream output,
        RequestMetricCollector requestMetricCollector) {
    GetObjectRequest req = new GetObjectRequest(getBucketName(), getKey())
            .withRequestMetricCollector(requestMetricCollector);
    S3Object s3Object = getAmazonS3Client().getObject(req);
    S3ObjectInputStream objectContent = s3Object.getObjectContent();
    try {
        byte[] buffer = new byte[1024 * 10];
        int bytesRead;
        while ((bytesRead = objectContent.read(buffer)) > -1) {
            output.write(buffer, 0, bytesRead);
        }
    } catch (IOException ioe) {
        // Abort rather than close: closing would read the rest of the object
        // off the wire before releasing the connection.
        objectContent.abort();
        throw new SdkClientException(
                "Unable to transfer content from Amazon S3 to the output stream", ioe);
    } finally {
        try {
            objectContent.close();
        } catch (IOException ignored) {
            // Best-effort close; the transfer already completed or was aborted,
            // so a close failure is not actionable here.
        }
    }
    return s3Object.getObjectMetadata();
}
} finally { try { if( s != null ) s.close(); } catch( IOException e ) {}
data.s3ObjectInputStream.close();
/**
 * Releases any underlying system resources. If the resources are already
 * released then invoking this method has no effect.
 *
 * @throws IOException if an I/O error occurs
 */
@Override
public void close() throws IOException {
    // A null object content means there is no underlying stream to release,
    // so this is a no-op in that case.
    if (getObjectContent() != null) {
        getObjectContent().close();
    }
}
/**
 * To allow customers to override abort to just close. We can think about exposing this method
 * as protected to allow customers to completely prevent the abort behavior if there is a need
 */
private void doAbort() {
    try {
        close();
    } catch (final IOException e) {
        // expected from some implementations because the stream is closed
        // Log at debug only: a close failure during abort is informational, not an error.
        LogFactory.getLog(getClass()).debug("FYI", e);
    }
}
s3Object.getObjectContent().close(); } catch (final Exception e) { log.debug("Caught exception. Ignoring.");
/**
 * Closes the most recently opened stream, if any, and clears the reference so
 * that subsequent calls to this method are no-ops.
 *
 * @throws IOException if closing the underlying stream fails
 */
@Override
public void close() throws IOException {
    if (lastOpenStream == null) {
        return;
    }
    lastOpenStream.close();
    lastOpenStream = null;
}
}
} finally { try { objectContent.close(); } catch (final IOException ioe) { LOGGER.warn("could not close the object content", ioe);
/**
 * Closes this stream by delegating to the wrapped stream.
 *
 * @throws IOException if the wrapped stream fails to close
 */
@Override
public void close() throws IOException {
    wrapped.close();
}
S3ObjectInputStream s3InputStream = exchange.getIn().getBody(S3ObjectInputStream.class); s3InputStream.close();
/**
 * Releases any underlying system resources. If the resources are already
 * released then invoking this method has no effect.
 *
 * @throws IOException if an I/O error occurs
 */
@Override
public void close() throws IOException {
    // A null object content means there is no underlying stream to release,
    // so this is a no-op in that case.
    if (getObjectContent() != null) {
        getObjectContent().close();
    }
}
/**
 * Copies the content of an S3 object into a file in the given directory, named
 * after the final path segment of the reference's data-store reference.
 *
 * <p>Close failures are deliberately swallowed (best-effort cleanup); only the
 * copy itself can propagate an {@link IOException}.
 *
 * @param file the S3 object whose content is staged
 * @param ref reference whose data-store path supplies the target file name
 * @param directory destination directory for the staged file
 * @throws IOException if reading from S3 or writing the local file fails
 */
private void stageFile(S3Object file, Reference ref, File directory) throws IOException {
    S3ObjectInputStream inStream = null;
    FileOutputStream outStream = null;
    try {
        inStream = file.getObjectContent();
        outStream = new FileOutputStream(new File(directory, new File(
            stripProtocol(ref.getDataStoreReference(), false)).getName()));
        IOUtils.copy(inStream, outStream);
    } finally {
        // Null-guard explicitly instead of relying on the catch to swallow a
        // NullPointerException when stream acquisition failed.
        if (inStream != null) {
            try { inStream.close(); } catch (Exception ignored) { /* best-effort */ }
        }
        if (outStream != null) {
            try { outStream.close(); } catch (Exception ignored) { /* best-effort */ }
        }
    }
}
/**
 * Copies the content of an S3 object into a file in the given directory, named
 * after the final path segment of the reference's data-store reference.
 *
 * <p>Close failures are deliberately swallowed (best-effort cleanup); only the
 * copy itself can propagate an {@link IOException}.
 *
 * @param file the S3 object whose content is staged
 * @param ref reference whose data-store path supplies the target file name
 * @param directory destination directory for the staged file
 * @throws IOException if reading from S3 or writing the local file fails
 */
private void stageFile(S3Object file, Reference ref, File directory) throws IOException {
    S3ObjectInputStream inStream = null;
    FileOutputStream outStream = null;
    try {
        inStream = file.getObjectContent();
        outStream = new FileOutputStream(new File(directory, new File(
            stripProtocol(ref.getDataStoreReference(), false)).getName()));
        IOUtils.copy(inStream, outStream);
    } finally {
        // Null-guard explicitly instead of relying on the catch to swallow a
        // NullPointerException when stream acquisition failed.
        if (inStream != null) {
            try { inStream.close(); } catch (Exception ignored) { /* best-effort */ }
        }
        if (outStream != null) {
            try { outStream.close(); } catch (Exception ignored) { /* best-effort */ }
        }
    }
}
/**
 * To allow customers to override abort to just close. We can think about exposing this method
 * as protected to allow customers to completely prevent the abort behavior if there is a need
 */
private void doAbort() {
    try {
        close();
    } catch (final IOException e) {
        // expected from some implementations because the stream is closed
        // Log at debug only: a close failure during abort is informational, not an error.
        LogFactory.getLog(getClass()).debug("FYI", e);
    }
}
/**
 * Streams the content of the S3 object identified by {@code source}
 * ("bucket/key") into the supplied output stream. The object content stream is
 * always closed; the caller's output stream is left open.
 *
 * @param source path containing the bucket and (required) key
 * @param outputStream destination for the object's bytes
 * @throws IOException if the transfer or the close fails
 */
@Override
public void read(String source, OutputStream outputStream) throws IOException {
    String[] parts = splitPathToBucketAndKey(source, true);
    S3Object object = this.amazonS3.getObject(parts[0], parts[1]);
    S3ObjectInputStream content = object.getObjectContent();
    try {
        StreamUtils.copy(content, outputStream);
    } finally {
        content.close();
    }
}
/**
 * Closes this stream, marking it closed and releasing the wrapped S3 stream.
 * When at most {@code CLOSE_THRESHOLD} bytes of the object remain unread, the
 * wrapped stream is closed normally so the HTTP connection can be reused;
 * otherwise it is aborted to avoid draining the rest of the payload.
 *
 * @throws IOException if an I/O error occurs while closing
 */
@Override
public synchronized void close() throws IOException {
    super.close();
    closed = true;
    if (wrappedStream != null) {
        if (contentLength - pos <= CLOSE_THRESHOLD) {
            // Close, rather than abort, so that the http connection can be reused.
            wrappedStream.close();
        } else {
            // Abort, rather than just close, the underlying stream. Otherwise, the
            // remaining object payload is read from S3 while closing the stream.
            wrappedStream.abort();
        }
    }
}
/**
 * Streams the content of the S3 object identified by {@code source}
 * ("bucket/key") into the supplied output stream. The object content stream is
 * always closed; the caller's output stream is left open.
 *
 * @param source path containing the bucket and (required) key
 * @param outputStream destination for the object's bytes
 * @throws IOException if the transfer or the close fails
 */
@Override
public void read(String source, OutputStream outputStream) throws IOException {
    String[] parts = splitPathToBucketAndKey(source, true);
    S3Object object = this.amazonS3.getObject(parts[0], parts[1]);
    S3ObjectInputStream content = object.getObjectContent();
    try {
        StreamUtils.copy(content, outputStream);
    } finally {
        content.close();
    }
}
/**
 * Downloads this object's content into the given output stream and returns the
 * object's metadata.
 *
 * <p>On a read/write failure the underlying connection is aborted (so the
 * remaining payload is not drained from the socket) and a
 * {@code SdkClientException} wrapping the cause is thrown.
 *
 * @param output the destination stream; not closed by this method
 * @param requestMetricCollector metric collector attached to the GET request
 * @return the metadata of the downloaded object
 */
private ObjectMetadata downloadTo0(final OutputStream output,
        RequestMetricCollector requestMetricCollector) {
    GetObjectRequest req = new GetObjectRequest(getBucketName(), getKey())
            .withRequestMetricCollector(requestMetricCollector);
    S3Object s3Object = getAmazonS3Client().getObject(req);
    S3ObjectInputStream objectContent = s3Object.getObjectContent();
    try {
        byte[] buffer = new byte[1024 * 10];
        int bytesRead;
        while ((bytesRead = objectContent.read(buffer)) > -1) {
            output.write(buffer, 0, bytesRead);
        }
    } catch (IOException ioe) {
        // Abort rather than close: closing would read the rest of the object
        // off the wire before releasing the connection.
        objectContent.abort();
        throw new SdkClientException(
                "Unable to transfer content from Amazon S3 to the output stream", ioe);
    } finally {
        try {
            objectContent.close();
        } catch (IOException ignored) {
            // Best-effort close; the transfer already completed or was aborted,
            // so a close failure is not actionable here.
        }
    }
    return s3Object.getObjectMetadata();
}