/**
 * Aborts intermediate multipart uploads that are older than the configured
 * (or default) clean age.
 */
@Override
public void cleanup() {
  // Resolve the age threshold: prefer the user-configured value, otherwise
  // fall back to parsing the property key's default.
  long cleanAge;
  if (mConf.isSet(PropertyKey.UNDERFS_S3A_INTERMEDIATE_UPLOAD_CLEAN_AGE)) {
    cleanAge = mConf.getMs(PropertyKey.UNDERFS_S3A_INTERMEDIATE_UPLOAD_CLEAN_AGE);
  } else {
    cleanAge = FormatUtils.parseTimeSize(
        PropertyKey.UNDERFS_S3A_INTERMEDIATE_UPLOAD_CLEAN_AGE.getDefaultValue());
  }
  // Abort every multipart upload initiated before (now - cleanAge).
  Date cleanBefore = new Date(System.currentTimeMillis() - cleanAge);
  mManager.abortMultipartUploads(mBucketName, cleanBefore);
}
/**
 * Aborts every multipart upload in the given bucket that was initiated before
 * {@code since}, delegating to the transfer manager.
 *
 * @param bucketName the bucket whose multipart uploads are aborted
 * @param since uploads initiated before this instant are aborted
 */
public void deleteMultiparts(String bucketName, Date since) {
  transferManager.abortMultipartUploads(bucketName, since);
}
/**
 * Abort all outstanding MPUs older than a given age.
 *
 * @param seconds minimum age of the uploads to abort, in seconds; must be >= 0
 * @throws IOException on any failure, other than 403 "permission denied"
 */
@Retries.RetryTranslated
public void abortOutstandingMultipartUploads(long seconds) throws IOException {
  Preconditions.checkArgument(seconds >= 0);
  // Math.multiplyExact fails fast on overflow instead of silently wrapping to
  // a negative offset (seconds * 1000 can overflow for very large values,
  // which would push the cutoff into the future and abort live uploads).
  Date purgeBefore =
      new Date(System.currentTimeMillis() - Math.multiplyExact(seconds, 1000L));
  LOG.debug("Purging outstanding multipart uploads older than {}", purgeBefore);
  invoker.retry("Purging multipart uploads", bucket, true,
      () -> transfers.abortMultipartUploads(bucket, purgeBefore));
}
@Override public void close() { // backend is closing. abort all mulitpart uploads from start. if(s3service.doesBucketExist(bucket)) { tmx.abortMultipartUploads(bucket, startTime); } tmx.shutdownNow(); s3service.shutdown(); LOG.info("S3Backend closed."); }
@Override public void close() { // backend is closing. abort all mulitpart uploads from start. if(s3service.doesBucketExist(bucket)) { tmx.abortMultipartUploads(bucket, startTime); } tmx.shutdownNow(); s3service.shutdown(); LOG.info("S3Backend closed."); }
/**
 * Purges leftover multipart uploads from previous runs, if purging is enabled
 * in the configuration.
 *
 * @param conf configuration supplying the purge flag and purge age (seconds)
 * @throws IOException if the purge fails for a reason other than 403
 */
private void initMultipartUploads(Configuration conf) throws IOException {
  boolean purgeExistingMultipart = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS,
      PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART);
  long purgeExistingMultipartAge = Utils.getLong(conf, FS_COS, FS_ALT_KEYS,
      PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
  if (purgeExistingMultipart) {
    // The configured age is in seconds; abort everything older than (now - age).
    Date purgeBefore =
        new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
    try {
      transfers.abortMultipartUploads(mBucket, purgeBefore);
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() == 403) {
        // 403 means the caller may only have read access; purging is
        // best-effort, so log and continue instead of failing startup.
        LOG.debug("Failed to purge multipart uploads against {},"
            + " FS may be read only", mBucket, e);
      } else {
        throw translateException("purging multipart uploads", mBucket, e);
      }
    }
  }
}
/**
 * Purges leftover multipart uploads from previous runs, if purging is enabled
 * in the configuration.
 *
 * @param conf configuration supplying the purge flag and purge age (seconds)
 * @throws IOException if the purge fails for a reason other than 403
 */
private void initMultipartUploads(Configuration conf) throws IOException {
  boolean purgeExistingMultipart = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS,
      PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART);
  long purgeExistingMultipartAge = Utils.getLong(conf, FS_COS, FS_ALT_KEYS,
      PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
  if (purgeExistingMultipart) {
    // The configured age is in seconds; abort everything older than (now - age).
    Date purgeBefore =
        new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
    try {
      transfers.abortMultipartUploads(mBucket, purgeBefore);
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() == 403) {
        // 403 means the caller may only have read access; purging is
        // best-effort, so log and continue instead of failing startup.
        LOG.debug("Failed to purge multipart uploads against {},"
            + " FS may be read only", mBucket, e);
      } else {
        throw translateException("purging multipart uploads", mBucket, e);
      }
    }
  }
}
@Override public void close() throws DataStoreException { super.close(); // backend is closing. abort all mulitpart uploads from start. if(s3service.doesBucketExist(bucket)) { tmx.abortMultipartUploads(bucket, startTime); } tmx.shutdownNow(); s3service.shutdown(); LOG.info("S3Backend closed."); }
@Override public void close() throws DataStoreException { super.close(); // backend is closing. abort all mulitpart uploads from start. if(s3service.doesBucketExist(bucket)) { tmx.abortMultipartUploads(bucket, startTime); } tmx.shutdownNow(); s3service.shutdown(); LOG.info("S3Backend closed."); }
// Abort every multipart upload older than the configured age (seconds), then
// release the transfer manager. The 'false' argument to shutdownNow keeps the
// underlying S3 client alive per TransferManager.shutdownNow(boolean).
long cutoffMillis = System.currentTimeMillis() - purgeExistingMultipartAge * 1000;
Date purgeBefore = new Date(cutoffMillis);
transferManager.abortMultipartUploads(bucket, purgeBefore);
transferManager.shutdownNow(false);
// NOTE(review): this snippet is truncated — the while-loop body and the
// closing braces are not visible here; do not assume what the loop does.
if (s3service.doesBucketExist(bucket)) {
  // Repeat the cleanup pass a fixed 4 times — presumably to catch uploads or
  // objects that appear between passes; confirm intent against the full source.
  for (int i = 0; i < 4; i++) {
    // Abort every multipart upload in the bucket initiated before 'date'.
    tmx.abortMultipartUploads(bucket, date);
    // Fetch the first page of the bucket's object listing.
    ObjectListing prevObjectListing = s3service.listObjects(bucket);
    while (prevObjectListing != null ) {
// NOTE(review): this snippet is truncated — the while-loop body and the
// closing braces are not visible here; do not assume what the loop does.
if (s3service.doesBucketExist(bucket)) {
  // Repeat the cleanup pass a fixed 4 times — presumably to catch uploads or
  // objects that appear between passes; confirm intent against the full source.
  for (int i = 0; i < 4; i++) {
    // Abort every multipart upload in the bucket initiated before 'date'.
    tmx.abortMultipartUploads(bucket, date);
    // Fetch the first page of the bucket's object listing.
    ObjectListing prevObjectListing = s3service.listObjects(bucket);
    while (prevObjectListing != null ) {
// Anything initiated more than purgeExistingMultipartAge seconds before now
// is considered stale; compute the cutoff and abort everything older.
long cutoffMillis = System.currentTimeMillis() - purgeExistingMultipartAge * 1000;
Date purgeBefore = new Date(cutoffMillis);
transfers.abortMultipartUploads(bucket, purgeBefore);
// NOTE(review): this snippet is truncated — the while-loop body and the
// closing braces are not visible here; do not assume what the loop does.
if (s3service.doesBucketExist(bucket)) {
  // Repeat the cleanup pass a fixed 4 times — presumably to catch uploads or
  // objects that appear between passes; confirm intent against the full source.
  for (int i = 0; i < 4; i++) {
    // Abort every multipart upload in the bucket initiated before 'date'.
    tmx.abortMultipartUploads(bucket, date);
    // Fetch the first page of the bucket's object listing.
    ObjectListing prevObjectListing = s3service.listObjects(bucket);
    while (prevObjectListing != null) {