private void prepareUploadPart(String container, String key, String uploadId, int part, Payload payload, long offset, long size, SortedMap<Integer, String> etags) { Payload chunkedPart = slicer.slice(payload, offset, size); String eTag = null; try { eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } catch (KeyNotFoundException e) { // note that because of eventual consistency, the upload id may not be // present yet we may wish to add this condition to the retry handler // we may also choose to implement ListParts and wait for the uploadId // to become available there. eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } } }
// NOTE(review): this appears to be a truncated excerpt of a multipart-upload
// execute() method — the lone "etags);" below is the tail of a call
// (presumably the per-part upload loop) whose beginning was lost, and the
// braces do not balance. Code kept byte-identical pending recovery of the
// missing text; do not compile as-is.
int partCount = algorithm.getParts();
if (partCount > 0) {
   String uploadId = client.initiateMultipartUpload(container, ObjectMetadataBuilder.create().key(key).build());
   try {
      SortedMap<Integer, String> etags = Maps.newTreeMap();
      // truncated call — only the final argument survives:
      etags);
      return client.completeMultipartUpload(container, key, uploadId, etags);
   } catch (RuntimeException ex) {
      // best-effort cleanup so uploaded parts do not linger, then rethrow
      client.abortMultipartUpload(container, key, uploadId);
      throw ex;
// NOTE(review): closing braces and the context of the single-put fallback
// below are missing from this fragment.
return client.putObject(container, blobToObject.apply(blob));
/**
 * Stores {@code blob} in {@code container} using the REDUCED_REDUNDANCY
 * storage class.
 *
 * @param container name of the bucket to write into
 * @param blob      the blob to upload
 * @return the result of the put operation (presumably the object's ETag —
 *         confirm against the client contract)
 */
private String putBlobWithReducedRedundancy(String container, Blob blob) {
   AWSS3PutObjectOptions putOptions = new AWSS3PutObjectOptions();
   putOptions.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY);
   AWSS3Client api = getContext().unwrapApi(AWSS3Client.class);
   return api.putObject(container, blob2Object.apply(blob), putOptions);
}
// NOTE(review): truncated test excerpt — the catch block is never closed,
// and eTagOf1, object, part1 and oneHundredOneConstitutions are declared
// outside this fragment; uploadId/buffer are referenced in the catch even
// though they are scoped to the try. Kept byte-identical pending recovery.
try {
   String key = "constitution.txt";
   String uploadId = getApi().initiateMultipartUpload(containerName,
         ObjectMetadataBuilder.create().key(key).contentMD5(oneHundredOneConstitutionsMD5).build());
   byte[] buffer = toByteArray(oneHundredOneConstitutions);
   eTagOf1 = getApi().uploadPart(containerName, key, 1, uploadId, part1);
} catch (KeyNotFoundException e) {
   // presumably a retry for eventual consistency of the upload id — confirm
   eTagOf1 = getApi().uploadPart(containerName, key, 1, uploadId, part1);
   String eTag = getApi().completeMultipartUpload(containerName, key, uploadId, ImmutableMap.of(1, eTagOf1));
   object = getApi().getObject(containerName, key);
   assertEquals(toByteArray(object.getPayload()), buffer);
   assertEquals(getApi().headObject(containerName, key).getContentMetadata().getContentMD5(), null);
// NOTE(review): truncated excerpt of a retrying multipart-upload routine —
// the initiateMultipartUpload call has been spliced together with the tail
// of a different call ("errors.get(), key, container, uploadId))"), and the
// try block / surrounding control flow are incomplete. Kept byte-identical.
int effectiveParts = remaining > 0 ? parts + 1 : parts;
try {
   uploadId = client.initiateMultipartUpload(container,
         // garbled argument list — original arguments lost:
         errors.get(), key, container, uploadId));
   String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
   logger.debug(String.format("multipart upload of %s to container %s with uploadId %s"
         + " successfully finished with %s retries", key, container, uploadId, errors.get()));
   client.abortMultipartUpload(container, key, uploadId);
public void testPutWithStorageClass() throws Exception { String containerName = getContainerName(); AWSS3Client s3Client = getApi(); try { for (StorageClass storageClass : StorageClass.values()) { if (storageClass == StorageClass.GLACIER) { // AWS does not allow creation of Glacier objects continue; } String blobName = "test-" + storageClass; BlobStore blobStore = view.getBlobStore(); blobStore.createContainerInLocation(null, containerName); S3Object object = s3Client.newS3Object(); object.getMetadata().setKey(blobName); object.setPayload("something"); s3Client.putObject(containerName, object, storageClass(storageClass)); ListBucketResponse response = s3Client.listBucket(containerName, withPrefix(blobName)); ObjectMetadata metadata = response.iterator().next(); assertThat(metadata.getStorageClass()).isEqualTo(storageClass); } } finally { returnContainer(containerName); } }
// NOTE(review): this appears to be a truncated excerpt of a multipart-upload
// execute() method — the lone "etags);" below is the tail of a call
// (presumably the per-part upload loop) whose beginning was lost, and the
// braces do not balance. Code kept byte-identical pending recovery of the
// missing text; do not compile as-is.
int partCount = algorithm.getParts();
if (partCount > 0) {
   String uploadId = client.initiateMultipartUpload(container, ObjectMetadataBuilder.create().key(key).build());
   try {
      SortedMap<Integer, String> etags = Maps.newTreeMap();
      // truncated call — only the final argument survives:
      etags);
      return client.completeMultipartUpload(container, key, uploadId, etags);
   } catch (RuntimeException ex) {
      // best-effort cleanup so uploaded parts do not linger, then rethrow
      client.abortMultipartUpload(container, key, uploadId);
      throw ex;
// NOTE(review): closing braces and the context of the single-put fallback
// below are missing from this fragment.
return client.putObject(container, blobToObject.apply(blob));
/**
 * Stores {@code blob} in {@code container} using the REDUCED_REDUNDANCY
 * storage class.
 *
 * @param container name of the bucket to write into
 * @param blob      the blob to upload
 * @return the result of the put operation (presumably the object's ETag —
 *         confirm against the client contract)
 */
private String putBlobWithReducedRedundancy(String container, Blob blob) {
   AWSS3PutObjectOptions putOptions = new AWSS3PutObjectOptions();
   putOptions.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY);
   AWSS3Client api = getContext().unwrapApi(AWSS3Client.class);
   return api.putObject(container, blob2Object.apply(blob), putOptions);
}
// NOTE(review): truncated excerpt of a retrying multipart-upload routine —
// the initiateMultipartUpload call has been spliced together with the tail
// of a different call ("errors.get(), key, container, uploadId))"), and the
// try block / surrounding control flow are incomplete. Kept byte-identical.
int effectiveParts = remaining > 0 ? parts + 1 : parts;
try {
   uploadId = client.initiateMultipartUpload(container,
         // garbled argument list — original arguments lost:
         errors.get(), key, container, uploadId));
   String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
   logger.debug(String.format("multipart upload of %s to container %s with uploadId %s"
         + " successfully finished with %s retries", key, container, uploadId, errors.get()));
   client.abortMultipartUpload(container, key, uploadId);
// NOTE(review): truncated excerpt — it opens mid-builder-chain
// (".contentType(...)", with "builder" declared outside this fragment),
// the lone "etags);" is the tail of a lost call, and the braces do not
// balance. Kept byte-identical pending recovery of the missing text.
      .contentType(metadata.getContentType())
      .contentDisposition(metadata.getContentDisposition());
String uploadId = client.initiateMultipartUpload(container, builder.build());
try {
   SortedMap<Integer, String> etags = Maps.newTreeMap();
   // truncated call — only the final argument survives:
   etags);
   return client.completeMultipartUpload(container, key, uploadId, etags);
} catch (RuntimeException ex) {
   // best-effort cleanup so uploaded parts do not linger, then rethrow
   client.abortMultipartUpload(container, key, uploadId);
   throw ex;
// NOTE(review): closing braces and the context of the single-put fallback
// below are missing from this fragment.
return client.putObject(container, blobToObject.apply(blob));
/**
 * Stores {@code blob} in {@code container} using the REDUCED_REDUNDANCY
 * storage class.
 *
 * @param container name of the bucket to write into
 * @param blob      the blob to upload
 * @return the result of the put operation (presumably the object's ETag —
 *         confirm against the client contract)
 */
private String putBlobWithReducedRedundancy(String container, Blob blob) {
   AWSS3PutObjectOptions putOptions = new AWSS3PutObjectOptions();
   putOptions.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY);
   AWSS3Client api = getContext().unwrapApi(AWSS3Client.class);
   return api.putObject(container, blob2Object.apply(blob), putOptions);
}
// NOTE(review): truncated excerpt of a retrying multipart-upload routine —
// the initiateMultipartUpload call has been spliced together with the tail
// of a different call ("errors.get(), key, container, uploadId))"), and the
// try block / surrounding control flow are incomplete. Kept byte-identical.
int effectiveParts = remaining > 0 ? parts + 1 : parts;
try {
   uploadId = client.initiateMultipartUpload(container,
         // garbled argument list — original arguments lost:
         errors.get(), key, container, uploadId));
   String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
   logger.debug(String.format("multipart upload of %s to container %s with uploadId %s"
         + " successfully finished with %s retries", key, container, uploadId, errors.get()));
   client.abortMultipartUpload(container, key, uploadId);
private void prepareUploadPart(String container, String key, String uploadId, int part, Payload payload, long offset, long size, SortedMap<Integer, String> etags) { Payload chunkedPart = slicer.slice(payload, offset, size); String eTag = null; try { eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } catch (KeyNotFoundException e) { // note that because of eventual consistency, the upload id may not be // present yet we may wish to add this condition to the retry handler // we may also choose to implement ListParts and wait for the uploadId // to become available there. eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } } }
private String putBlobWithReducedRedundancy(String container, Blob blob) { AWSS3PutObjectOptions options = new AWSS3PutObjectOptions(); try { AccessControlList acl = bucketAcls.getUnchecked(container); if (acl != null && acl.hasPermission(AccessControlList.GroupGranteeURI.ALL_USERS, AccessControlList.Permission.READ)) { options.withAcl(CannedAccessPolicy.PUBLIC_READ); } options.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY); } catch (CacheLoader.InvalidCacheLoadException e) { // nulls not permitted from cache loader } return getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi().putObject(container, blob2Object.apply(blob), options); }
private void prepareUploadPart(String container, String key, String uploadId, int part, Payload payload, long offset, long size, SortedMap<Integer, String> etags) { Payload chunkedPart = slicer.slice(payload, offset, size); String eTag = null; try { eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } catch (KeyNotFoundException e) { // note that because of eventual consistency, the upload id may not be // present yet we may wish to add this condition to the retry handler // we may also choose to implement ListParts and wait for the uploadId // to become available there. eTag = client.uploadPart(container, key, part, uploadId, chunkedPart); etags.put(Integer.valueOf(part), eTag); } } }
private String putBlobWithReducedRedundancy(String container, Blob blob) { AWSS3PutObjectOptions options = new AWSS3PutObjectOptions(); try { AccessControlList acl = bucketAcls.getUnchecked(container); if (acl != null && acl.hasPermission(AccessControlList.GroupGranteeURI.ALL_USERS, AccessControlList.Permission.READ)) { options.withAcl(CannedAccessPolicy.PUBLIC_READ); } options.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY); } catch (CacheLoader.InvalidCacheLoadException e) { // nulls not permitted from cache loader } return getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi().putObject(container, blob2Object.apply(blob), options); } }
private String putBlobWithReducedRedundancy(String container, Blob blob) { AWSS3PutObjectOptions options = new AWSS3PutObjectOptions(); try { AccessControlList acl = bucketAcls.getUnchecked(container); if (acl != null && acl.hasPermission(AccessControlList.GroupGranteeURI.ALL_USERS, AccessControlList.Permission.READ)) { options.withAcl(CannedAccessPolicy.PUBLIC_READ); } options.storageClass(ObjectMetadata.StorageClass.REDUCED_REDUNDANCY); } catch (CacheLoader.InvalidCacheLoadException e) { // nulls not permitted from cache loader } return getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi().putObject(container, blob2Object.apply(blob), options); } }
@Test
public void testPutWithReducedRedundancy() {
   // The injector is used only to build the payload blob and the
   // Blob -> S3Object converter.
   Injector injector = createInjector(Functions.forMap(ImmutableMap.<HttpRequest, HttpResponse> of()),
         createModule(), setupProperties());
   Blob blob = injector.getInstance(BlobBuilder.class).name("test").payload("content").build();
   BlobToObject blobToObject = injector.getInstance(BlobToObject.class);

   // The PUT we expect the client to issue, including the storage-class header.
   HttpRequest expectedPut = HttpRequest.builder()
         .method("PUT")
         .endpoint("https://test.s3-eu-west-1.amazonaws.com/test")
         .addHeader("Expect", "100-continue")
         .addHeader("x-amz-storage-class", "REDUCED_REDUNDANCY")
         .addHeader("Host", "test.s3-eu-west-1.amazonaws.com")
         .addHeader("Date", CONSTANT_DATE)
         .addHeader("Authorization", "AWS identity:1mJrW85/mqZpYTFIK5Ebtt2MM6E=")
         .payload("content")
         .build();
   // Canned 200 response the stubbed transport returns for that request.
   HttpResponse cannedResponse = HttpResponse.builder()
         .statusCode(200)
         .addHeader("x-amz-id-2", "w0rL+9fALQiCOToesVQefs8WalIgn+ZhMD7hHMKYud/xv7MyKkAWQOtFNEfK97Ri")
         .addHeader("x-amz-request-id", "7A84C3CD4437A4C0")
         .addHeader("Date", CONSTANT_DATE)
         .addHeader("ETag", "437b930db84b8079c2dd804a71936b5f")
         .addHeader("Server", "AmazonS3")
         .build();

   AWSS3Client client = requestsSendResponses(bucketLocationRequest, bucketLocationResponse,
         expectedPut, cannedResponse);
   client.putObject("test", blobToObject.apply(blob), storageClass(StorageClass.REDUCED_REDUNDANCY));
}
@Test
public void testPutWithReducedRedundancy() {
   // The injector is used only to build the payload blob and the
   // Blob -> S3Object converter.
   Injector injector = createInjector(Functions.forMap(ImmutableMap.<HttpRequest, HttpResponse> of()),
         createModule(), setupProperties());
   Blob blob = injector.getInstance(BlobBuilder.class).name("test").payload("content").build();
   BlobToObject blobToObject = injector.getInstance(BlobToObject.class);

   // The PUT we expect the client to issue, including the storage-class header.
   HttpRequest expectedPut = HttpRequest.builder()
         .method("PUT")
         .endpoint("https://test.s3-eu-west-1.amazonaws.com/test")
         .addHeader("Expect", "100-continue")
         .addHeader("x-amz-storage-class", "REDUCED_REDUNDANCY")
         .addHeader("Host", "test.s3-eu-west-1.amazonaws.com")
         .addHeader("Date", CONSTANT_DATE)
         .addHeader("Authorization", "AWS identity:1mJrW85/mqZpYTFIK5Ebtt2MM6E=")
         .payload("content")
         .build();
   // Canned 200 response the stubbed transport returns for that request.
   HttpResponse cannedResponse = HttpResponse.builder()
         .statusCode(200)
         .addHeader("x-amz-id-2", "w0rL+9fALQiCOToesVQefs8WalIgn+ZhMD7hHMKYud/xv7MyKkAWQOtFNEfK97Ri")
         .addHeader("x-amz-request-id", "7A84C3CD4437A4C0")
         .addHeader("Date", CONSTANT_DATE)
         .addHeader("ETag", "437b930db84b8079c2dd804a71936b5f")
         .addHeader("Server", "AmazonS3")
         .build();

   AWSS3Client client = requestsSendResponses(bucketLocationRequest, bucketLocationResponse,
         expectedPut, cannedResponse);
   client.putObject("test", blobToObject.apply(blob), storageClass(StorageClass.REDUCED_REDUNDANCY));
}
}