metadata.setHeader(key, ServiceUtils.parseRfc822Date(header.getValue())); } catch (Exception pe) { log.warn("Unable to parse last modified date: " + header.getValue(), pe); metadata.setHeader(key, Long.parseLong(header.getValue())); } catch (NumberFormatException nfe) { throw new SdkClientException( metadata.setHeader(key, ServiceUtils.removeQuotes(header.getValue())); } else if (key.equalsIgnoreCase(Headers.EXPIRES)) { metadata.setHeader(Headers.EXPIRES, header.getValue()); try { metadata.setHttpExpiresDate(DateUtils.parseRFC822Date(header.getValue())); } else if (key.equalsIgnoreCase(Headers.S3_PARTS_COUNT)) { try { metadata.setHeader(key, Integer.parseInt(header.getValue())); } catch (NumberFormatException nfe) { throw new SdkClientException( metadata.setHeader(key, header.getValue());
metadata.setHeader(SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, origReqMetadata.getRawMetadataValue(SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY)); metadata.setSSECustomerAlgorithm(origReqMetadata.getSSECustomerAlgorithm());
} else if (key.equalsIgnoreCase(Headers.LAST_MODIFIED)) { try { metadata.setHeader(key, ServiceUtils.parseRfc822Date(header.getValue())); } catch (final Exception pe) { log.warn("Unable to parse last modified date: " + header.getValue(), pe); metadata.setHeader(key, Long.parseLong(header.getValue())); } catch (final NumberFormatException nfe) { log.warn("Unable to parse content length: " + header.getValue(), nfe); metadata.setHeader(key, ServiceUtils.removeQuotes(header.getValue())); } else if (key.equalsIgnoreCase(Headers.EXPIRES)) { try { } else if (key.equalsIgnoreCase(Headers.S3_PARTS_COUNT)) { try { metadata.setHeader(key, Integer.parseInt(header.getValue())); } catch (final NumberFormatException nfe) { throw new AmazonClientException( metadata.setHeader(key, header.getValue());
@Override
public void provideObjectMetadata(final File file, final ObjectMetadata objectMetadata) {
    // Workaround: as of SDK 1.10.69 there is no supported way to set ACLs for
    // directory uploads, so the canned ACL is smuggled in as a raw header.
    objectMetadata.setHeader(Headers.S3_CANNED_ACL, CannedAccessControlList.BucketOwnerFullControl);
}
});
@Test
public void downloadObjectToFileTest() throws Throwable {
    // Build an S3 object carrying a small payload plus an ETag header, then
    // check the download helper writes it to a temp file without raising.
    final String content = "test input";
    final S3Object s3Object = new S3Object();
    s3Object.setObjectContent(new StringInputStream(content));
    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setHeader(Headers.ETAG, "5eed650258ee02f6a77c87b748b764ec");
    s3Object.setObjectMetadata(objectMetadata);
    ServiceUtils.downloadObjectToFile(s3Object, File.createTempFile("temp1", "temp2"), true, false);
}
// Attach the test payload to the S3 object and give its metadata an ETag
// header (a 32-char hex string, MD5-shaped) so downstream integrity checks
// have a value to compare against.
s3Object.setObjectContent(new StringInputStream(input));
final ObjectMetadata metadata = new ObjectMetadata();
metadata.setHeader(Headers.ETAG, "5eed650258ee02f6a77c87b748b764ec");
s3Object.setObjectMetadata(metadata);
// Populate a representative spread of metadata fields on "from".
// Expiration-related fields first:
from.setExpirationTime(expTime);
from.setExpirationTimeRuleId("expirationTimeRuleId");
from.setHttpExpiresDate(httpExpiresDate);
// Arbitrary custom raw headers:
from.setHeader("k1", "v1");
from.setHeader("k2", "v2");
// Standard object attributes and well-known headers:
from.setLastModified(lastModified);
from.setSSECustomerAlgorithm("SSECustomerAlgorithm");
from.setUserMetadata(userMetadata);
from.setHeader(Headers.CONTENT_RANGE, "/9999");
from.setHeader(Headers.S3_VERSION_ID, "versionid");
from.setHeader(Headers.ETAG, "etag");
from.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
// FIX: the HTTP Expires header must be an RFC 1123 date
// ("EEE, dd MMM yyyy HH:mm:ss zzz"). The previous literal
// "Mon, Jan 1 2030 11:11:11 GMT" had the day/month fields in the wrong order
// and the wrong weekday (2030-01-01 is a Tuesday), so parsers reject it.
String EXPIRY_DATE = "Tue, 01 Jan 2030 11:11:11 GMT";
BasicAWSCredentials awsCreds = new BasicAWSCredentials(ACCESS_KEY_ID, SECRET_ACCESS_KEY);
AmazonS3Client s3Client = new AmazonS3Client(awsCreds);
ResponseHeaderOverrides override = new ResponseHeaderOverrides();
override.setContentType("image/jpeg");
override.setExpires(EXPIRY_DATE);
File imageFile = new File(path);
PutObjectRequest pros = new PutObjectRequest(BUCKET_NAME, BUCKET_PATH, imageFile);
ObjectMetadata meta = new ObjectMetadata();
// Stored twice on purpose: once as user metadata (becomes x-amz-meta-expires)
// and once as the raw "expires" header on the stored object.
meta.addUserMetadata("expires", EXPIRY_DATE);
meta.setHeader("expires", EXPIRY_DATE);
pros.setMetadata(meta);
s3Client.putObject(pros);
AmazonS3 s3 = new AmazonS3Client(); String bucketName = "bucketName "; String key = "key.txt"; ObjectMetadata newObjectMetadata = new ObjectMetadata(); // ... whatever you desire, e.g.: newObjectMetadata.setHeader("Expires", "Thu, 21 Mar 2042 08:16:32 GMT"); CopyObjectRequest copyObjectRequest = new CopyObjectRequest() .WithSourceBucketName(bucketName) .WithSourceKey(key) .WithDestinationBucket(bucketName) .WithDestinationKey(key) .withNewObjectMetadata(newObjectMetadata); s3.copyObject(copyObjectRequest);
/**
 * Translates the put-file request's generic metadata into the AWS SDK's
 * {@code ObjectMetadata}: user metadata, raw headers, the file's length as the
 * content length, and the request's encoding as the content encoding.
 *
 * @param request the put-file request carrying metadata, file, and encoding
 * @return a populated SDK ObjectMetadata instance
 */
private com.amazonaws.services.s3.model.ObjectMetadata getAmazonMetadata(PutFileRequest request) {
    com.amazonaws.services.s3.model.ObjectMetadata meta = new com.amazonaws.services.s3.model.ObjectMetadata();
    meta.setUserMetadata(request.getMetadata().getUserMetadata());
    // Iterate entries directly rather than keySet() + get(key) -- one map
    // lookup per entry instead of two.
    for (Map.Entry<String, Object> entry : request.getMetadata().getRawMetadata().entrySet()) {
        meta.setHeader(entry.getKey(), entry.getValue());
    }
    meta.setContentLength(request.getFile().length());
    meta.setContentEncoding(request.getEncoding());
    return meta;
}
/**
 * Translates the put-directory request's generic metadata into the AWS SDK's
 * {@code ObjectMetadata}. Directory placeholders have zero content length, the
 * configured directory content type, and UTF-8 content encoding.
 *
 * @param request the put-directory request carrying the metadata to copy
 * @return a populated SDK ObjectMetadata instance
 */
private com.amazonaws.services.s3.model.ObjectMetadata getAmazonMetadata(PutDirRequest request) {
    com.amazonaws.services.s3.model.ObjectMetadata meta = new com.amazonaws.services.s3.model.ObjectMetadata();
    meta.setUserMetadata(request.getMetadata().getUserMetadata());
    // Iterate entries directly rather than keySet() + get(key) -- one map
    // lookup per entry instead of two.
    for (Map.Entry<String, Object> entry : request.getMetadata().getRawMetadata().entrySet()) {
        meta.setHeader(entry.getKey(), entry.getValue());
    }
    meta.setContentLength(0L);
    meta.setContentType(directoryContentType);
    meta.setContentEncoding(UTF8);
    return meta;
}
@Override
protected void saveData(InputStream data) throws IOException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(getContentType());
    @SuppressWarnings("unchecked")
    final Map<String, List<String>> headers = (Map<String, List<String>>) getMetadata().get(HTTP_HEADERS);
    if (headers != null) {
        headers.forEach((key, values) -> {
            if (values == null) {
                return;
            }
            // Content-Length must be stored as a numeric value; every other
            // header is passed through verbatim.
            if (key.equals(Headers.CONTENT_LENGTH)) {
                values.forEach(value -> metadata.setHeader(key, ObjectUtils.to(Long.class, value)));
            } else {
                values.forEach(value -> metadata.setHeader(key, value));
            }
        });
    }
    final PutObjectRequest poRequest = new PutObjectRequest(getBucket(), getPath(), data, metadata);
    // Fall back to a public-read canned ACL when none was configured.
    poRequest.setCannedAcl(ObjectUtils.firstNonNull(getCannedAccessControlList(), CannedAccessControlList.PublicRead));
    createClient().putObject(poRequest);
}
/**
 * Builds the ObjectMetadata for uploading the given file: mime type, content
 * length, last-modified time, optional storage class header, optional
 * server-side encryption, and any configured user-metadata pairs.
 */
public ObjectMetadata buildMetadata(FilePath filePath) throws IOException, InterruptedException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(Mimetypes.getInstance().getMimetype(filePath.getName()));
    metadata.setContentLength(filePath.length());
    metadata.setLastModified(new Date(filePath.lastModified()));
    final boolean hasStorageClass = (storageClass != null) && !"".equals(storageClass);
    if (hasStorageClass) {
        metadata.setHeader("x-amz-storage-class", storageClass);
    }
    if (useServerSideEncryption) {
        metadata.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    for (final MetadataPair pair : userMetadata) {
        metadata.addUserMetadata(pair.key, pair.value);
    }
    return metadata;
}
@Test public void testValidateGlacierS3FilesRestored() { // Put a 1 byte already restored Glacier storage class file in S3. ObjectMetadata metadata = new ObjectMetadata(); metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier); metadata.setOngoingRestore(false); s3Operations .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null); // Validate the file. S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto(); params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName()); params.setFiles(Arrays.asList(new File(TARGET_S3_KEY))); s3Dao.validateGlacierS3FilesRestored(params); }
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException() {
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    // The DAO is expected to wrap the service failure in an IllegalStateException
    // whose message must match exactly (asserted below).
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
            "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated() {
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    // Note: setOngoingRestore is intentionally NOT called, leaving the flag null.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    // A never-restored archive must be rejected with an IllegalArgumentException
    // whose message (including the "{null}" flag) is asserted verbatim.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
@Test
public void testRestoreObjectsNonGlacierObject() {
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a non-Glacier file.
    // Restore only applies to archived (Glacier) objects, so the DAO must fail
    // with an IllegalStateException whose message is asserted verbatim.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: object is not in Glacier (Service: null; Status Code: 0; Error Code: null; Request ID: null)", TARGET_S3_KEY,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress() {
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    // An in-progress restore is not a completed one, so validation must fail
    // with an IllegalArgumentException whose message (flag "{true}") is
    // asserted verbatim.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
@Test public void testRestoreObjects() { // Put a 1 byte Glacier storage class file in S3. ObjectMetadata metadata = new ObjectMetadata(); metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier); metadata.setOngoingRestore(false); s3Operations .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null); // Initiate a restore request for the test S3 file. S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto(); params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName()); params.setFiles(Arrays.asList(new File(TARGET_S3_KEY))); s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS); // Validate that there is an ongoing restore request for this object. ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null); assertTrue(objectMetadata.getOngoingRestore()); }
@Test public void testRestoreObjectsGlacierObjectAlreadyBeingRestored() { // Put a 1 byte Glacier storage class file in S3 flagged as already being restored. ObjectMetadata metadata = new ObjectMetadata(); metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier); metadata.setOngoingRestore(true); s3Operations .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata), null); // Initiate a restore request for the test S3 file. S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto(); params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName()); params.setFiles(Arrays.asList(new File(TARGET_S3_KEY))); s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS); // Validate that there is still an ongoing restore request for this object. ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null); assertTrue(objectMetadata.getOngoingRestore()); }