.withMetadata(metadata) .withTagging(objectTagging) .withCannedAcl(cannedAcl) .<PutObjectRequest> withGeneralProgressListener( listener), transferListener, null, null));
request.withCannedAcl(aclType);
/** * Creates and returns a {@link PutObjectRequest} for the instruction file * with the specified suffix. */ public PutObjectRequest createPutObjectRequest(S3Object s3Object) { if (!s3Object.getBucketName().equals(s3ObjectId.getBucket()) || !s3Object.getKey().equals(s3ObjectId.getKey())) { throw new IllegalArgumentException("s3Object passed inconsistent with the instruction file being created"); } InstructionFileId ifid= s3ObjectId.instructionFileId(suffix); // ObjectMetadata metadata = s3Object.getObjectMetadata(); return new PutObjectRequest(ifid.getBucket(), ifid.getKey(), redirectLocation) .withAccessControlList(accessControlList) .withCannedAcl(cannedAcl) // .withFile(file) // .withInputStream(inputStream) // don't want the metadata for the new instruction file // .withMetadata(metadata == null ? null : metadata.clone()) .withStorageClass(storageClass) .withGeneralProgressListener(getGeneralProgressListener()) .withRequestMetricCollector(getRequestMetricCollector()) ; } }
request.withCannedAcl(cannedAcl);
/** * Creates and returns a {@link PutObjectRequest} for the instruction file * with the specified suffix. */ public PutObjectRequest createPutObjectRequest(S3Object s3Object) { if (!s3Object.getBucketName().equals(s3ObjectId.getBucket()) || !s3Object.getKey().equals(s3ObjectId.getKey())) { throw new IllegalArgumentException("s3Object passed inconsistent with the instruction file being created"); } InstructionFileId ifid= s3ObjectId.instructionFileId(suffix); // ObjectMetadata metadata = s3Object.getObjectMetadata(); return new PutObjectRequest(ifid.getBucket(), ifid.getKey(), redirectLocation) .withAccessControlList(accessControlList) .withCannedAcl(cannedAcl) // .withFile(file) // .withInputStream(inputStream) // don't want the metadata for the new instruction file // .withMetadata(metadata == null ? null : metadata.clone()) .withStorageClass(storageClass) .withGeneralProgressListener(getGeneralProgressListener()) .withRequestMetricCollector(getRequestMetricCollector()) ; } }
public static String S3putFile(File file, String bucketName, String key, CannedAccessControlList accessControlList) throws IOException { if(file == null) { throw new NullPointerException("file must not be null"); } else if(bucketName == null || key == null) { throw new NullPointerException("Object parameters (bucketName / key) must not be null"); } else { PutObjectRequest putObjectRequest = new PutObjectRequest(s3Bucket, key, file); //putObjectRequest.withCannedAcl(accessControlList.AuthenticatedRead); putObjectRequest.withCannedAcl(accessControlList); amazonS3.putObject(putObjectRequest); return key; } }
public void uploadFileToS3(final File file, final String bucket, final String path, final String region, final String roleArn) { // upload mp3 to S3 bucket final PutObjectRequest s3Put = new PutObjectRequest(bucket, path, file).withCannedAcl(CannedAccessControlList.PublicRead); getS3Client(region, roleArn).putObject(s3Put); if (!file.delete()) { logger.warning("Could not delete mp3 temporary audio file."); } }
/**
 * Builds a zero-byte PutObjectRequest that materializes a "directory" marker
 * object at {@code key}, readable by the public.
 */
private PutObjectRequest createDirectoryPutObjectRequest(String key) {
    ObjectMetadata emptyMetadata = new ObjectMetadata();
    emptyMetadata.setContentLength(0);
    InputStream emptyBody = new ByteArrayInputStream(new byte[0]);
    return new PutObjectRequest(this.bucketName, key, emptyBody, emptyMetadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);
}
/**
 * Creates the request used to write an empty, publicly readable marker
 * object at {@code key} (an S3 "directory" placeholder).
 */
private PutObjectRequest createDirectoryPutObjectRequest(String key) {
    // Zero-length body with an explicit content length of 0.
    ObjectMetadata markerMetadata = new ObjectMetadata();
    markerMetadata.setContentLength(0);
    return new PutObjectRequest(this.bucketName, key, new ByteArrayInputStream(new byte[0]), markerMetadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);
}
final long uploadStartTimeMs = System.currentTimeMillis(); final PutObjectRequest putRequest = new PutObjectRequest(s3bucket, targetDirectory + "/" + file.getName(), file) .withCannedAcl(CannedAccessControlList.PublicRead);
/**
 * Writes the given bytes to S3 under the configured bucket/id with the
 * configured canned ACL. The content length is set explicitly so the SDK
 * does not need to buffer the stream to size it.
 */
@Override
public void write(byte[] content) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(content.length);
    InputStream source = new ByteArrayInputStream(content, 0, content.length);
    PutObjectRequest request = new PutObjectRequest(bucketName, id, source, metadata);
    request.withCannedAcl(acl);
    s3.putObject(request);
}
}
@RequestMapping(value = "upload", method = RequestMethod.POST) public @ResponseBody String handleFileUpload(@RequestParam("name") String name, @RequestParam("file") MultipartFile file) { if (!file.isEmpty()) { try { ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentType(file.getContentType()); // Upload the file for public read amazonS3Template.getAmazonS3Client().putObject(new PutObjectRequest(bucketName, name, file.getInputStream(), objectMetadata) .withCannedAcl(CannedAccessControlList.PublicRead)); return "You successfully uploaded " + name + "!"; } catch (Exception e) { return "You failed to upload " + name + " => " + e.getMessage(); } } else { return "You failed to upload " + name + " because the file was empty."; } } }
private Transfer startTransfer(Mapper.Context context, S3UploadDescriptor uploadDescriptor) throws IOException { InputStream input = getInputStream(uploadDescriptor.getSource(), context.getConfiguration()); int bufferSize = context.getConfiguration().getInt(ConfigurationVariable.UPLOAD_BUFFER_SIZE.getName(), -1); if (bufferSize <= 0) { // The default value is the same value used by FileSystem to configure the InputStream. // See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml bufferSize = context.getConfiguration().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT); } LOG.info("Buffer of the input stream is {} for file {}", bufferSize, uploadDescriptor.getSource()); // input stream should not be closed; transfer manager will do it input = new BufferedInputStream(input, bufferSize); try { PutObjectRequest request = new PutObjectRequest(uploadDescriptor.getBucketName(), uploadDescriptor.getKey(), input, uploadDescriptor.getMetadata()); String cannedAcl = context.getConfiguration().get(ConfigurationVariable.CANNED_ACL.getName()); if (cannedAcl != null) { CannedAccessControlList acl = CannedAclUtils.toCannedAccessControlList(cannedAcl); LOG.debug("Using CannedACL {}", acl.name()); request.withCannedAcl(acl); } // We add 1 to the buffer size as per the com.amazonaws.RequestClientOptions doc request.getRequestClientOptions().setReadLimit(bufferSize + 1); return transferManager.upload(request); } catch (AmazonClientException e) { throw new CopyReadException(e); } }
private Transfer startTransfer(Mapper.Context context, S3UploadDescriptor uploadDescriptor) throws IOException { InputStream input = getInputStream(uploadDescriptor.getSource(), context.getConfiguration()); int bufferSize = context.getConfiguration().getInt(ConfigurationVariable.UPLOAD_BUFFER_SIZE.getName(), -1); if (bufferSize <= 0) { // The default value is the same value used by FileSystem to configure the InputStream. // See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml bufferSize = context.getConfiguration().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT); } LOG.info("Buffer of the input stream is {} for file {}", bufferSize, uploadDescriptor.getSource()); // input stream should not be closed; transfer manager will do it input = new BufferedInputStream(input, bufferSize); try { PutObjectRequest request = new PutObjectRequest(uploadDescriptor.getBucketName(), uploadDescriptor.getKey(), input, uploadDescriptor.getMetadata()); String cannedAcl = context.getConfiguration().get(ConfigurationVariable.CANNED_ACL.getName()); if (cannedAcl != null) { CannedAccessControlList acl = CannedAclUtils.toCannedAccessControlList(cannedAcl); LOG.debug("Using CannedACL {}", acl.name()); request.withCannedAcl(acl); } // We add 1 to the buffer size as per the com.amazonaws.RequestClientOptions doc request.getRequestClientOptions().setReadLimit(bufferSize + 1); return transferManager.upload(request); } catch (AmazonClientException e) { throw new CopyReadException(e); } }
/**
 * Streams {@code in} to S3 under the configured bucket/id with the configured
 * canned ACL. When {@code contentLength} is positive it is set on the
 * metadata so the SDK can avoid buffering the whole stream. The stream is
 * closed afterwards only when {@code toCloseStreamWhenFinished} is true.
 */
@Override
public void write(InputStream in, long contentLength, boolean toCloseStreamWhenFinished) {
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        boolean lengthKnown = contentLength > 0;
        if (lengthKnown) {
            metadata.setContentLength(contentLength);
        }
        PutObjectRequest request = new PutObjectRequest(bucketName, id, in, metadata);
        request.withCannedAcl(acl);
        s3.putObject(request);
    } finally {
        if (toCloseStreamWhenFinished) {
            IOTools.close(in);
        }
    }
}
/**
 * Uploads a multipart file to S3 at {@code key} with a public-read ACL,
 * carrying the client-supplied content type and length.
 *
 * @return Boolean.TRUE on success, Boolean.FALSE when the multipart file
 *         could not be converted to a local file
 */
public boolean uploadFile(final AmazonS3 amazonS3, MultipartFile fileToUpload, String s3BucketName, String key) {
    try {
        File file = AdminUtils.convert(fileToUpload);
        long size = fileToUpload.getSize();
        String contentType = fileToUpload.getContentType();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(contentType);
        metadata.setContentLength(size);
        // BUG FIX: the metadata was built but never attached to the request,
        // so the content type was silently dropped on upload.
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, key, file)
                .withMetadata(metadata)
                .withCannedAcl(CannedAccessControlList.PublicRead);
        amazonS3.putObject(putObjectRequest);
        return Boolean.TRUE;
    } catch (IOException exception) {
        log.error(UNEXPECTED_ERROR_OCCURRED, exception);
    }
    return Boolean.FALSE;
}
}
@Override public void run() { // to enable conventionMappings feature String relativePath = prefix + element.getRelativePath().toString(); String key = relativePath.startsWith("/") ? relativePath.substring(1) : relativePath; boolean doUpload = false; try { ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key); if (metadata.getETag().equalsIgnoreCase(md5(element.getFile())) == false) { doUpload = true; } } catch (AmazonS3Exception e) { doUpload = true; } if (doUpload) { logger.info(" => s3://{}/{}", bucketName, key); s3.putObject(new PutObjectRequest(bucketName, key, element.getFile()) .withStorageClass(storageClass) .withCannedAcl(acl) .withMetadata(metadataProvider == null ? null : metadataProvider.call(bucketName, key, element.getFile()))); } else { logger.info(" => s3://{}/{} (SKIP)", bucketName, key); } } }
/** * Creates and returns a {@link PutObjectRequest} for the instruction file * with the specified suffix. */ public PutObjectRequest createPutObjectRequest(S3Object s3Object) { if (!s3Object.getBucketName().equals(s3ObjectId.getBucket()) || !s3Object.getKey().equals(s3ObjectId.getKey())) { throw new IllegalArgumentException("s3Object passed inconsistent with the instruction file being created"); } InstructionFileId ifid= s3ObjectId.instructionFileId(suffix); // ObjectMetadata metadata = s3Object.getObjectMetadata(); return new PutObjectRequest(ifid.getBucket(), ifid.getKey(), redirectLocation) .withAccessControlList(accessControlList) .withCannedAcl(cannedAcl) // .withFile(file) // .withInputStream(inputStream) // don't want the metadata for the new instruction file // .withMetadata(metadata == null ? null : metadata.clone()) .withStorageClass(storageClass) .withGeneralProgressListener(getGeneralProgressListener()) .withRequestMetricCollector(getRequestMetricCollector()) ; } }
/** * Creates and returns a {@link PutObjectRequest} for the instruction file * with the specified suffix. */ public PutObjectRequest createPutObjectRequest(S3Object s3Object) { if (!s3Object.getBucketName().equals(s3ObjectId.getBucket()) || !s3Object.getKey().equals(s3ObjectId.getKey())) { throw new IllegalArgumentException("s3Object passed inconsistent with the instruction file being created"); } InstructionFileId ifid= s3ObjectId.instructionFileId(suffix); // ObjectMetadata metadata = s3Object.getObjectMetadata(); return new PutObjectRequest(ifid.getBucket(), ifid.getKey(), redirectLocation) .withAccessControlList(accessControlList) .withCannedAcl(cannedAcl) // .withFile(file) // .withInputStream(inputStream) // don't want the metadata for the new instruction file // .withMetadata(metadata == null ? null : metadata.clone()) .withStorageClass(storageClass) .withGeneralProgressListener(getGeneralProgressListener()) .withRequestMetricCollector(getRequestMetricCollector()) ; } }
/**
 * Stores {@code stuff} at {@code fullPath} in the configured bucket as a
 * publicly readable object.
 *
 * Content type is taken from the SObject's attributes; content length is set
 * only for non-stream SObjects reporting a positive length. The {@code attrs}
 * map is attached as S3 object tags and the optional storage-class attribute,
 * if present, selects the request's storage class.
 *
 * NOTE(review): tagging is built from {@code attrs} BEFORE the storage-class
 * entry is removed, so a storage-class attribute also appears as a tag —
 * confirm this ordering is intentional. Also note {@code attrs.remove(...)}
 * mutates the caller's map.
 */
@Override
protected void doPut(String fullPath, ISObject stuff, Map<String, String> attrs) {
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(stuff.getAttribute(ISObject.ATTR_CONTENT_TYPE));
    // Stream-backed SObjects have no reliable length; skip setting it for them.
    if (!(stuff instanceof SObject.InputStreamSObject)) {
        long length = stuff.getLength();
        if (0 < length) {
            meta.setContentLength(stuff.getLength());
        }
    }
    PutObjectRequest req = new PutObjectRequest(bucket, fullPath, stuff.asInputStream(), meta);
    req.setTagging(mapToTagList(attrs));
    // Falls back to defStorageClass when the attribute is absent or unrecognized.
    StorageClass storageClass = StorageClass.valueOfIgnoreCase(attrs.remove(ATTR_STORAGE_CLASS), defStorageClass);
    if (null != storageClass) {
        req.setStorageClass(storageClass.toString());
    }
    req.withCannedAcl(CannedAccessControlList.PublicRead);
    s3.putObject(req);
}