// Block until the in-flight transfer finishes, then record success.
upload.waitForCompletion();
STATS.uploadSuccessful();
// NOTE(review): %s placeholders imply a String.format-style logger (e.g. Airlift) — confirm.
log.debug("Completed upload for host: %s, key: %s", host, key);
/**
 * Copies {@code srcFile} into the destination store under
 * {@code destPath/<file name>}, blocking until the transfer completes.
 *
 * @return the object key the file was stored under
 * @throws InterruptedException if the blocking wait is interrupted
 */
protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException {
    // Destination key is <destPath>/<source file name>.
    final String objectKey = destPath + S3Utils.SEPARATOR + srcFile.getName();
    // Block until the transfer has fully completed so callers can rely on
    // the object existing under the returned key.
    putFile(destStore, srcFile, destStore.getBucketName(), objectKey).waitForCompletion();
    return objectKey;
}
// Block until the transfer finishes; interruption is handled below.
upload.waitForCompletion();
} catch (InterruptedException e) {
// Destination key: <destData path>/<source file name>.
String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName();
// Block until the upload has fully completed before continuing.
putFile(s3, srcFile, bucket, key).waitForCompletion();
public class UploadObjectMultipartUploadUsingHighLevelAPI { public static void main(String[] args) throws Exception { String existingBucketName = "*** Provide existing bucket name ***"; String keyName = "*** Provide object key ***"; String filePath = "*** Path to and name of the file to upload ***"; TransferManager tm = new TransferManager(new ProfileCredentialsProvider()); System.out.println("Hello"); // TransferManager processes all transfers asynchronously, // so this call will return immediately. Upload upload = tm.upload( existingBucketName, keyName, new File(filePath)); System.out.println("Hello2"); try { // Or you can block and wait for the upload to finish upload.waitForCompletion(); System.out.println("Upload complete."); } catch (AmazonClientException amazonClientException) { System.out.println("Unable to upload file, upload was aborted."); amazonClientException.printStackTrace(); } } }
/** * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is accomplished by splitting the file up into * manageable chunks and using separate threads to upload each chunk. Consider using multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this * method returns, all threads have finished and the file has been reassembled on S3. The benefit to this method is that if any one thread fails, only the portion of the file * that particular thread was handling will have to be re-uploaded (instead of the entire file). A reasonable number of automatic retries occurs if an individual upload thread * fails. If the file upload fails this method throws <code>AmazonS3Exception</code> */ public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) { // Use multi-part upload for large files Upload upload = manager.upload(request); try { // Block and wait for the upload to finish upload.waitForCompletion(); } catch (Exception e) { throw new AmazonS3Exception("Unexpected error uploading file", e); } }
/** * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is * accomplished by splitting the file up into manageable chunks and using separate threads to upload each chunk. Consider using * multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this method returns, all threads have finished * and the file has been reassembled on S3. The benefit to this method is that if any one thread fails, only the portion of the file * that particular thread was handling will have to be re-uploaded (instead of the entire file). A reasonable number of automatic * retries occurs if an individual upload thread fails. If the file upload fails this method throws <code>AmazonS3Exception</code> */ public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) { // Use multi-part upload for large files Upload upload = manager.upload(request); try { // Block and wait for the upload to finish upload.waitForCompletion(); } catch (Exception e) { throw new AmazonS3Exception("Unexpected error uploading file", e); } }
public long putChunk(String localDataFile, String localIndexFile, TopicPartition tp) throws IOException { // Put data file then index, then finally update/create the last_index_file marker String dataFileKey = this.getChunkFileKey(localDataFile); String idxFileKey = this.getChunkFileKey(localIndexFile); // Read offset first since we'll delete the file after upload long nextOffset = getNextOffsetFromIndexFileContents(new FileReader(localIndexFile)); try { Upload upload = tm.upload(this.bucket, dataFileKey, new File(localDataFile)); upload.waitForCompletion(); upload = tm.upload(this.bucket, idxFileKey, new File(localIndexFile)); upload.waitForCompletion(); } catch (Exception e) { throw new IOException("Failed to upload to S3", e); } this.updateCursorFile(idxFileKey, tp); // Sanity check - return what the new nextOffset will be based on the index we just uploaded return nextOffset; }
public long putChunk(String localDataFile, String localIndexFile, TopicPartition tp) throws IOException { // Put data file then index, then finally update/create the last_index_file marker String dataFileKey = this.getChunkFileKey(localDataFile); String idxFileKey = this.getChunkFileKey(localIndexFile); // Read offset first since we'll delete the file after upload long nextOffset = getNextOffsetFromIndexFileContents(new FileReader(localIndexFile)); try { Upload upload = tm.upload(this.bucket, dataFileKey, new File(localDataFile)); upload.waitForCompletion(); upload = tm.upload(this.bucket, idxFileKey, new File(localIndexFile)); upload.waitForCompletion(); } catch (Exception e) { throw new IOException("Failed to upload to S3", e); } this.updateCursorFile(idxFileKey, tp); // Sanity check - return what the new nextOffset will be based on the index we just uploaded return nextOffset; }
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentEncoding("UTF-8");
// NOTE(review): InputStream.available() is NOT the total stream length — it is
// only an estimate of bytes readable without blocking. Using it as the S3
// content length can silently truncate the upload; use the real size
// (e.g. File.length()) instead — confirm with the caller that owns the stream.
size = inputStream.available();
metadata.setContentLength(size);
// NOTE(review): this TransferManager is never shut down in the visible span —
// its internal thread pool will leak unless released elsewhere.
TransferManager transferManager = new TransferManager(credentialsProvider);
Upload upload = transferManager.upload(bucket_name, key, images3, metadata);
// Block until the transfer completes.
upload.waitForCompletion();
// Block until the transfer completes.
upload.waitForCompletion();
} catch (final Exception e) {
    // NOTE(review): in this visible span the failure is only reported to stdout;
    // confirm the exception is rethrown or otherwise handled below.
    System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());
/**
 * Uploads the configured file to the deployment bucket under {@code key},
 * blocking until the transfer completes and logging size and duration.
 */
private void transferFileToS3(final String key) {
    final long fileSizeMb = file.length() / (1024 * 1024);
    getLogger().lifecycle("Uploading {} MB from file {} to {}...", fileSizeMb, file, getS3Url());
    final TransferManager transferManager = createTransferManager();
    final Instant start = Instant.now();
    final Upload upload = transferManager.upload(config.getDeploymentBucket(), key, file);
    try {
        upload.waitForCompletion();
        final Duration uploadDuration = Duration.between(start, Instant.now());
        getLogger().lifecycle("Uploaded {} to {} in {}", file, getS3Url(), uploadDuration);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AssertionError("Upload interrupted", e);
    } finally {
        // Fix: release the TransferManager's worker threads — the original
        // leaked the pool on every call. 'false' keeps the underlying S3
        // client usable by other components.
        transferManager.shutdownNow(false);
    }
}
// NOTE(review): placeholder credentials hard-coded in source — never commit real
// keys; use a credentials provider chain instead.
AWSCredentials credentials = new BasicAWSCredentials( "whatever", "whatever");
File f = new File("/home/myuser/test");
TransferManager transferManager = new TransferManager(credentials);
//+upload from HDFS to S3
Configuration conf = new Configuration();
// set the hadoop config files
conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
Path path = new Path("hdfs://my_ip_address/user/ubuntu/test/test.txt");
FileSystem fs = path.getFileSystem(conf);
// NOTE(review): inputStream is never closed in this visible span — confirm it is
// closed after the upload completes.
FSDataInputStream inputStream = fs.open(path);
// NOTE(review): no content length is set, so TransferManager must buffer the
// stream to size it; set metadata content length from the HDFS file status.
ObjectMetadata objectMetadata = new ObjectMetadata();
Upload upload = transferManager.upload("xpatterns-deployment-ubuntu", "test_cu_jmen3", inputStream, objectMetadata);
//-upload from HDFS to S3
try {
    upload.waitForCompletion();
} catch (InterruptedException e) {
    // NOTE(review): swallows the interrupt — should call
    // Thread.currentThread().interrupt() and surface the failure.
    e.printStackTrace();
}
/**
 * Uploads every staging file to its S3 destination, blocking until all
 * transfers complete. The TransferManager's worker threads and the request
 * input streams are always released, even on failure.
 */
void stageFiles() {
    // Nothing staged, nothing to do.
    if (files.isEmpty()) {
        return;
    }
    TransferManager transferManager = new TransferManager(s3);
    List<PutObjectRequest> requests = new ArrayList<>();
    for (StagingFile stagingFile : files) {
        logger.info("Staging {} -> {}", stagingFile.file().reference().filename(), stagingFile.file().s3Uri());
        requests.add(stagingFilePutRequest(stagingFile));
    }
    try {
        // Start every transfer first, then block on each one in turn.
        List<Upload> inFlight = new ArrayList<>();
        for (PutObjectRequest request : requests) {
            inFlight.add(transferManager.upload(request));
        }
        for (Upload upload : inFlight) {
            try {
                upload.waitForCompletion();
            } catch (InterruptedException e) {
                // Restore the interrupt flag before surfacing the failure.
                Thread.currentThread().interrupt();
                throw new TaskExecutionException(e);
            }
        }
    } finally {
        // Tear down the transfer threads without closing the shared S3 client,
        // then release the request input streams.
        transferManager.shutdownNow(false);
        requests.forEach(r -> closeQuietly(r.getInputStream()));
    }
}
try {
    Upload u = tm.upload(bucketName, s3key, file);
    // Block until the transfer completes (throws on failure or interruption).
    u.waitForCompletion();
} catch (Exception e) { throw new BuildException("Error when trying to upload file: "
/**
 * Uploads the temp file to S3 under {@code host}/{@code key}, blocking until
 * completion and recording success/failure stats.
 *
 * @throws IOException          if the SDK reports a client failure
 * @throws InterruptedIOException if the blocking wait is interrupted (the
 *                              thread's interrupt flag is restored first)
 */
private void uploadObject() throws IOException {
    try {
        log.debug("Starting upload for host: %s, key: %s, file: %s, size: %s", host, key, tempFile, tempFile.length());
        STATS.uploadStarted();
        PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
        if (sseEnabled) {
            // Request SSE-S3 (AES-256) server-side encryption for the object.
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setMetadata(metadata);
        }
        Upload upload = transferManager.upload(request);
        if (log.isDebugEnabled()) {
            upload.addProgressListener(createProgressListener(upload));
        }
        upload.waitForCompletion();
        STATS.uploadSuccessful();
        log.debug("Completed upload for host: %s, key: %s", host, key);
    } catch (AmazonClientException e) {
        STATS.uploadFailed();
        throw new IOException(e);
    } catch (InterruptedException e) {
        STATS.uploadFailed();
        Thread.currentThread().interrupt();
        // Fix: carry a message and the original cause instead of throwing a
        // bare InterruptedIOException that loses all context.
        InterruptedIOException interrupted = new InterruptedIOException("S3 upload interrupted for key: " + key);
        interrupted.initCause(e);
        throw interrupted;
    }
}
// Block until the transfer completes, then record the object's public URL.
upload.waitForCompletion();
setResourceUrl(s3.getUrl(bucketName, key).toString());
getLogger().info("Upload completed: {}", getResourceUrl());
@Override public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) { try { ObjectMetadata metadata = new ObjectMetadata(); // Set content encoding to gzip. This way browsers will decompress on download using native deflate code. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/ metadata.setContentEncoding("gzip"); metadata.setContentType(persistenceBuffer.getMimeType()); // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer. metadata.setContentLength(persistenceBuffer.getSize()); // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata); final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata); upload.addProgressListener(new UploadProgressLogger(upload)); // Block until upload completes to avoid accumulating unlimited uploads in memory. upload.waitForCompletion(); } catch (Exception e) { throw new RuntimeException(e); } }
@Override public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) { try { ObjectMetadata metadata = new ObjectMetadata(); // Set content encoding to gzip. This way browsers will decompress on download using native deflate code. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/ metadata.setContentEncoding("gzip"); metadata.setContentType(persistenceBuffer.getMimeType()); // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer. metadata.setContentLength(persistenceBuffer.getSize()); // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata); final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata); upload.addProgressListener(new UploadProgressLogger(upload)); // Block until upload completes to avoid accumulating unlimited uploads in memory. upload.waitForCompletion(); } catch (Exception e) { throw new RuntimeException(e); } }
/**
 * Puts an object, optionally routing through TransferManager multipart upload
 * via a temporary ".tmp" key that is copied to the final key and then deleted —
 * presumably so the final key only ever appears once fully written (TODO confirm intent).
 */
@Override
public PutObjectResult putObject(PutObjectRequest req) throws AmazonClientException, AmazonServiceException {
    // Plain single-shot put unless multipart mode is enabled.
    if (!multipartUpload) {
        return super.putObject(req);
    }
    final long contentLen = TransferManagerUtils.getContentLength(req);
    String tempFilename = req.getKey() + ".tmp";
    String origFilename = req.getKey();
    // NOTE(review): this mutates the caller's request (key and progress
    // listener) and never restores it — confirm callers do not reuse req.
    req.setKey(tempFilename);
    XProgressListener progressListener = new XProgressListener();
    req.setGeneralProgressListener(new ProgressListenerChain(progressListener));
    progressListener.setContentLen(contentLen);
    progressListener.setUpload(transferManager.upload(req));
    progressListener.setSilentUpload(silentUpload);
    try {
        // Block until the multipart transfer completes.
        progressListener.getUpload().waitForCompletion();
    } catch (InterruptedException e) {
        // NOTE(review): the interrupt flag is not restored here, and a failed
        // upload can leave an orphaned ".tmp" object behind in the bucket.
        throw new AmazonClientException(e.getMessage(), e);
    }
    // Promote the temp object to its final key, then remove the temp object.
    CopyObjectRequest copyReq = new CopyObjectRequest(req.getBucketName(), tempFilename, req.getBucketName(), origFilename);
    copyObject(copyReq);
    deleteObject(new DeleteObjectRequest(req.getBucketName(), tempFilename));
    // NOTE(review): returns null rather than a PutObjectResult — callers that
    // read the result (e.g. for the ETag) will NPE; consider returning the
    // copy result's equivalent.
    return null;
}