STATS.uploadStarted();
PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
if (sseEnabled) {
    switch (sseType) {
        case KMS:
            // Use the caller-supplied KMS key if present, otherwise the account default key
            // (the original snippet listed both calls back to back; an if/else is the assumed intent).
            if (sseKmsKeyId != null) {
                request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsKeyId));
            } else {
                request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams());
            }
            break;
    }
}
request.withCannedAcl(aclType);
Upload upload = transferManager.upload(request);
upload.addProgressListener(createProgressListener(upload));
upload.waitForCompletion();
STATS.uploadSuccessful();
log.debug("Completed upload for host: %s, key: %s", host, key);
PutObjectRequest putReq = new PutObjectRequest(mBucketName, path, mFile).withMetadata(meta);
mManager.upload(putReq).waitForUploadResult();
if (!mFile.delete()) {
    LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
}
System.out.println("\nSubtransfer progress:\n"); for (Upload u : sub_xfers) { System.out.println(" " + u.getDescription()); if (u.isDone()) { TransferState xfer_state = u.getState(); System.out.println(" " + xfer_state); } else { TransferProgress progress = u.getProgress(); double pct = progress.getPercentTransferred(); printProgressBar(pct);
TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
try {
    Upload u = xfer_mgr.upload(bucket_name, key_name, f);
    u.addProgressListener(new ProgressListener() {
        public void progressChanged(ProgressEvent e) {
            double pct = e.getBytesTransferred() * 100.0 / e.getBytes();
            printProgressBar(pct);
        }
    });
    u.waitForCompletion();
    // Print the final state of the transfer once the blocking wait returns.
    TransferState xfer_state = u.getState();
    System.out.println(": " + xfer_state);
} catch (AmazonServiceException e) {
    System.exit(1);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}
xfer_mgr.shutdownNow();
@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
    checkArgument(input != null, "input should not be null");
    checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
            .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        LOG.error("Exception in uploading metadata file {}", new Object[] {input, e});
        throw new DataStoreException("Error in uploading metadata file", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);
TransferManager transfers = new TransferManager(client);
transfers.setConfiguration(transferConfiguration);
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
upload.addProgressListener(listener);
upload.waitForUploadResult();
long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
if (statistics != null && delta != 0) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Remaining bytes to register: {}", delta); // the snippet is truncated here; assumed debug message
    }
    statistics.incrementBytesWritten(delta); // assumed: fold the remaining delta into the filesystem statistics
}
// The snippet begins mid-call; an in-place copy (same source and destination) is the assumed
// way the metadata is refreshed, given setNewObjectMetadata on the next line.
CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
copReq.setNewObjectMetadata(objectMetaData);
Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
try {
    copy.waitForCopyResult();
    try {
        Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(
            bucket, key, file)));
        up.waitForUploadResult();
        LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
    } catch (Exception e2) {
        LOG.error("synchronous upload to identifier [{}] failed.", identifier, e2); // assumed handler; the snippet is truncated here
    }
} catch (Exception e) {
    LOG.error("waiting for copy result failed.", e); // assumed handler; the snippet is truncated here
}
@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString())
            .initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s %s", mKey, e));
    } finally {
        if (!mBackupFile.delete()) {
            // Log the file, not the stream (the original logged mBackupOutputStream by mistake).
            LOG.warn("Could not delete temporary cos file: {}", mBackupFile.getPath());
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
om.setServerSideEncryption(serverSideEncryptionAlgorithm);
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
upload.addProgressListener(listener);
upload.waitForUploadResult();
long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
if (statistics != null && delta != 0) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Remaining bytes to register: {}", delta); // the snippet is truncated here; assumed debug message
    }
    statistics.incrementBytesWritten(delta); // assumed: fold the remaining delta into the filesystem statistics
}
path += localFile.getName();
PutObjectRequest request = new PutObjectRequest(this.bucket, path, localFile);
metas.setSSEAlgorithm(this.sseAlgorithm);
request.withMetadata(metas);
request.withCannedAcl(this.acl);
final Upload upload = mgr.upload(request);
upload.addProgressListener((ProgressListener) progressEvent -> {
    if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
        RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
    }
});
upload.waitForCompletion();
return null;

// Directory-upload variant from the same uploader (a separate code path in the original):
fileUpload = mgr.uploadDirectory(this.bucket, this.path, localFile, true, metadatasProvider);
for (final Upload upload : fileUpload.getSubTransfers()) {
    upload.addProgressListener((ProgressListener) progressEvent -> {
        if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
            RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
        }
    });
}
private void uploadObject() throws IOException {
    try {
        log.debug("Starting upload for host: %s, key: %s, file: %s, size: %s", host, key, tempFile, tempFile.length());
        STATS.uploadStarted();
        PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
        if (sseEnabled) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setMetadata(metadata);
        }
        Upload upload = transferManager.upload(request);
        if (log.isDebugEnabled()) {
            upload.addProgressListener(createProgressListener(upload));
        }
        upload.waitForCompletion();
        STATS.uploadSuccessful();
        log.debug("Completed upload for host: %s, key: %s", host, key);
    } catch (AmazonClientException e) {
        STATS.uploadFailed();
        throw new IOException(e);
    } catch (InterruptedException e) {
        STATS.uploadFailed();
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
    }
}
getLogger().info("Uploading... s3://{}/{}", bucketName, key); Upload upload = s3mgr.upload(new PutObjectRequest(getBucketName(), getKey(), getFile()) .withMetadata(getObjectMetadata())); upload.addProgressListener(new ProgressListener() { upload.waitForCompletion(); setResourceUrl(s3.getUrl(bucketName, key).toString()); getLogger().info("Upload completed: {}", getResourceUrl());
final String path = file.getAbsolutePath();
final long uploadStartTimeMs = System.currentTimeMillis();
final PutObjectRequest putRequest = new PutObjectRequest(s3bucket,
        targetDirectory + "/" + file.getName(), file)
        .withCannedAcl(CannedAccessControlList.PublicRead);
final Upload upload = this.transferManager.upload(putRequest);
int statusChecks = 0;
try {
    while (!upload.isDone()) {
        if (this.uploadTimedOut(uploadStartTimeMs)) {
            System.err.format("Timed out uploading file to S3 (%s). Will skip file. Report might be incomplete.%n", path);
            return; // assumption: give up on this file once the timeout is hit
        }
        statusChecks++;
        Thread.sleep(STATUS_CHECK_INTERVAL_MS); // assumption: poll interval; the constant is not shown in the snippet
    }
    upload.waitForCompletion();
} catch (final Exception e) {
    System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());
}
/**
 * Use this method to reliably upload large files and wait until they are fully uploaded before continuing. Behind the scenes this is
 * accomplished by splitting the file up into manageable chunks and using separate threads to upload each chunk. Consider using
 * multi-part uploads on files larger than <code>MULTI_PART_UPLOAD_THRESHOLD</code>. When this method returns, all threads have finished
 * and the file has been reassembled on S3. The benefit of this method is that if any one thread fails, only the portion of the file
 * that particular thread was handling has to be re-uploaded (instead of the entire file). A reasonable number of automatic retries
 * occurs if an individual upload thread fails. If the file upload fails, this method throws <code>AmazonS3Exception</code>.
 */
public void blockingMultiPartUpload(PutObjectRequest request, TransferManager manager) {
    // Use multi-part upload for large files
    Upload upload = manager.upload(request);
    try {
        // Block and wait for the upload to finish
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new AmazonS3Exception("Unexpected error uploading file", e);
    }
}
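A minimal sketch of how a caller might wire up the threshold this Javadoc mentions. The bucket, key, file name, and the 16 MiB value are placeholders (the Javadoc's MULTI_PART_UPLOAD_THRESHOLD constant is not shown above, so a stand-in is defined here); TransferManagerBuilder applies the threshold so upload() transparently switches to multi-part mode, and the blocking wait mirrors what blockingMultiPartUpload does:

import java.io.File;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class BlockingUploadExample {
    // Stand-in for the MULTI_PART_UPLOAD_THRESHOLD constant referenced in the Javadoc above.
    private static final long MULTI_PART_UPLOAD_THRESHOLD = 16L * 1024 * 1024; // 16 MiB

    public static void main(String[] args) throws Exception {
        // Uploads above the threshold are split into parts and sent on separate threads.
        TransferManager manager = TransferManagerBuilder.standard()
                .withS3Client(AmazonS3ClientBuilder.defaultClient())
                .withMultipartUploadThreshold(MULTI_PART_UPLOAD_THRESHOLD)
                .build();
        try {
            // Placeholder bucket/key/file names.
            PutObjectRequest request =
                    new PutObjectRequest("my-bucket", "reports/large-file.bin", new File("large-file.bin"));
            Upload upload = manager.upload(request);
            upload.waitForCompletion(); // same blocking wait blockingMultiPartUpload performs
        } finally {
            manager.shutdownNow(false); // false: keep the shared S3 client usable afterwards
        }
    }
}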
PutObjectRequest putObjectRequest = new PutObjectRequest(s3TO.getBucketName(), s3Key, inputStream, objectMetadata);
putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy);
// Assumption: the Upload comes from a TransferManager the snippet does not show.
Upload upload = transferManager.upload(putObjectRequest);
upload.addProgressListener(new ProgressListener() {
    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        // progress handling elided in the original snippet
    }
});
try {
    upload.waitForCompletion();
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // assumed handler; the snippet is truncated here
}
TransferManager transferManager;
if (awsSecretKey != null && awsAccessKeyId != null) {
    transferManager = new TransferManager(getOrCreateClient(AmazonS3Client.class));
} else {
    transferManager = new TransferManager();
}
System.out.println("Uploading file " + file.getName() + "...");
Upload upload = transferManager.upload(bucketName, key, file);
if (printStatusUpdates) {
    while (!upload.isDone()) {
        System.out.print(upload.getProgress().getBytesTransferred() + "/"
            + upload.getProgress().getTotalBytesToTransfer() + " bytes transferred...\r");
        Thread.sleep(statusUpdatePeriodInMs);
    }
    System.out.print(upload.getProgress().getBytesTransferred() + "/"
        + upload.getProgress().getTotalBytesToTransfer() + " bytes transferred...\n");
} else {
    upload.waitForCompletion();
}
private void addFile() throws Exception {
    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();
    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);
    metadata.addUserMetadata("writer", "org.apache.streams");
    for (String s : metaData.keySet()) {
        metadata.addUserMetadata(s, metaData.get(s));
    }
    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();
        is.close();
        transferManager.shutdownNow(false);
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception ignored) {
        LOGGER.trace("Ignoring", ignored);
    }
}
@Override
public PutObjectResult putObject(PutObjectRequest req) throws AmazonClientException, AmazonServiceException {
    if (!multipartUpload) {
        return super.putObject(req);
    }
    final long contentLen = TransferManagerUtils.getContentLength(req);
    String tempFilename = req.getKey() + ".tmp";
    String origFilename = req.getKey();
    req.setKey(tempFilename);
    XProgressListener progressListener = new XProgressListener();
    req.setGeneralProgressListener(new ProgressListenerChain(progressListener));
    progressListener.setContentLen(contentLen);
    progressListener.setUpload(transferManager.upload(req));
    progressListener.setSilentUpload(silentUpload);
    try {
        progressListener.getUpload().waitForCompletion();
    } catch (InterruptedException e) {
        throw new AmazonClientException(e.getMessage(), e);
    }
    // The upload went to a temp key; promote it with a copy, then remove the temp object.
    CopyObjectRequest copyReq = new CopyObjectRequest(req.getBucketName(), tempFilename, req.getBucketName(), origFilename);
    copyObject(copyReq);
    deleteObject(new DeleteObjectRequest(req.getBucketName(), tempFilename));
    return null;
}
@Override
public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) {
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        // Set content encoding to gzip. This way browsers will decompress on download using native deflate code.
        // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/
        metadata.setContentEncoding("gzip");
        metadata.setContentType(persistenceBuffer.getMimeType());
        // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer.
        metadata.setContentLength(persistenceBuffer.getSize());
        // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata);
        final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata);
        upload.addProgressListener(new UploadProgressLogger(upload));
        // Block until upload completes to avoid accumulating unlimited uploads in memory.
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
void stageFiles() {
    if (files.isEmpty()) {
        return;
    }
    TransferManager transferManager = new TransferManager(s3);
    List<PutObjectRequest> requests = new ArrayList<>();
    for (StagingFile f : files) {
        logger.info("Staging {} -> {}", f.file().reference().filename(), f.file().s3Uri());
        requests.add(stagingFilePutRequest(f));
    }
    try {
        List<Upload> uploads = requests.stream()
                .map(transferManager::upload)
                .collect(toList());
        for (Upload upload : uploads) {
            try {
                upload.waitForCompletion();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new TaskExecutionException(e);
            }
        }
    } finally {
        transferManager.shutdownNow(false);
        requests.forEach(r -> closeQuietly(r.getInputStream()));
    }
}