u.addProgressListener(new ProgressListener() { public void progressChanged(ProgressEvent e) { double pct = e.getBytesTransferred() * 100.0 / e.getBytes();
upload.addProgressListener(createProgressListener(upload));
upload.addProgressListener(new ProgressListener() { @Override public void progressChanged(ProgressEvent progressEvent) {
upload.addProgressListener(listener);
upload.addProgressListener(listener);
upload.addProgressListener((ProgressListener) progressEvent -> { if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) { RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription()); fileUpload = mgr.uploadDirectory(this.bucket, this.path, localFile, true, metadatasProvider); for (final Upload upload : fileUpload.getSubTransfers()) { upload.addProgressListener((ProgressListener) progressEvent -> { if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) { RemoteUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
/**
 * Uploads {@code tempFile} to S3 at {@code host}/{@code key} via the transfer
 * manager, blocking until the transfer completes.
 *
 * <p>Records started/successful/failed counters on {@code STATS}. When
 * {@code sseEnabled} is set, requests SSE-S3 (AES-256) server-side encryption.
 *
 * @throws IOException if the upload fails ({@link AmazonClientException} is
 *     wrapped) or the waiting thread is interrupted (thrown as
 *     {@link InterruptedIOException} after re-asserting the interrupt flag)
 */
private void uploadObject() throws IOException {
    try {
        log.debug("Starting upload for host: %s, key: %s, file: %s, size: %s", host, key, tempFile, tempFile.length());
        STATS.uploadStarted();
        PutObjectRequest request = new PutObjectRequest(host, key, tempFile);
        if (sseEnabled) {
            // Ask S3 to encrypt the object at rest with SSE-S3 (AES-256).
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setMetadata(metadata);
        }
        Upload upload = transferManager.upload(request);
        if (log.isDebugEnabled()) {
            // Progress events are only used for debug logging; skip the listener otherwise.
            upload.addProgressListener(createProgressListener(upload));
        }
        upload.waitForCompletion();
        STATS.uploadSuccessful();
        log.debug("Completed upload for host: %s, key: %s", host, key);
    }
    catch (AmazonClientException e) {
        STATS.uploadFailed();
        throw new IOException(e);
    }
    catch (InterruptedException e) {
        STATS.uploadFailed();
        // Re-assert the interrupt flag so callers higher up can observe it.
        Thread.currentThread().interrupt();
        // Preserve context and cause instead of throwing a bare InterruptedIOException:
        // InterruptedIOException has no (message, cause) constructor, so use initCause.
        InterruptedIOException ioe =
                new InterruptedIOException("Interrupted uploading to host: " + host + ", key: " + key);
        ioe.initCause(e);
        throw ioe;
    }
}
up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback)); LOG.debug(
up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback)); LOG.debug(
upload.addProgressListener(new ProgressListener() {
fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider); for (final Upload upload : fileUpload.getSubTransfers()) { upload.addProgressListener((ProgressListener) progressEvent -> { if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) { RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
@Override public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) { try { ObjectMetadata metadata = new ObjectMetadata(); // Set content encoding to gzip. This way browsers will decompress on download using native deflate code. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/ metadata.setContentEncoding("gzip"); metadata.setContentType(persistenceBuffer.getMimeType()); // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer. metadata.setContentLength(persistenceBuffer.getSize()); // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata); final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata); upload.addProgressListener(new UploadProgressLogger(upload)); // Block until upload completes to avoid accumulating unlimited uploads in memory. upload.waitForCompletion(); } catch (Exception e) { throw new RuntimeException(e); } }
@Override public void saveData(String directory, String fileName, PersistenceBuffer persistenceBuffer) { try { ObjectMetadata metadata = new ObjectMetadata(); // Set content encoding to gzip. This way browsers will decompress on download using native deflate code. // http://www.rightbrainnetworks.com/blog/serving-compressed-gzipped-static-files-from-amazon-s3-or-cloudfront/ metadata.setContentEncoding("gzip"); metadata.setContentType(persistenceBuffer.getMimeType()); // We must setContentLength or the S3 client will re-buffer the InputStream into another memory buffer. metadata.setContentLength(persistenceBuffer.getSize()); // amazonS3.putObject(directory, fileName, persistenceBuffer.getInputStream(), metadata); final Upload upload = transferManager.upload(directory, fileName, persistenceBuffer.getInputStream(), metadata); upload.addProgressListener(new UploadProgressLogger(upload)); // Block until upload completes to avoid accumulating unlimited uploads in memory. upload.waitForCompletion(); } catch (Exception e) { throw new RuntimeException(e); } }
up.addProgressListener(progressListener); try { up.waitForUploadResult();
upload.addProgressListener(createProgressListener(upload));
/** * Execute a PUT via the transfer manager, blocking for completion, * updating the metastore afterwards. * If the waiting for completion is interrupted, the upload will be * aborted before an {@code InterruptedIOException} is thrown. * @param putObjectRequest request * @param progress optional progress callback * @return the upload result * @throws InterruptedIOException if the blocking was interrupted. */ @Retries.OnceRaw("For PUT; post-PUT actions are RetriesExceptionsSwallowed") UploadResult executePut(PutObjectRequest putObjectRequest, Progressable progress) throws InterruptedIOException { String key = putObjectRequest.getKey(); UploadInfo info = putObject(putObjectRequest); Upload upload = info.getUpload(); ProgressableProgressListener listener = new ProgressableProgressListener( this, key, upload, progress); upload.addProgressListener(listener); UploadResult result = waitForUploadCompletion(key, info); listener.uploadCompleted(); // post-write actions finishedWrite(key, info.getLength()); return result; }
up.addProgressListener(progressListener); try { up.waitForUploadResult();