/**
 * Forwards the given progress event type to this object's listener chain.
 *
 * @param eventType the type of progress event to publish
 */
protected void fireProgressEvent(final ProgressEventType eventType) {
    publishProgress(listenerChain, eventType);
}
// Publishes a transfer-completed event once the wrapped stream reaches EOF.
@Override protected void onEOF() { publishProgress(getListener(), ProgressEventType.TRANSFER_COMPLETED_EVENT); } };
/** * Pause before the next retry and record metrics around retry behavior. */ private void pauseBeforeRetry(ExecOneRequestParams execOneParams, final ProgressListener listener) throws InterruptedException { publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT); // Notify the progress listener of the retry awsRequestMetrics.startEvent(Field.RetryPauseTime); try { doPauseBeforeRetry(execOneParams); } finally { awsRequestMetrics.endEvent(Field.RetryPauseTime); } }
/**
 * Cancels all the futures associated with this upload operation. Also
 * cleans up the parts on Amazon S3 if the upload is performed as a
 * multi-part upload operation.
 */
void performAbort() {
    cancelFutures();
    // Cleans up parts already uploaded to S3 (no-op for single-part uploads).
    multipartUploadCallable.performAbortMultipartUpload();
    publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
} }
void uploadComplete() { markAllDone(); transfer.setState(TransferState.Completed); // AmazonS3Client takes care of all the events for single part uploads, // so we only need to send a completed event for multipart uploads. if (multipartUploadCallable.isMultipartUpload()) { publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT); } }
/**
 * Runs the multipart-copy callable and either hands completion off to a
 * CompleteMultipartCopy task (parallel part copies; callable returned null)
 * or finishes the transfer immediately.
 *
 * @return the copy result, or null when completion was handed off to a
 *         CompleteMultipartCopy task
 * @throws SdkClientException if the copy was canceled
 * @throws Exception any other failure from the underlying copy
 */
@Override
public CopyResult call() throws Exception {
    try {
        CopyResult result = multipartCopyCallable.call();
        if (result == null) {
            // Part copies are still in flight: submit a follow-up task that
            // waits on them and completes the multipart copy.
            futures.addAll(multipartCopyCallable.getFutures());
            futureReference.set(threadPool.submit(new CompleteMultipartCopy(
                    multipartCopyCallable.getMultipartUploadId(), s3, origReq,
                    futures, listener, this)));
        } else {
            copyComplete();
        }
        return result;
    } catch (CancellationException e) {
        transfer.setState(TransferState.Canceled);
        publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
        // Fixed copy-paste defect: this monitor handles a copy, not an
        // upload, so report the cancellation as a copy.
        throw new SdkClientException("Copy canceled");
    } catch (Exception e) {
        transfer.setState(TransferState.Failed);
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
}
/**
 * Starts the upload: moves the transfer into the InProgress state, publishes
 * a TRANSFER_STARTED_EVENT only for multipart uploads, and dispatches to the
 * appropriate upload strategy.
 *
 * @return the result of the upload
 * @throws Exception any failure during the upload
 */
public UploadResult call() throws Exception {
    upload.setState(TransferState.InProgress);
    if (!isMultipartUpload()) {
        return uploadInOneChunk();
    }
    publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
    return uploadInParts();
}
/**
 * Starts the copy: moves the transfer into the InProgress state, publishes a
 * TRANSFER_STARTED_EVENT only for multipart copies, and dispatches to the
 * appropriate copy strategy.
 *
 * @return the copy result for single-chunk copies; null for multipart copies
 *         (the result is produced by the completion task instead)
 * @throws Exception any failure during the copy
 */
public CopyResult call() throws Exception {
    copy.setState(TransferState.InProgress);
    if (!isMultipartCopy()) {
        return copyInOneChunk();
    }
    publishProgress(listenerChain, ProgressEventType.TRANSFER_STARTED_EVENT);
    copyInParts();
    // null tells the caller that a multipart copy is in progress.
    return null;
}
/**
 * Waits on the multipart-upload callable and either hands completion off to
 * a CompleteMultipartUpload task (parallel part uploads) or finishes the
 * transfer directly. Updates the transfer state before rethrowing on
 * cancellation or failure.
 *
 * @return the upload result, or null when completion was handed off
 * @throws SdkClientException if the upload was canceled
 * @throws Exception any other failure from the underlying upload
 */
@Override
public UploadResult call() throws Exception {
    try {
        UploadResult result = multipartUploadCallable.call();
        /*
         * If the result is null, it is a multipart parallel upload, so a
         * new task is submitted for initiating a complete-multipart-upload
         * request.
         */
        if (result == null) {
            futures.addAll(multipartUploadCallable.getFutures());
            futureReference.set(threadPool.submit(new CompleteMultipartUpload(
                    multipartUploadCallable.getMultipartUploadId(), s3, origReq,
                    futures, multipartUploadCallable
                    .getETags(), listener, this)));
        } else {
            uploadComplete();
        }
        return result;
    } catch (CancellationException e) {
        transfer.setState(TransferState.Canceled);
        publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
        throw new SdkClientException("Upload canceled");
    } catch (Exception e) {
        transfer.setState(TransferState.Failed);
        // NOTE(review): unlike the copy monitor, no TRANSFER_FAILED_EVENT is
        // published here — presumably it is fired elsewhere in the upload
        // path; confirm before adding one.
        throw e;
    }
}
// HTTP response lifecycle: STARTED before handling, COMPLETED after the
// response handler finishes and the ResponseProcessingTime metric is closed.
// NOTE(review): fragment — "awsResponse = responseHandler" is truncated
// mid-statement; consult the full source before relying on this snippet.
publishProgress(listener, ProgressEventType.HTTP_RESPONSE_STARTED_EVENT); try { awsResponse = responseHandler awsRequestMetrics.endEvent(Field.ResponseProcessingTime); publishProgress(listener, ProgressEventType.HTTP_RESPONSE_COMPLETED_EVENT);
/**
 * Performs the copy of an Amazon S3 object from the source bucket to the
 * destination bucket as multiple copy-part requests; each part is addressed
 * by a byte range (first-last). On any failure a TRANSFER_FAILED_EVENT is
 * published and the multipart copy is aborted before rethrowing.
 *
 * @throws Exception any failure while carrying out the request
 */
private void copyInParts() throws Exception {
    multipartUploadId = initiateMultipartUpload(copyObjectRequest);
    final long contentLength = metadata.getContentLength();
    final long optimalPartSize = getOptimalPartSize(contentLength);
    try {
        copyPartsInParallel(new CopyPartRequestFactory(
                copyObjectRequest, multipartUploadId, optimalPartSize,
                contentLength));
    } catch (Exception e) {
        publishProgress(listenerChain, ProgressEventType.TRANSFER_FAILED_EVENT);
        // Clean up any parts already copied before surfacing the failure.
        abortMultipartCopy();
        throw new RuntimeException("Unable to perform multipart copy", e);
    }
}
/**
 * Uploads the given file to a Glacier vault as a single archive, publishing
 * started/completed/failed transfer events around the service call.
 *
 * @return the upload result carrying the new archive id
 */
private UploadResult uploadInSinglePart(final String accountId,
        final String vaultName, final String archiveDescription,
        final File file, ProgressListener progressListener) {
    // Glacier requires the whole-file tree hash as the request checksum.
    String checksum = TreeHashGenerator.calculateTreeHash(file);
    ResettableInputStream is = newResettableInputStream(file);
    try {
        publishProgress(progressListener, ProgressEventType.TRANSFER_STARTED_EVENT);
        final UploadArchiveRequest req = new UploadArchiveRequest()
                .withAccountId(accountId)
                .withArchiveDescription(archiveDescription)
                .withVaultName(vaultName)
                .withChecksum(checksum)
                .withBody(is)
                .withContentLength(file.length())
                // capture the bytes transferred
                .withGeneralProgressListener(progressListener)
                ;
        UploadArchiveResult uploadArchiveResult = glacier.uploadArchive(req);
        String artifactId = uploadArchiveResult.getArchiveId();
        publishProgress(progressListener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
        return new UploadResult(artifactId);
    } catch (Throwable t) {
        publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw failure(t);
    } finally {
        // Always release the resettable stream, success or failure.
        is.release();
    }
} }
// Failure path of an upload attempt: notify listeners, abort the multipart
// upload on S3, then rethrow the original exception.
// NOTE(review): fragment — the enclosing try/catch is outside this view.
publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); performAbortMultipartUpload(); throw e;
void copyComplete() { markAllDone(); transfer.setState(TransferState.Completed); // Since the copy has completed we can assume all bytes were successfully transferred // This is required since there are no progress updates available during server-side // copying of data. transfer.getProgress().updateProgress(transfer.getProgress().getTotalBytesToTransfer()); // AmazonS3Client takes care of all the events for single part uploads, // so we only need to send a completed event for multipart uploads. if (multipartCopyCallable.isMultipartCopy()) { publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT); } }
// Client-request lifecycle events around executeHelper(): STARTED before the
// call, SUCCESS after it returns, FAILED on AmazonClientException; API-call
// latency metrics are closed on the success path.
// NOTE(review): fragment — the catch body and enclosing method are cut off in
// this view.
request.setContent(notCloseable); try { publishProgress(listener, ProgressEventType.CLIENT_REQUEST_STARTED_EVENT); response = executeHelper(); publishProgress(listener, ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT); awsRequestMetrics.endEvent(AwsClientSideMonitoringMetrics.ApiCallLatency); awsRequestMetrics.getTimingInfo().endTiming(); return response; } catch (AmazonClientException e) { publishProgress(listener, ProgressEventType.CLIENT_REQUEST_FAILED_EVENT);
// NOTE(review): this snippet is a garbled concatenation of several pieces of a
// Glacier multipart upload method (PREPARING/STARTED/COMPLETED/FAILED progress
// events plus abortMultipartUpload cleanup on failure). The statement order
// shown here is not reliable — consult the full source before editing.
String partSizeString = Long.toString(partSize); publishProgress(progressListener, ProgressEventType.TRANSFER_PREPARING_EVENT); String uploadId = null; try { uploadId = initiateResult.getUploadId(); } catch (Throwable t) { publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); throw failure(t); publishProgress(progressListener, ProgressEventType.TRANSFER_STARTED_EVENT); final String fileNotFoundMsg = "Unable to find file '" + file.getAbsolutePath() + "'"; publishProgress(progressListener, ProgressEventType.TRANSFER_COMPLETED_EVENT); return new UploadResult(artifactId); } catch (Throwable t) { publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); glacier.abortMultipartUpload(new AbortMultipartUploadRequest(accountId, vaultName, uploadId)); throw failure(t, "Unable to finish the upload");
// Part-level progress events for an upload-part attempt: COMPLETED with the
// resulting ETag on success.
// NOTE(review): fragment — the failure path publishes TRANSFER_PART_FAILED_EVENT
// immediately followed by TRANSFER_PART_COMPLETED_EVENT, which looks suspicious;
// confirm whether the second publish is intentional (e.g. kept for backward
// compatibility) before changing it.
publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT); UploadPartResult result = new UploadPartResult(); result.setETag(etag); return result; } catch (Throwable t) { publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT); publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT); throw failure(t);
// NOTE(review): fragment — PREPARING/FAILED progress events around a Glacier
// job-status monitor; the intervening code (and the try that the failure path
// belongs to) is cut off in this view.
JobStatusMonitor jobStatusMonitor = null; String jobId = null; publishProgress(progressListener, ProgressEventType.TRANSFER_PREPARING_EVENT); publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); throw failure(t); } finally {
/**
 * Completes the multipart upload once all parts are done, propagating the
 * original request's progress listener, metric collector and requester-pays
 * flag. On failure the monitor is notified and a TRANSFER_FAILED_EVENT is
 * published before the exception is rethrown.
 *
 * @return the assembled upload result (bucket, key, ETag, version id)
 * @throws Exception if completing the multipart upload fails
 */
@Override
public UploadResult call() throws Exception {
    final CompleteMultipartUploadResult res;
    try {
        res = s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector()));
    } catch (Exception e) {
        monitor.uploadFailure();
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
    final UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    monitor.uploadComplete();
    return uploadResult;
}
/**
 * Completes the multipart copy once all parts are done, propagating the
 * original request's progress listener, metric collector and requester-pays
 * flag. On failure the monitor is notified and a TRANSFER_FAILED_EVENT is
 * published before the exception is rethrown.
 *
 * @return the assembled copy result (source/destination coordinates, ETag,
 *         version id)
 * @throws Exception if completing the multipart upload fails
 */
@Override
public CopyResult call() throws Exception {
    final CompleteMultipartUploadResult res;
    try {
        res = s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                origReq.getDestinationBucketName(), origReq.getDestinationKey(),
                uploadId, collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector()));
    } catch (Exception e) {
        monitor.reportFailure();
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
    final CopyResult copyResult = new CopyResult();
    copyResult.setSourceBucketName(origReq.getSourceBucketName());
    copyResult.setSourceKey(origReq.getSourceKey());
    copyResult.setDestinationBucketName(res.getBucketName());
    copyResult.setDestinationKey(res.getKey());
    copyResult.setETag(res.getETag());
    copyResult.setVersionId(res.getVersionId());
    monitor.copyComplete();
    return copyResult;
}