Refine search
String bucket, String key, boolean isAdditionalHeadRequestToFindRegion) { AmazonWebServiceRequest originalRequest = request.getOriginalRequest(); checkHttps(originalRequest); S3SignerProvider signerProvider = new S3SignerProvider(this, getSigner()); ExecutionContext executionContext = createExecutionContext(originalRequest, signerProvider); AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); request.setAWSRequestMetrics(awsRequestMetrics); Response<X> response = null; try { request.setTimeOffset(timeOffset); if (ase.getStatusCode() == 301) { if (ase.getAdditionalDetails() != null) { String region = ase.getAdditionalDetails().get(Headers.S3_BUCKET_REGION); bucketRegionCache.put(bucket, region); ase.setErrorMessage("The bucket is in this region: " + region + ". Please use this region to retry the request");
if (404 == e.getStatusCode() || "NoSuchKey".equals(e.getErrorCode()) || "NoSuchBucket".equals(e.getErrorCode())) { return Optional.absent(); } else {
Request<CopyPartRequest> request = createRequest(destinationBucketName, destinationKey, copyPartRequest, HttpMethodName.PUT); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UploadPartCopy"); populateRequestWithCopyPartParameters(request, copyPartRequest); request.addParameter("uploadId", copyPartRequest.getUploadId()); request.addParameter("partNumber", Integer.toString(copyPartRequest.getPartNumber())); populateRequesterPaysHeader(request, copyPartRequest.isRequesterPays()); if ( ase.getStatusCode() == Constants.FAILED_PRECONDITION_STATUS_CODE ) { return null; String hostId = copyObjectResultHandler.getErrorHostId(); AmazonS3Exception ase = new AmazonS3Exception(errorMessage); ase.setErrorCode(errorCode); ase.setErrorType(ErrorType.Service); ase.setRequestId(requestId); ase.setExtendedRequestId(hostId); ase.setServiceName(request.getServiceName()); ase.setStatusCode(200);
/**
 * Builds a detailed message that appends the service name, HTTP status
 * code, S3 error code, request ID, and S3 extended request ID to the
 * underlying error message.
 */
@Override
public String getMessage() {
    // %s renders null as "null", matching the original concatenation
    // behavior when any of the fields is absent.
    return String.format(
            "%s (Service: %s; Status Code: %d; Error Code: %s; Request ID: %s; S3 Extended Request ID: %s)",
            getErrorMessage(),
            getServiceName(),
            getStatusCode(),
            getErrorCode(),
            getRequestId(),
            getExtendedRequestId());
}
/**
 * Matches exceptions whose error code contains {@code ERROR_CODE} and
 * whose message contains {@code RETYABLE_ERROR_MESSAGE}; a null
 * exception, code, or message never matches.
 */
@Override
public boolean test(AmazonS3Exception exception) {
    if (exception == null) {
        return false;
    }
    final String code = exception.getErrorCode();
    final String message = exception.getErrorMessage();
    return code != null
            && message != null
            && code.contains(ERROR_CODE)
            && message.contains(RETYABLE_ERROR_MESSAGE);
}
}
/**
 * Creates a new {@link AmazonS3Exception} populated with every value
 * configured on this builder; the error type is derived from the
 * status code.
 */
public AmazonS3Exception build() {
    final AmazonS3Exception exception;
    if (errorResponseXml == null) {
        exception = new AmazonS3Exception(errorMessage);
    } else {
        exception = new AmazonS3Exception(errorMessage, errorResponseXml);
    }
    exception.setErrorCode(errorCode);
    exception.setExtendedRequestId(extendedRequestId);
    exception.setStatusCode(statusCode);
    exception.setRequestId(requestId);
    exception.setCloudFrontId(cloudFrontId);
    exception.setAdditionalDetails(additionalDetails);
    exception.setErrorType(errorTypeOf(statusCode));
    return exception;
}
private <X, Y extends AmazonWebServiceRequest> X invoke(Request<Y> request, HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler, String bucket, String key) { final AmazonWebServiceRequest originalRequest = request.getOriginalRequest(); final ExecutionContext executionContext = createExecutionContext(originalRequest); final AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics(); request.setAWSRequestMetrics(awsRequestMetrics); Response<X> response = null; try { request.setTimeOffset(timeOffset); && !(request.getOriginalRequest() instanceof CreateBucketRequest) && noExplicitRegionProvided(request)) { fetchRegionFromCache(bucket); if (ase.getStatusCode() == 301) { if (ase.getAdditionalDetails() != null) { final String region = ase.getAdditionalDetails().get(Headers.S3_BUCKET_REGION); bucketRegionCache.put(bucket, region); ase.setErrorMessage("The bucket is in this region: " + region + ". Please use this region to retry the request");
@Override public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest) throws SdkClientException, AmazonServiceException { copyObjectRequest = beforeClientExecution(copyObjectRequest); rejectNull(copyObjectRequest.getSourceBucketName(), "The source bucket name must be specified when copying an object"); rejectNull(copyObjectRequest.getSourceKey(), "The source object key must be specified when copying an object"); rejectNull(copyObjectRequest.getDestinationBucketName(), Request<CopyObjectRequest> request = createRequest(destinationBucketName, destinationKey, copyObjectRequest, HttpMethodName.PUT); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CopyObject"); populateRequestWithCopyObjectParameters(request, copyObjectRequest); if (ase.getStatusCode() == Constants.FAILED_PRECONDITION_STATUS_CODE) { return null; String hostId = copyObjectResultHandler.getErrorHostId(); AmazonS3Exception ase = new AmazonS3Exception(errorMessage); ase.setErrorCode(errorCode); ase.setErrorType(ErrorType.Service); ase.setRequestId(requestId); ase.setExtendedRequestId(hostId); ase.setServiceName(request.getServiceName()); ase.setStatusCode(200);
@Override public S3Object getObject(GetObjectRequest getObjectRequest) throws SdkClientException, AmazonServiceException { getObjectRequest = beforeClientExecution(getObjectRequest); assertNotNull(getObjectRequest, "GetObjectRequest"); assertStringNotEmpty(getObjectRequest.getBucketName(), "BucketName"); assertStringNotEmpty(getObjectRequest.getKey(), "Key"); Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetObject"); request.addParameter("versionId", getObjectRequest.getVersionId()); addPartNumberIfNotNull(request, getObjectRequest.getPartNumber()); request.addHeader(Headers.RANGE, "bytes=" + Long.toString(range[0]) + "-" + Long.toString(range[1])); if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) { publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT); return null;
/**
 * Determines the region of the named bucket by issuing a HEAD Bucket
 * request to the us-west-1 region.
 *
 * S3 currently omits the region from a HEAD Bucket response when the
 * bucket owner has restricted the bucket to SigV4-only requests via
 * bucket policies; in that case the region is recovered from the error
 * response's additional details instead.
 */
private String getBucketRegionViaHeadRequest(String bucketName) {
    String region = null;
    try {
        Request<HeadBucketRequest> request =
                createRequest(bucketName, null, new HeadBucketRequest(bucketName), HttpMethodName.HEAD);
        request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "HeadBucket");
        HeadBucketResult result = invoke(request, new HeadBucketResultHandler(), bucketName, null, true);
        region = result.getBucketRegion();
    } catch (AmazonS3Exception exception) {
        // A SigV4-enforced bucket rejects the request, but the error may
        // still advertise the bucket's region in its details header.
        if (exception.getAdditionalDetails() != null) {
            region = exception.getAdditionalDetails().get(
                    Headers.S3_BUCKET_REGION);
        }
    }
    if (region == null && log.isDebugEnabled()) {
        log.debug("Not able to derive region of the " + bucketName + " from the HEAD Bucket requests.");
    }
    return region;
}
"The key parameter must be specified when requesting an object"); final Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET); request.addParameter("versionId", getObjectRequest.getVersionId()); request.addHeader(Headers.RANGE, rangeHeader); populateRequesterPaysHeader(request, getObjectRequest.isRequesterPays()); addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders()); addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE, if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) { fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.CANCELED_EVENT_CODE);
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.ObjectMetadata;
import org.apache.http.HttpStatus;

try {
    AmazonS3 s3 = new AmazonS3Client();
    // Fix: getObjectMetadata returns ObjectMetadata, not S3Object — the
    // original snippet did not compile.
    ObjectMetadata object = s3.getObjectMetadata("my-bucket", "my-client");
} catch (AmazonS3Exception e) {
    if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
        // bucket/key does not exist
    } else {
        throw e;
    }
}
/**
 * Checks whether the object exists by attempting to fetch its metadata.
 *
 * @return {@code true} if the metadata request succeeds, {@code false}
 *         when S3 answers 404; any other failure is rethrown.
 */
@Override
public boolean doesObjectExist(String bucketName, String objectName)
        throws AmazonServiceException, SdkClientException {
    try {
        getObjectMetadata(bucketName, objectName);
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 404) {
            return false;
        }
        throw e;
    }
    return true;
}
@Nullable public S3Object getObject(String key) throws StorageException { final S3Object object; try { object = conn.getObject(bucketName, key); } catch (AmazonS3Exception e) { if (404 == e.getStatusCode()) { // 404 == not found return null; } throw new StorageException("Error fetching " + key + ": " + e.getMessage(), e); } if (isPendingDelete(object)) { closeObject(object); return null; } return object; }
/**
 * Reports whether the exception should be treated as "object not found".
 *
 * NOTE(review): 403 is deliberately(?) treated the same as 404 — S3
 * answers 403 rather than 404 when the caller lacks permission to know
 * whether the key exists; confirm this is the intended semantics here.
 */
private boolean isNotFoundError(AmazonS3Exception e) {
    final int status = e.getStatusCode();
    return status == 404 || status == 403;
}
}
/**
 * Verifies that the S3 error-response handler parses every field of an
 * {@code <Error>} document into the resulting AmazonS3Exception.
 */
@Test
public void testHandleErrorResponse() throws IOException {
    String response = "<Error>"
            + "<Message>testError</Message>"
            + "<Code>testCode</Code>"
            + "<RequestId>testId</RequestId>"
            + "<HostId>testHost</HostId>"
            + "</Error>";
    ByteArrayInputStream content = new ByteArrayInputStream(response.getBytes(StringUtils.UTF8));
    HttpResponse errorResponse = new HttpResponse.Builder()
            .statusCode(200)
            .content(content)
            .statusText("testError").build();
    S3ErrorResponseHandler handler = new S3ErrorResponseHandler();
    AmazonS3Exception ase = (AmazonS3Exception) handler.handle(errorResponse);
    // Fix: JUnit's assertEquals takes (expected, actual) — the original
    // had every pair reversed, producing misleading failure messages.
    assertEquals("testError", ase.getErrorMessage());
    assertEquals(ErrorType.Client, ase.getErrorType());
    assertEquals("testId", ase.getRequestId());
    assertEquals("testHost", ase.getExtendedRequestId());
    assertEquals(200, ase.getStatusCode());
    assertEquals("testCode", ase.getErrorCode());
}
@Nullable public ObjectMetadata getObjectMetadata(String key) throws StorageException { ObjectMetadata obj = null; try { obj = conn.getObjectMetadata(bucketName, key); } catch (AmazonS3Exception e) { if (404 != e.getStatusCode()) { // 404 == not found throw new StorageException( "Error checking existence of " + key + ": " + e.getMessage(), e); } } return obj; }
/**
 * Test stub: throws an AmazonS3Exception carrying the configured HTTP
 * status unless it is SC_OK, in which case {@code null} is returned.
 */
@Override
public S3Object getObject(GetObjectRequest getObjectRequest) {
    if (getObjectHttpCode == SC_OK) {
        return null;
    }
    AmazonS3Exception failure =
            new AmazonS3Exception("Failing getObject call with " + getObjectHttpCode);
    failure.setStatusCode(getObjectHttpCode);
    throw failure;
}
/**
 * Retries requests rejected with "Invalid principal in policy" for a
 * CloudFront Origin Access Identity — a transient condition while the
 * newly created identity propagates through IAM.
 */
class CloudFrontRetryCondition implements RetryCondition {
    @Override
    public boolean shouldRetry(AmazonWebServiceRequest originalRequest,
                               AmazonClientException exception,
                               int retriesAttempted) {
        if (!(exception instanceof AmazonS3Exception)) {
            return false;
        }
        final AmazonS3Exception s3Exception = (AmazonS3Exception) exception;
        // Fix: the error code, message, additional details map, and its
        // "Detail" entry may each be null on other 400 responses; the
        // original dereferenced them unchecked and could NPE inside the
        // retry policy. Constant-first equals() and explicit null guards
        // make the predicate safely return false instead.
        if (s3Exception.getStatusCode() != 400
                || !"MalformedPolicy".equals(s3Exception.getErrorCode())
                || !"Invalid principal in policy".equals(s3Exception.getErrorMessage())
                || s3Exception.getAdditionalDetails() == null) {
            return false;
        }
        final String detail = s3Exception.getAdditionalDetails().get("Detail");
        return detail != null
                && detail.contains("arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity");
    }
}
MultipartUploadListing listing = s3.listMultipartUploads(listRequest); for (MultipartUpload upload : listing.getMultipartUploads()) { long uploadTime = upload.getInitiated().getTime(); } catch(AmazonClientException e) { if (e instanceof AmazonS3Exception && ((AmazonS3Exception)e).getStatusCode() == 403 && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) { getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " + "** The configured user does not have the s3:ListBucketMultipartUploads permission " +