/**
 * Creates an output stream that buffers all writes into {@code tempFile} on local disk;
 * the buffered file is expected to be uploaded to S3 (via the configured
 * {@code TransferManager}) when the stream is closed.
 *
 * @param s3 S3 client used by the transfer manager; must not be null
 * @param host target S3 host/bucket identifier; must not be null
 * @param key object key the data will be uploaded under; must not be null
 * @param tempFile local staging file that receives all written bytes; must not be null
 * @param sseEnabled whether server-side encryption is requested for the upload
 * @param sseType server-side encryption type; must not be null
 * @param sseKmsKeyId KMS key id for SSE-KMS; may be null when SSE-KMS is not in use — TODO confirm
 * @param multiPartUploadMinFileSize file size threshold above which multipart upload is used
 * @param multiPartUploadMinPartSize minimum size of each multipart upload part
 * @param aclType canned ACL to apply to the uploaded object; must not be null
 * @throws IOException if the staging file cannot be opened for writing
 */
public PrestoS3OutputStream(
        AmazonS3 s3,
        String host,
        String key,
        File tempFile,
        boolean sseEnabled,
        PrestoS3SseType sseType,
        String sseKmsKeyId,
        long multiPartUploadMinFileSize,
        long multiPartUploadMinPartSize,
        PrestoS3AclType aclType)
        throws IOException
{
    // All writes go through a buffered stream onto the local staging file.
    super(new BufferedOutputStream(new FileOutputStream(requireNonNull(tempFile, "tempFile is null"))));

    transferManager = TransferManagerBuilder.standard()
            .withS3Client(requireNonNull(s3, "s3 is null"))
            .withMinimumUploadPartSize(multiPartUploadMinPartSize)
            .withMultipartUploadThreshold(multiPartUploadMinFileSize)
            .build();

    // Fold the null check into the assignment instead of discarding requireNonNull's result.
    this.aclType = requireNonNull(aclType, "aclType is null").getCannedACL();
    this.host = requireNonNull(host, "host is null");
    this.key = requireNonNull(key, "key is null");
    this.tempFile = tempFile;
    this.sseEnabled = sseEnabled;
    this.sseType = requireNonNull(sseType, "sseType is null");
    this.sseKmsKeyId = sseKmsKeyId;

    log.debug("OutputStream for key '%s' using file: %s", key, tempFile);
}
/**
 * Output stream constructor: writes are staged to {@code tempFile} through a buffered
 * stream, and a transfer manager is prepared for the eventual upload of that file to S3.
 *
 * @param s3 the S3 client backing the transfer manager (non-null)
 * @param host S3 host/bucket the object belongs to (non-null)
 * @param key object key for the upload (non-null)
 * @param tempFile local file used as the write buffer (non-null)
 * @param sseEnabled true if server-side encryption should be applied
 * @param sseType server-side encryption type (non-null)
 * @param sseKmsKeyId KMS key id; nullable
 * @param multiPartUploadMinFileSize threshold at which multipart upload kicks in
 * @param multiPartUploadMinPartSize minimum multipart part size
 * @param aclType canned ACL for the uploaded object (non-null)
 * @throws IOException when the staging file cannot be created or opened
 */
public PrestoS3OutputStream(
        AmazonS3 s3,
        String host,
        String key,
        File tempFile,
        boolean sseEnabled,
        PrestoS3SseType sseType,
        String sseKmsKeyId,
        long multiPartUploadMinFileSize,
        long multiPartUploadMinPartSize,
        PrestoS3AclType aclType)
        throws IOException
{
    super(new BufferedOutputStream(
            new FileOutputStream(requireNonNull(tempFile, "tempFile is null"))));

    // Multipart thresholds come straight from the caller-supplied configuration values.
    transferManager =
            TransferManagerBuilder.standard()
                    .withS3Client(requireNonNull(s3, "s3 is null"))
                    .withMinimumUploadPartSize(multiPartUploadMinPartSize)
                    .withMultipartUploadThreshold(multiPartUploadMinFileSize)
                    .build();

    this.aclType = requireNonNull(aclType, "aclType is null").getCannedACL();
    this.host = requireNonNull(host, "host is null");
    this.key = requireNonNull(key, "key is null");
    this.tempFile = tempFile;
    this.sseEnabled = sseEnabled;
    this.sseType = requireNonNull(sseType, "sseType is null");
    this.sseKmsKeyId = sseKmsKeyId;

    log.debug("OutputStream for key '%s' using file: %s", key, tempFile);
}
/**
 * Implementation of the Mapper::setup() method. Reads the S3MapReduceCp options from the
 * job configuration and builds the {@code TransferManager} used for uploads.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = new S3MapReduceCpConfiguration(context.getConfiguration());

  ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);
  targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));

  // Resolve the eagerly-read builder inputs up front, in the same order they were consumed.
  long minimumPartSize = conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE);
  long multipartThreshold = conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD);
  AwsS3ClientFactory clientFactory = new AwsS3ClientFactory();

  // NOTE: the worker count is intentionally read inside newExecutor(), matching the
  // original lazy evaluation — it is looked up each time an executor is created.
  ExecutorFactory uploadExecutorFactory = new ExecutorFactory() {
    @Override
    public ExecutorService newExecutor() {
      return Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS));
    }
  };

  transferManager = TransferManagerBuilder
      .standard()
      .withMinimumUploadPartSize(minimumPartSize)
      .withMultipartUploadThreshold(multipartThreshold)
      .withS3Client(clientFactory.newInstance(conf))
      .withShutDownThreadPools(true)
      .withExecutorFactory(uploadExecutorFactory)
      .build();
}
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options
 * specified in the Job's configuration, to set up the Job: failure-handling policy, the
 * final target path, and the {@code TransferManager} used to upload files to S3.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  // Wrap the Hadoop configuration with the S3MapReduceCp-specific accessor.
  conf = new S3MapReduceCpConfiguration(context.getConfiguration());
  ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);
  targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  AwsS3ClientFactory awsS3ClientFactory = new AwsS3ClientFactory();
  // Build the transfer manager from configured part size / multipart threshold, with a
  // fixed-size upload thread pool; pool shutdown is delegated to the transfer manager.
  transferManager = TransferManagerBuilder
      .standard()
      .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
      .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
      .withS3Client(awsS3ClientFactory.newInstance(conf))
      .withShutDownThreadPools(true)
      .withExecutorFactory(new ExecutorFactory() {
        @Override
        public ExecutorService newExecutor() {
          // Worker count is read lazily, each time an executor is created.
          return Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS));
        }
      })
      .build();
}