public PrestoS3OutputStream(
        AmazonS3 s3,
        String host,
        String key,
        File tempFile,
        boolean sseEnabled,
        PrestoS3SseType sseType,
        String sseKmsKeyId,
        long multiPartUploadMinFileSize,
        long multiPartUploadMinPartSize,
        PrestoS3AclType aclType)
        throws IOException
{
    // All writes are buffered into a local temp file; the actual S3 upload
    // is deferred until the stream is closed.
    super(new BufferedOutputStream(new FileOutputStream(requireNonNull(tempFile, "tempFile is null"))));

    transferManager = TransferManagerBuilder.standard()
            .withS3Client(requireNonNull(s3, "s3 is null"))
            .withMinimumUploadPartSize(multiPartUploadMinPartSize)
            .withMultipartUploadThreshold(multiPartUploadMinFileSize)
            .build();

    this.aclType = requireNonNull(aclType, "aclType is null").getCannedACL();
    this.host = requireNonNull(host, "host is null");
    this.key = requireNonNull(key, "key is null");
    this.tempFile = tempFile;
    this.sseEnabled = sseEnabled;
    this.sseType = requireNonNull(sseType, "sseType is null");
    this.sseKmsKeyId = sseKmsKeyId;

    log.debug("OutputStream for key '%s' using file: %s", key, tempFile);
}
/**
 * Creates an uploader that sends log files to the given S3 bucket.
 *
 * @param s3Client the S3 client backing the transfer manager; must not be null
 * @param s3Bucket the destination bucket name; must not be null
 */
public LogsS3Uploader(AmazonS3 s3Client, String s3Bucket) {
    // Fail fast on missing arguments instead of deferring to an NPE deep
    // inside the SDK at upload time.
    this.s3Bucket = java.util.Objects.requireNonNull(s3Bucket, "s3Bucket is null");
    this.transferManager = TransferManagerBuilder.standard()
            .withS3Client(java.util.Objects.requireNonNull(s3Client, "s3Client is null"))
            .build();
}
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options specified in the Job's
 * configuration, to set up the Job.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
    conf = new S3MapReduceCpConfiguration(context.getConfiguration());

    ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);
    targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));

    AwsS3ClientFactory awsS3ClientFactory = new AwsS3ClientFactory();
    transferManager = TransferManagerBuilder
        .standard()
        .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
        .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
        .withS3Client(awsS3ClientFactory.newInstance(conf))
        .withShutDownThreadPools(true)
        // ExecutorFactory has a single abstract method (newExecutor), so a lambda
        // replaces the anonymous inner class; this pattern is already used
        // elsewhere in this code base.
        .withExecutorFactory(
            () -> Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS)))
        .build();
}
// NOTE(review): fragment — the statement constructing the executor service begins
// above this chunk; only its tail ("numTransferThreads).create();") is visible here,
// so it is left untouched. The TransferManager below reuses that externally-created
// ExecutorService ("service") and applies MULTIPART_COPY_THRESHOLD as the size
// above which copies are performed as multipart copies.
numTransferThreads).create(); TransferManager transferManager = TransferManagerBuilder.standard() .withS3Client(amazonS3Client).withExecutorFactory(() -> service) .withMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD) .build();
/**
 * Builds a {@link TransferManager} for the given target S3 client, applying the
 * multipart copy threshold and part size taken from the supplied copier options.
 */
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
    TransferManagerBuilder builder = TransferManagerBuilder.standard();
    builder.setMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold());
    builder.setMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize());
    builder.setS3Client(targetS3Client);
    return builder.build();
}
// Convenience constructor: wraps the raw client in a default TransferManager and
// delegates to the TransferManager-based constructor.
// NOTE(review): the null check necessarily runs *after* the this(...) call (Java
// requires the delegating call to be the first statement), so a null client is
// first handed to TransferManagerBuilder before the assertion fires.
public S3MessageHandler(AmazonS3 amazonS3, String bucket, boolean produceReply) { this(TransferManagerBuilder.standard() .withS3Client(amazonS3) .build(), bucket, produceReply); Assert.notNull(amazonS3, "'amazonS3' must not be null"); }
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options specified in the Job's
 * configuration, to set up the Job.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
    conf = new S3MapReduceCpConfiguration(context.getConfiguration());

    ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);
    targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));

    AwsS3ClientFactory awsS3ClientFactory = new AwsS3ClientFactory();
    transferManager = TransferManagerBuilder
        .standard()
        .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
        .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
        .withS3Client(awsS3ClientFactory.newInstance(conf))
        .withShutDownThreadPools(true)
        // ExecutorFactory has a single abstract method (newExecutor), so a lambda
        // replaces the anonymous inner class; this pattern is already used
        // elsewhere in this code base.
        .withExecutorFactory(
            () -> Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS)))
        .build();
}
/**
 * Creates a {@link TransferManager} around the target S3 client, configured with
 * the multipart copy tuning values carried by the copier options.
 */
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
    // The with* setters are independent, so the client is attached first and the
    // multipart tuning applied afterwards.
    return TransferManagerBuilder
        .standard()
        .withS3Client(targetS3Client)
        .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold())
        .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize())
        .build();
}
// Convenience constructor: wraps the raw client in a default TransferManager and
// delegates to the TransferManager-based constructor, with the bucket supplied as
// a SpEL expression evaluated per message.
// NOTE(review): the null check necessarily runs *after* the this(...) call (Java
// requires the delegating call to be the first statement), so a null client is
// first handed to TransferManagerBuilder before the assertion fires.
public S3MessageHandler(AmazonS3 amazonS3, Expression bucketExpression, boolean produceReply) { this(TransferManagerBuilder.standard() .withS3Client(amazonS3) .build(), bucketExpression, produceReply); Assert.notNull(amazonS3, "'amazonS3' must not be null"); }
public static void copyObjectSimple(String from_bucket, String from_key, String to_bucket, String to_key) { System.out.println("Copying s3 object: " + from_key); System.out.println(" from bucket: " + from_bucket); System.out.println(" to s3 object: " + to_key); System.out.println(" in bucket: " + to_bucket); TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { Copy xfer = xfer_mgr.copy(from_bucket, from_key, to_bucket, to_key); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }
public PrestoS3OutputStream( AmazonS3 s3, String host, String key, File tempFile, boolean sseEnabled, PrestoS3SseType sseType, String sseKmsKeyId, long multiPartUploadMinFileSize, long multiPartUploadMinPartSize, PrestoS3AclType aclType) throws IOException { super(new BufferedOutputStream(new FileOutputStream(requireNonNull(tempFile, "tempFile is null")))); transferManager = TransferManagerBuilder.standard() .withS3Client(requireNonNull(s3, "s3 is null")) .withMinimumUploadPartSize(multiPartUploadMinPartSize) .withMultipartUploadThreshold(multiPartUploadMinFileSize).build(); requireNonNull(aclType, "aclType is null"); this.aclType = aclType.getCannedACL(); this.host = requireNonNull(host, "host is null"); this.key = requireNonNull(key, "key is null"); this.tempFile = tempFile; this.sseEnabled = sseEnabled; this.sseType = requireNonNull(sseType, "sseType is null"); this.sseKmsKeyId = sseKmsKeyId; log.debug("OutputStream for key '%s' using file: %s", key, tempFile); }
// Convenience constructor: wraps the raw client in a default TransferManager and
// delegates to the TransferManager-based constructor, with the bucket supplied as
// a SpEL expression evaluated per message.
// NOTE(review): the null check necessarily runs *after* the this(...) call (Java
// requires the delegating call to be the first statement), so a null client is
// first handed to TransferManagerBuilder before the assertion fires.
public S3MessageHandler(AmazonS3 amazonS3, Expression bucketExpression, boolean produceReply) { this(TransferManagerBuilder.standard() .withS3Client(amazonS3) .build(), bucketExpression, produceReply); Assert.notNull(amazonS3, "'amazonS3' must not be null"); }
public static void uploadFileList(String[] file_paths, String bucket_name, String key_prefix, boolean pause) { System.out.println("file list: " + Arrays.toString(file_paths) + (pause ? " (pause)" : "")); // convert the file paths to a list of File objects (required by the // uploadFileList method) ArrayList<File> files = new ArrayList<File>(); for (String path : file_paths) { files.add(new File(path)); } TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { MultipleFileUpload xfer = xfer_mgr.uploadFileList(bucket_name, key_prefix, new File("."), files); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }
/** Builds a TransferManager wired to this instance's S3 client. */
private TransferManager createTransferManager() {
    TransferManagerBuilder builder = TransferManagerBuilder.standard();
    builder.setS3Client(getS3Client());
    return builder.build();
}
}
public static void downloadDir(String bucket_name, String key_prefix, String dir_path, boolean pause) { System.out.println("downloading to directory: " + dir_path + (pause ? " (pause)" : "")); TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { MultipleFileDownload xfer = xfer_mgr.downloadDirectory( bucket_name, key_prefix, new File(dir_path)); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }
/**
 * Creates a persistence layer bound to the given AWS region: a region-scoped S3
 * client plus a TransferManager wrapping it.
 */
public S3FilePersistence (String region) {
    // Build the region-scoped client first, then wrap it in a TransferManager.
    // (Accelerate mode was previously tried here and removed: resolving
    // s3-accelerate.amazonaws.com failed.)
    this.amazonS3 = AmazonS3ClientBuilder
            .standard()
            .withRegion(region)
            .build();
    this.transferManager = TransferManagerBuilder
            .standard()
            .withS3Client(this.amazonS3)
            .build();
}
public static void downloadFile(String bucket_name, String key_name, String file_path, boolean pause) { System.out.println("Downloading to file: " + file_path + (pause ? " (pause)" : "")); File f = new File(file_path); TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { Download xfer = xfer_mgr.download(bucket_name, key_name, f); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }
/**
 * Initializes the S3 client for the supplied region and the TransferManager
 * that performs the actual file transfers through it.
 */
public S3FilePersistence (String region) {
    // Region-scoped client; accelerate mode was previously tried here and
    // dropped because s3-accelerate.amazonaws.com failed to resolve.
    amazonS3 = AmazonS3ClientBuilder
            .standard()
            .withRegion(region)
            .build();
    // The TransferManager delegates all requests to the client above.
    transferManager = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .build();
}
public static void uploadDir(String dir_path, String bucket_name, String key_prefix, boolean recursive, boolean pause) { System.out.println("directory: " + dir_path + (recursive ? " (recursive)" : "") + (pause ? " (pause)" : "")); TransferManager xfer_mgr = TransferManagerBuilder.standard().build(); try { MultipleFileUpload xfer = xfer_mgr.uploadDirectory(bucket_name, key_prefix, new File(dir_path), recursive); // loop with Transfer.isDone() XferMgrProgress.showTransferProgress(xfer); // or block with Transfer.waitForCompletion() XferMgrProgress.waitForCompletion(xfer); } catch (AmazonServiceException e) { System.err.println(e.getErrorMessage()); System.exit(1); } xfer_mgr.shutdownNow(); }