// Blocks the caller until the wrapped S3 transfer finishes, returning its result.
// Declares `throws Exception` because waitForUploadResult() can throw InterruptedException.
public UploadResult get() throws Exception { return mUpload.waitForUploadResult(); } }
// Block until the upload completes, then remove the local temp file; a failed
// deletion is logged but not fatal since the upload itself already succeeded.
mManager.upload(putReq).waitForUploadResult(); if (!mFile.delete()) { LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
// Wait for the transfer to finish before logging success.
// NOTE(review): the log line includes the AWS access key id in plain text — confirm
// that is intended and not a credential-leak risk. Also, `new File(path)` discards
// its result and has no side effects — looks like dead code; verify intent.
upload.waitForUploadResult(); logger.info("Sync Complete For " + path + " to bucket: " + existingBucketName + " with AWS Access Id: " + accessKeyId); new File(path);
// Callable body for the multipart-upload operation: blocks until the upload
// completes and returns the resulting object's ETag.
@Override public String call() throws Exception { return upload.waitForUploadResult().getETag(); } }, OPERATION_MPU);
/**
 * Uploads the given file as a metadata record under the metadata key prefix.
 *
 * @param input metadata file to upload; must not be null
 * @param name record name; must not be empty
 * @throws DataStoreException if the upload fails or is interrupted
 */
@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
    checkArgument(input != null, "input should not be null");
    checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Swap the TCCL so the AWS SDK resolves its resources via this class's loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
            .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Exception in uploading metadata file {}", input, e);
        throw new DataStoreException("Error in uploading metadata file", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
/**
 * Uploads the given file as a metadata record under the metadata key prefix.
 *
 * @param input metadata file to upload; must not be null
 * @param name record name; must not be empty
 * @throws DataStoreException if the upload fails or is interrupted
 */
@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
    checkArgument(input != null, "input should not be null");
    checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Swap the TCCL so the AWS SDK resolves its resources via this class's loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
            .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Exception in uploading metadata file {}", input, e);
        throw new DataStoreException("Error in uploading metadata file", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
/**
 * Uploads the given stream as a metadata record under the metadata key prefix.
 *
 * @param input metadata content to upload; must not be null
 * @param name record name; must not be empty
 * @throws DataStoreException if the upload fails or is interrupted
 */
@Override
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException {
    checkArgument(input != null, "input should not be null");
    checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Swap the TCCL so the AWS SDK resolves its resources via this class's loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
            .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata())));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Error in uploading", e);
        throw new DataStoreException("Error in uploading", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
/**
 * Uploads the given stream as a metadata record under the metadata key prefix.
 *
 * @param input metadata content to upload; must not be null
 * @param name record name; must not be empty
 * @throws DataStoreException if the upload fails or is interrupted
 */
@Override
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException {
    checkArgument(input != null, "input should not be null");
    checkArgument(!Strings.isNullOrEmpty(name), "name should not be empty");
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Swap the TCCL so the AWS SDK resolves its resources via this class's loader.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
            .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata())));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Error in uploading", e);
        throw new DataStoreException("Error in uploading", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
/**
 * Writes the buffered bytes to S3 as a single object, then releases the
 * transfer resources. Upload failures are logged at trace level and swallowed
 * (best-effort write), but cleanup now always runs — in the original, the
 * stream close and TransferManager shutdown sat inside the try and were
 * skipped whenever the upload threw, leaking the manager's worker threads.
 *
 * @throws Exception declared for compatibility; not thrown by the current body
 */
private void addFile() throws Exception {
    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();
    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    // Expire the object roughly three years out.
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);
    metadata.addUserMetadata("writer", "org.apache.streams");
    for (String s : metaData.keySet()) {
        metadata.addUserMetadata(s, metaData.get(s));
    }
    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, fileNameToWrite);
    } catch (InterruptedException ie) {
        // Preserve the interrupt status for callers; the write remains best-effort.
        Thread.currentThread().interrupt();
        LOGGER.trace("Ignoring", ie);
    } catch (Exception ignored) {
        // Deliberate best-effort semantics: failures are traced, not propagated.
        LOGGER.trace("Ignoring", ignored);
    } finally {
        is.close();
        transferManager.shutdownNow(false);
    }
}
/**
 * Closes the backing stream and uploads the buffered backup file to the object
 * store. Idempotent: subsequent calls are no-ops. The temporary file is deleted
 * (best-effort) and the parent stream closed even when the upload fails.
 *
 * @throws IOException if the upload fails or the wait is interrupted
 */
@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag, then surface the interruption as an I/O failure.
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException(e.toString())
            .initCause(e);
    } catch (AmazonClientException e) {
        // Attach the client exception as the cause instead of flattening it into
        // the message, so callers retain the full stack trace.
        throw new IOException(String.format("saving output %s", mKey), e);
    } finally {
        if (!mBackupFile.delete()) {
            // Log the file (not the stream object) so the path is visible.
            LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
/**
 * Closes the backing stream and uploads the buffered backup file to the object
 * store. Idempotent: subsequent calls are no-ops. The temporary file is deleted
 * (best-effort) and the parent stream closed even when the upload fails.
 *
 * @throws IOException if the upload fails or the wait is interrupted
 */
@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Restore the interrupt flag, then surface the interruption as an I/O failure.
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException(e.toString())
            .initCause(e);
    } catch (AmazonClientException e) {
        // Attach the client exception as the cause instead of flattening it into
        // the message, so callers retain the full stack trace.
        throw new IOException(String.format("saving output %s", mKey), e);
    } finally {
        if (!mBackupFile.delete()) {
            // Log the file (not the stream object) so the path is visible.
            LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
/**
 * Uploads the given file to the S3 location encoded in {@code uri}, blocking
 * until the transfer completes. Files whose name ends with "gzip" get a gzip
 * Content-Encoding header. The in-progress counter is maintained even on failure.
 *
 * @param uri destination, split into bucket and key by {@code pieces(uri)}
 * @param f local file to upload
 * @throws AmazonClientException if the S3 client reports a failure
 */
@Override
public void put(String uri, File f) throws AmazonClientException {
    LOG.trace("Uploading " + uri);
    String[] parts = pieces(uri);
    ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(f.length());
    if (f.getName().endsWith("gzip")) {
        om.setContentEncoding("gzip");
    }
    uploadsInProgress.incrementAndGet();
    try {
        PutObjectRequest req = new PutObjectRequest(parts[0], parts[1], f);
        req.setMetadata(om);
        UploadResult resp = svc.upload(req).waitForUploadResult();
        LOG.trace("Uploaded " + uri + " with ETag " + resp.getETag());
    } catch (InterruptedException ie) {
        // Restore the interrupt flag before rethrowing so callers can observe it.
        Thread.currentThread().interrupt();
        LOG.error("Interrupted while uploading {} to {}.", f.getPath(), uri);
        throw Throwables.propagate(ie);
    } finally {
        uploadsInProgress.decrementAndGet();
    }
}
// Attach the progress listener before blocking on completion so no events are missed.
upload.addProgressListener(listener); upload.waitForUploadResult();
bucket, key, file))); up.waitForUploadResult(); LOG.debug("synchronous upload to identifier [{}] completed.", identifier); } catch (Exception e2 ) {
/**
 * Wait for an upload to complete.
 * If the wait is interrupted, the upload is aborted, the thread's interrupt
 * status is restored, and an {@code InterruptedIOException} is thrown.
 * Put statistics are updated for both outcomes.
 *
 * @param key destination key
 * @param uploadInfo the upload to wait for, carrying its length for statistics
 * @return the upload result
 * @throws InterruptedIOException if the blocking was interrupted.
 */
UploadResult waitForUploadCompletion(String key, UploadInfo uploadInfo)
    throws InterruptedIOException {
  Upload upload = uploadInfo.getUpload();
  try {
    UploadResult result = upload.waitForUploadResult();
    incrementPutCompletedStatistics(true, uploadInfo.getLength());
    return result;
  } catch (InterruptedException e) {
    LOG.info("Interrupted: aborting upload");
    incrementPutCompletedStatistics(false, uploadInfo.getLength());
    upload.abort();
    // Re-assert the interrupt flag: throwing InterruptedIOException alone
    // does not preserve the thread's interrupted status.
    Thread.currentThread().interrupt();
    throw (InterruptedIOException)
        new InterruptedIOException("Interrupted in PUT to "
            + keyToQualifiedPath(key))
            .initCause(e);
  }
}
bucket, key, file))); up.waitForUploadResult(); LOG.debug("synchronous upload to identifier [{}] completed.", identifier); } catch (Exception e2 ) {
// Attach the progress listener before blocking on completion so no events are missed.
upload.addProgressListener(listener); upload.waitForUploadResult();
// Upload the object synchronously before handing back an OutputStream wrapper.
// NOTE(review): waitForUploadResult() throws InterruptedException — it must be
// handled by an enclosing try block not visible in this fragment.
PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om); Upload upload = transfers.upload(putObjectRequest); upload.waitForUploadResult(); OutputStream fakeStream = new OutputStream() {
// Stream the payload straight to S3: try-with-resources guarantees the input
// stream is closed, and waitForUploadResult() blocks until the PUT completes.
try (InputStream in = payload.open()) { PutObjectRequest req = new PutObjectRequest(bucket, key, in, meta); UploadResult result = transferManager.upload(req).waitForUploadResult();
// Register progress reporting, then block for completion and count the write.
up.addProgressListener(progressListener); try { up.waitForUploadResult(); statistics.incrementWriteOps(1); } catch (InterruptedException e) {