public ObjectId store(InputStream content, @Nullable String filename, @Nullable String contentType,
        @Nullable Document metadata) {

    Assert.notNull(content, "InputStream must not be null!");

    return getGridFs().uploadFromStream(filename, content, computeUploadOptionsFor(contentType, metadata));
}
public ObjectId store(InputStream content, @Nullable String filename, @Nullable String contentType,
        @Nullable Document metadata) {

    Assert.notNull(content, "InputStream must not be null!");

    GridFSUploadOptions options = new GridFSUploadOptions();

    Document mData = new Document();
    if (StringUtils.hasText(contentType)) {
        mData.put(GridFsResource.CONTENT_TYPE_FIELD, contentType);
    }
    if (metadata != null) {
        mData.putAll(metadata);
    }

    options.metadata(mData);

    return getGridFs().uploadFromStream(filename, content, options);
}
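The first variant delegates option building to a computeUploadOptionsFor helper. A minimal sketch of such a helper, assuming it mirrors the inline logic of the second variant (the actual implementation may differ):

// Hypothetical helper mirroring the inline option-building above.
private GridFSUploadOptions computeUploadOptionsFor(@Nullable String contentType, @Nullable Document metadata) {
    Document mData = new Document();
    if (StringUtils.hasText(contentType)) {
        mData.put(GridFsResource.CONTENT_TYPE_FIELD, contentType);
    }
    if (metadata != null) {
        mData.putAll(metadata);
    }
    return new GridFSUploadOptions().metadata(mData);
}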
@Override
public void createContainer(BucketDirectory bucketDirectory) {
    LOGGER.debug("createContainer:" + bucketDirectory);
    GridFSBucket bucket = GridFSBuckets.create(database, bucketDirectory.getObjectHandle().getContainer());
    InputStream is = new ByteArrayInputStream(new Date().toString().getBytes());
    try {
        ObjectId objectId = bucket.uploadFromStream(BUCKET_ID_FILENAME, is);
        LOGGER.debug("container file has been created " + BUCKET_ID_FILENAME + " with mongo id " + objectId.toString());
    } catch (MongoCommandException e) {
        if (e.getErrorMessage().contains("Too many open files")) {
            LOGGER.error("****************************************************");
            LOGGER.error("Due to the following \"Too many open files exception\"");
            LOGGER.error("PLEASE READ https://jira.adorsys.de/browse/DOC-22");
            LOGGER.error("****************************************************");
        }
        throw new BaseException("exception creating container for " + bucketDirectory, e);
    }
    IOUtils.closeQuietly(is);
}
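Since the container is marked as existing by writing a small marker file, checking for the container reduces to a filename lookup. A minimal sketch, assuming the same BUCKET_ID_FILENAME marker (the method name is illustrative):

// Hypothetical existence check for the marker file written above.
private boolean containerExists(GridFSBucket bucket) {
    return bucket.find(Filters.eq("filename", BUCKET_ID_FILENAME)).first() != null;
}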
/**
 * Persists the data from 'source' into GridFS using the 'payloadPath'
 * property as filename.
 *
 * @param source - the source input stream
 * @param metadata - the doc metadata
 */
public void create(InputStream source, Document metadata) throws StorageException {
    GridFSUploadOptions options = new GridFSUploadOptions()
            .metadata(metadata);
    mongoFileId = getBucket().uploadFromStream(payloadPath, source, options);
}
protected void saveDataToGridFS(byte[] data, String fileid) {
    Bson fileQuery = Filters.regex("filename", getId() + ".*");
    try {
        final ArrayList<GridFSFile> es = new ArrayList<>();
        gridFS.find(fileQuery).into(es);
        for (GridFSFile e : es) {
            gridFS.delete(e.getObjectId());
        }
    } catch (Exception e) {
        log.error("failed to delete old gridfsfile", e);
    }
    gridFS.uploadFromStream(getId() + fileid, new ByteArrayInputStream(data));
}
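Reading such a file back is the mirror operation. A minimal sketch using GridFSBucket.downloadToStream, assuming the same gridFS bucket and the same "&lt;id&gt;&lt;fileid&gt;" naming scheme (the method name is illustrative):

// Hypothetical read-back counterpart for the method above.
protected byte[] loadDataFromGridFS(String fileid) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    gridFS.downloadToStream(getId() + fileid, out);
    return out.toByteArray();
}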
@Override
public Binary getBinary(Blob blob) throws IOException {
    if (!(blob instanceof FileBlob)) {
        return super.getBinary(blob); // just open the stream and call getBinary(InputStream)
    }
    // we already have a file, so we can compute the length and digest efficiently
    File file = blob.getFile();
    String digest;
    try (InputStream in = new FileInputStream(file)) {
        digest = DigestUtils.md5Hex(in);
    }
    // if the digest is not already known, then save to GridFS
    GridFSFile dbFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, digest)).first();
    if (dbFile == null) {
        try (InputStream in = new FileInputStream(file)) {
            gridFSBucket.uploadFromStream(digest, in);
        }
    }
    return new GridFSBinary(digest, blobProviderId);
}
@Override
public void storeAssociatedDocument(String uniqueId, String fileName, InputStream is, boolean compress,
        long timestamp, Map<String, String> metadataMap) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    if (compress) {
        is = new DeflaterInputStream(is);
    }
    deleteAssociatedDocument(uniqueId, fileName);
    GridFSUploadOptions gridFSUploadOptions = getGridFSUploadOptions(uniqueId, fileName, compress, timestamp, metadataMap);
    gridFS.uploadFromStream(fileName, is, gridFSUploadOptions);
}
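Because the stream is wrapped in a DeflaterInputStream before upload, the stored bytes are zlib-compressed and must be inflated on the way out. A minimal read-side sketch, assuming the compress flag can be recovered (for example from the stored metadata); the method name is illustrative:

// Hypothetical read path: wrap the download stream in an InflaterInputStream
// when the document was stored compressed.
public InputStream openAssociatedDocument(String fileName, boolean compressed) {
    GridFSBucket gridFS = createGridFSConnection();
    InputStream in = gridFS.openDownloadStream(fileName);
    return compressed ? new InflaterInputStream(in) : in;
}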
@Override
protected void prepareData(TestContext context, Vertx vertx, String path,
        Handler<AsyncResult<String>> handler) {
    String filename = PathUtils.join(path, ID);
    vertx.<String>executeBlocking(f -> {
        try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
            MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
            GridFSBucket gridFS = GridFSBuckets.create(db);
            byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
            gridFS.uploadFromStream(filename, new ByteArrayInputStream(contents));
            f.complete(filename);
        }
    }, handler);
}
private void storeContentFromFieldToBinaryStorage(String bucketName, Document documentToInsert,
        String fieldName, Object documentId) {
    if ( documentToInsert.containsKey( fieldName ) ) {
        GridFSBucket gridFSFilesBucket = getGridFSFilesBucket( mongoDatabase, bucketName );
        // We delete the previous entry first
        deleteExistingContent( fieldName, documentId, gridFSFilesBucket );
        GridFS gridfsObject = documentToInsert.get( fieldName, GridFS.class );
        if ( gridfsObject != null ) {
            ObjectId uploadId = gridFSFilesBucket.uploadFromStream(
                    fileName( fieldName, documentId ), gridfsObject.getInputStream() );
            documentToInsert.put( fieldName, uploadId );
        }
    }
}
@Override
public void putBlobStream(BucketPath bucketPath, PayloadStream payloadStream) {
    LOGGER.debug("start putBlobStream for " + bucketPath);
    GridFSBucket bucket = getGridFSBucket(bucketPath);
    checkBucketExists(bucket);
    String filename = bucketPath.getObjectHandle().getName();

    GridFSUploadOptions uploadOptions = new GridFSUploadOptions();
    uploadOptions.metadata(new Document());
    SimpleStorageMetadataImpl storageMetadata = new SimpleStorageMetadataImpl(payloadStream.getStorageMetadata());
    storageMetadata.setType(StorageType.BLOB);
    storageMetadata.setName(BucketPathUtil.getAsString(bucketPath));
    uploadOptions.getMetadata().put(STORAGE_METADATA_KEY, gsonHelper.toJson(storageMetadata));

    InputStream is = payloadStream.openStream();
    ObjectId objectId = bucket.uploadFromStream(filename, is, uploadOptions);
    IOUtils.closeQuietly(is);
    deleteAllExcept(bucket, filename, objectId);
    LOGGER.debug("finished putBlobStream for " + bucketPath);
}
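The write-then-clean-up pattern above leaves exactly one current version per filename. A minimal sketch of what a deleteAllExcept helper could look like, assuming it simply removes every file with the same name other than the one just written (this is an illustration, not the project's actual implementation):

// Hypothetical helper matching the call above: remove every version of
// 'filename' except the file that was just written.
private void deleteAllExcept(GridFSBucket bucket, String filename, ObjectId keep) {
    List<GridFSFile> found = new ArrayList<>();
    bucket.find(Filters.eq("filename", filename)).into(found);
    for (GridFSFile f : found) {
        if (!f.getObjectId().equals(keep)) {
            bucket.delete(f.getObjectId());
        }
    }
}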
@Override
protected Binary getBinary(InputStream in) throws IOException {
    try {
        // save the file to GridFS
        String inputName = "tmp-" + System.nanoTime();
        ObjectId id = gridFSBucket.uploadFromStream(inputName, in);
        // now we know length and digest
        GridFSFile inputFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, inputName)).first();
        String digest = inputFile.getMD5();
        // if the digest is already known then reuse it instead
        GridFSFile dbFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, digest)).first();
        if (dbFile == null) {
            // no existing file, set its filename as the digest
            gridFSBucket.rename(id, digest);
        } else {
            // file already existed, no need for the temporary one
            gridFSBucket.delete(id);
        }
        return new GridFSBinary(digest, blobProviderId);
    } finally {
        in.close();
    }
}
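Since the digest doubles as the GridFS filename, reading the binary back is a plain lookup by digest. A minimal sketch of the matching read path, assuming the same gridFSBucket field (the method name is illustrative):

// Hypothetical counterpart to getBinary: stream the stored content back
// by its digest-as-filename; returns null when the digest is unknown.
protected InputStream getStream(String digest) {
    GridFSFile dbFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, digest)).first();
    return dbFile == null ? null : gridFSBucket.openDownloadStream(digest);
}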
@Override
public String save(Media media) {
    Document doc = new Document()
            .append("type", media.getType())
            .append("subType", media.getSubType())
            .append("size", media.getSize())
            .append("hash", media.getHash());
    if (media.getApi() != null) {
        doc.append("api", media.getApi());
    }
    GridFSUploadOptions options = new GridFSUploadOptions()
            .metadata(doc);
    getGridFs()
            .uploadFromStream(
                    new BsonString(media.getId()),
                    media.getFileName(),
                    new ByteArrayInputStream(media.getData()),
                    options
            );
    return media.getId();
}
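This example uses the uploadFromStream overload that takes a caller-supplied BsonValue id, which makes retrieval by the same id straightforward. A minimal sketch, assuming the same getGridFs() accessor (the method name is illustrative):

// Hypothetical read path keyed by the same BsonString id used at upload time.
public byte[] load(String mediaId) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    getGridFs().downloadToStream(new BsonString(mediaId), out);
    return out.toByteArray();
}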
GridFSUploadOptions options = new GridFSUploadOptions()
        .metadata(Document.parse(metadata.toJson()));

// Overload that lets the driver assign the file id and returns it
ObjectId _id = gridFSBucket.uploadFromStream(filename, sourceStream, options);

// Alternative overload taking a caller-supplied BsonValue id; note that an
// InputStream can only be consumed once, so this is a variant, not a follow-up call
gridFSBucket.uploadFromStream(new BsonObjectId(_id), filename, sourceStream, options);