Refine search
DBCursor fileListCursor = fs.getFileList(); try { while (fileListCursor.hasNext()) { GridFS fs = getGridFS(); String fn = args[i + 1]; GridFSDBFile f = fs.findOne(fn); if (f == null) { System.err.println("can't find file: " + fn); GridFS fs = getGridFS(); String fn = args[i + 1]; GridFSInputFile f = fs.createFile(new File(fn)); f.save(); f.validate(); return;
GridFSInputFile gfsFile = endpoint.getGridFs().createFile(ins, filename, true); if (chunkSize != null && chunkSize > 0) { gfsFile.setChunkSize(chunkSize); gfsFile.setContentType(ct); gfsFile.setMetaData(dbObject); gfsFile.save(); exchange.getIn().setHeader(Exchange.FILE_NAME_PRODUCED, gfsFile.getFilename()); exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_FILE_ID_PRODUCED, gfsFile.getId()); } else if ("remove".equals(operation)) { final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class); endpoint.getGridFs().remove(filename); } else if ("findOne".equals(operation)) { final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class); GridFSDBFile file = endpoint.getGridFs().findOne(filename); if (file != null) { exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA, JSON.serialize(file.getMetaData()));
/**
 * Stores the given stream in GridFS under the supplied id, replacing any
 * previously stored file with that id.
 *
 * @param id       identifier the file is stored under (written as "_id")
 * @param fileName name recorded for the GridFS file
 * @param file     content to persist
 * @return always {@code true}
 */
@Override
public boolean save(Object id, String fileName, InputStream file) {
    // Drop any earlier file stored under this id before writing the new one.
    pipelinefs.remove(new BasicDBObject(MongoDocument.MONGO_ID_KEY, id));
    final GridFSInputFile gridFile = pipelinefs.createFile(file, fileName);
    gridFile.put("_id", id);
    gridFile.save();
    return true;
}
/**
 * Persists the stream to GridFS with the configured chunk size, the given
 * filename, and the supplied attributes.
 *
 * @param is         content to store
 * @param filename   name recorded for the stored file
 * @param attributes extra attributes applied to the file before saving
 * @return the string form of the stored file's id
 */
public String save(InputStream is, String filename, Map<String, Object> attributes) {
    final GridFSInputFile stored = fs.createFile(is);
    stored.setChunkSize(chunkSize);
    stored.setFilename(filename);
    setAttributes(stored, attributes);
    stored.save();
    return stored.getId().toString();
}
// Example: store a file in GridFS with an extra metadata key and read it back.
MongoClient client = new MongoClient();
// FIX: the original was missing the closing parenthesis on getDB("test").
GridFS gridFS = new GridFS(client.getDB("test"));
// FIX: replaced the non-Java "<insert bytes here>" placeholder with a concrete
// byte array; substitute the real file content here.
GridFSInputFile in = gridFS.createFile(new byte[0]);
in.put("meta", 5); // insert extra metadata here
in.save();
GridFSDBFile out = gridFS.findOne(new BasicDBObject("_id", in.getId()));
System.out.println(out.get("meta")); // this will print 5
gridfs.remove(currentId); blob = gridfs.createFile((File) content); } else if (content instanceof InputStream) { blob = gridfs.createFile((InputStream) content); } else if (content instanceof ByteBuf) { blob = gridfs.createFile(((ByteBuf) content).array()); blob.setId(currentId); blob.setFilename(fileInfo().getString("filename")); blob.setContentType(contentType != null ? contentType.toString() : "application/octet-stream"); blob.put("parent", fileInfo().getParentId()); blob.save(); String oid = blob.getId().toString();
// Example: store a local file in GridFS with custom top-level keys, then fetch it back.
MongoClient mongo = new MongoClient("localhost", 27017);
DB db = mongo.getDB("testDB");
String newFileName = "elasticsearch-Jar";
File imageFile = new File("/home/impadmin/elasticsearch-1.4.2.tar.gz");
GridFS gfs = new GridFS(db); // no bucket name given, so the default bucket is used
//Insertion
GridFSInputFile inputFile = gfs.createFile(imageFile);
inputFile.setFilename(newFileName); // stored name differs from the on-disk name
inputFile.put("name", "devender"); // extra keys written onto the files document
inputFile.put("age", 23);
inputFile.save();
//Fetch back
GridFSDBFile outputFile = gfs.findOne(newFileName); // look up by the stored filename
/**
 * Saves the given stream as a new GridFS file.
 *
 * @param inputStream content to store
 * @param storeName   name the file is stored (and looked up) under
 * @param contentType MIME type recorded on the stored file
 * @param fileId      optional explicit id to store the file under; may be null
 * @return a {@link FileInfo} describing the saved file
 * @throws MongoDataException  if the underlying Mongo operation fails
 * @throws FileExistsException if a file named {@code storeName} already exists
 */
@Override
public FileInfo saveFile(final InputStream inputStream, final String storeName,
                         final String contentType, final ObjectId fileId)
        throws MongoDataException, FileExistsException {
    try {
        // Refuse to overwrite an existing file with the same store name.
        if (gridfs.findOne(storeName) != null) {
            log.error("A file named {} already exists", storeName);
            throw new FileExistsException("File with name " + storeName + " already Exists");
        }
        GridFSInputFile savedFile = gridfs.createFile(inputStream, storeName, true);
        savedFile.setContentType(contentType);
        if (fileId != null) {
            log.debug("Saving file with given Id {} probably a update", fileId);
            savedFile.setId(fileId);
        }
        savedFile.save();
        FileInfo fileInfo = new FileInfo(savedFile, false);
        // FIX: the original mixed an unfilled "{}" placeholder with string
        // concatenation ("File {} was saved " + fileInfo); use parameterized
        // logging so the placeholder is actually substituted (and lazily).
        log.debug("File {} was saved", fileInfo);
        return fileInfo;
    } catch (MongoException ex) {
        // FIX: the original dropped the exception from the log entry; include
        // both the file name and the cause for diagnosability.
        log.error("Unable to save file {}", storeName, ex);
        throw new MongoDataException("Unable to save file to GridFs", ex);
    }
}
/**
 * Stores the instance's stream {@code is} in GridFS, deduplicating by MD5.
 * If a file with the same MD5 digest already exists, the input stream is
 * closed and the existing digest is returned; otherwise the content is
 * saved as a new GridFS file.
 *
 * @return the MD5 hex digest identifying the stored (or pre-existing) blob
 * @throws IOException if reading or closing the stream fails
 */
private String saveBlob() throws IOException {
    BufferedInputStream bis = new BufferedInputStream(is);
    // assumes calculateMd5 leaves the stream re-readable (mark/reset) so the
    // same bytes can be written below — TODO confirm
    String md5 = calculateMd5(bis);
    GridFSDBFile gridFile = gridFS.findOne(new BasicDBObject("md5", md5));
    if (gridFile != null) {
        // Duplicate content already stored: nothing to write, release the stream.
        is.close();
        return md5;
    }
    // NOTE(review): the boolean argument presumably asks GridFS to close the
    // stream once persisted, so no explicit close is needed here — confirm
    // against the driver version in use.
    GridFSInputFile gridFSInputFile = gridFS.createFile(bis, true);
    gridFSInputFile.save();
    return gridFSInputFile.getMD5();
}
/**
 * Writes the supplied stream into the GridFS instance selected by the key.
 *
 * @param dbKey       selects which GridFS instance receives the data
 * @param data        artifact content
 * @param contentType MIME type recorded on the stored file
 * @return the stored file's id as a string, or {@code null} if no file was created
 */
@Override
public String saveArtifact(DBKey dbKey, InputStream data, String contentType) {
    GridFS gfs = getGridFS(dbKey);
    GridFSInputFile artifact = gfs.createFile(data);
    if (artifact == null) {
        return null;
    }
    artifact.setContentType(contentType);
    artifact.save();
    return artifact.getId().toString();
}
GridFS gfs = new GridFS(mongoDB, fqn); GridFSInputFile gfsFile = gfs.createFile(((Blob)value).getBinaryStream()); gfsFile.setFilename(uuid); gfsFile.save(); return uuid; GridFS gfs = new GridFS(mongoDB, fqn); GridFSInputFile gfsFile = gfs.createFile(((Clob)value).getAsciiStream()); gfsFile.setFilename(uuid); gfsFile.save(); return uuid; GridFS gfs = new GridFS(mongoDB, fqn); GridFSInputFile gfsFile = gfs.createFile(((SQLXML)value).getBinaryStream()); gfsFile.setFilename(uuid); gfsFile.save(); return uuid;
// Store the stream in the dedicated "zips" GridFS bucket as "sample.zip",
// forcing a caller-chosen id instead of letting the driver generate one.
GridFS gfs = new GridFS(db, "zips");
GridFSInputFile gfsFile = gfs.createFile(in);
gfsFile.setFilename("sample.zip");
gfsFile.setId(id); // explicit _id supplied by the surrounding code
gfsFile.save();
/**
 * Persists the stream to GridFS under the given file name.
 *
 * @param fileName name recorded for the stored file
 * @param file     content to persist
 * @return the id GridFS assigned to the new file
 */
@Override
public Object save(String fileName, InputStream file) {
    final GridFSInputFile stored = pipelinefs.createFile(file, fileName);
    stored.save();
    return stored.getId();
}
/**
 * Stores the entity's data in GridFS under the entity's own id.
 *
 * @param entity file content plus the id to store it under
 * @return the same entity instance that was passed in
 */
@Override
public FileContent save(FileContent entity) {
    final GridFSInputFile stored = gridFs.createFile(entity.getData());
    stored.setId(entity.getId());
    stored.save();
    return entity;
}
/**
 * Uploads the blob's payload into the GridFS bucket identified by the container.
 *
 * @param container GridFS identifier naming the target bucket
 * @param blob      payload plus metadata to store
 * @param options   if present, must request multipart; anything else is rejected
 * @return the MD5 digest of the stored file
 * @throws IllegalArgumentException if options are present but not multipart
 */
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
    if (options != null && !options.isMultipart()) {
        throw new IllegalArgumentException("only multipart is supported by this provider");
    }
    final Payload payload = checkNotNull(blob.getPayload());
    final BlobMetadata blobMetadata = blob.getMetadata();
    final ContentMetadata contentMetadata = blobMetadata.getContentMetadata();

    final GridFS gridFS = parseGridFSIdentifier(container).connect(mongo);
    final GridFSInputFile stored =
            gridFS.createFile(payload.getInput(), blobMetadata.getName(), true);
    stored.setContentType(contentMetadata.getContentType());

    // Copy the caller's user metadata onto the GridFS metadata document.
    final DBObject userMetadata = new BasicDBObject();
    userMetadata.putAll(blobMetadata.getUserMetadata());
    stored.setMetaData(userMetadata);

    stored.save();
    return stored.getMD5();
}
case 1: _inFile = gridFS.getGridFS().createFile((File) __targetObject); break; case 2: _inFile = gridFS.getGridFS().createFile((InputStream) __targetObject); break; case 3: _inFile = gridFS.getGridFS().createFile((byte[]) __targetObject); break; default: _inFile.setFilename(__filename); _inFile.setContentType(__contentType); if (__chunkSize > 0) { _inFile.setChunkSize(__chunkSize); _inFile.put(_entry.getKey(), _entry.getValue());
@Override public RepositoryItem createRepositoryItem(String id) { // TODO The file is not written until outputstream is closed. There is a // potentially data race with this unique test if (!gridFS.find(id).isEmpty()) { throw new DuplicateItemException(id); } GridFSInputFile dbFile = gridFS.createFile(id); dbFile.setId(id); return createRepositoryItem(dbFile); }
/**
 * Creates a new repository item backed by a fresh GridFS file.
 *
 * @return a repository item whose filename is its generated id
 */
@Override
public RepositoryItem createRepositoryItem() {
    final GridFSInputFile freshFile = gridFS.createFile();
    // Use the auto-generated id as the filename so the item is addressable by name.
    freshFile.setFilename(freshFile.getId().toString());
    return createRepositoryItem(freshFile);
}
/**
 * Calls {@link GridFSInputFile#save(long)} with the existing chunk size.
 *
 * @throws MongoException if there's a problem saving the file.
 */
@Override
public void save() {
    save(chunkSize); // delegate to the chunk-size-aware overload
}