Refine search
/**
 * Resolves the Mongo collection backing the given schema/table pair.
 *
 * @param schema the Mongo database name
 * @param table the collection name within that database
 * @return the collection handle
 */
private MongoCollection<Document> getCollection(String schema, String table)
{
    MongoDatabase database = client.getDatabase(schema);
    return database.getCollection(table);
}
/**
 * The MongoDbClient constructor.
 *
 * <p>Connects to the Mongo server described by the URI and binds the
 * configured collection inside the database named in that URI.
 *
 * @param url The Mongo server url
 * @param collectionName The Mongo collection to read/write data
 */
public MongoDbClient(String url, String collectionName) {
    // Parse the connection string; the URI must carry the database name.
    MongoClientURI mongoUri = new MongoClientURI(url);
    this.client = new MongoClient(mongoUri);
    this.collection = client
            .getDatabase(mongoUri.getDatabase())
            .getCollection(collectionName);
}
/**
 * Reads the names of all tables recorded in the schema collection of the
 * given database.
 *
 * @param schemaName the Mongo database whose schema collection is scanned
 * @return the distinct values found under {@code TABLE_NAME_KEY}
 * @throws TableNotFoundException declared for API compatibility with callers
 */
private Set<String> getTableMetadataNames(String schemaName)
        throws TableNotFoundException
{
    MongoDatabase db = client.getDatabase(schemaName);
    HashSet<String> names = new HashSet<>();
    // MongoCursor is Closeable; the original leaked it — close it via
    // try-with-resources so the server-side cursor is released promptly.
    try (MongoCursor<Document> cursor = db.getCollection(schemaCollection)
            .find().projection(new Document(TABLE_NAME_KEY, true)).iterator()) {
        while (cursor.hasNext()) {
            names.add(cursor.next().getString(TABLE_NAME_KEY));
        }
    }
    return names;
}
/**
 * Builds the metadata document describing the given table and inserts it
 * into the schema collection of its database. A hidden {@code _id} column is
 * prepended when the caller did not declare one, since Mongo adds that field
 * implicitly.
 *
 * @param schemaTableName the table whose metadata is recorded
 * @param columns the user-declared columns
 * @throws TableNotFoundException declared for API compatibility with callers
 */
private void createTableMetadata(SchemaTableName schemaTableName, List<MongoColumnHandle> columns)
        throws TableNotFoundException
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    MongoDatabase db = client.getDatabase(schemaName);

    ArrayList<Document> fields = new ArrayList<>();
    // noneMatch reads more directly than the original !anyMatch
    if (columns.stream().noneMatch(c -> c.getName().equals("_id"))) {
        fields.add(new MongoColumnHandle("_id", OBJECT_ID, true).getDocument());
    }
    fields.addAll(columns.stream()
            .map(MongoColumnHandle::getDocument)
            .collect(toList()));

    Document metadata = new Document(TABLE_NAME_KEY, tableName)
            .append(FIELDS_KEY, fields);

    MongoCollection<Document> schema = db.getCollection(schemaCollection);
    // unique index guards against duplicate metadata entries for the same table
    schema.createIndex(new Document(TABLE_NAME_KEY, 1), new IndexOptions().unique(true));
    schema.insertOne(metadata);
}
/**
 * Removes the metadata document for the given table from the schema
 * collection of its database.
 *
 * @param schemaTableName the table whose metadata is deleted
 * @return {@code true} when exactly one metadata document was deleted;
 *         {@code false} when the backing collection does not exist or no
 *         metadata document matched
 */
private boolean deleteTableMetadata(SchemaTableName schemaTableName)
{
    MongoDatabase database = client.getDatabase(schemaTableName.getSchemaName());
    String table = schemaTableName.getTableName();
    if (!collectionExists(database, table)) {
        return false;
    }
    DeleteResult outcome = database.getCollection(schemaCollection)
            .deleteOne(new Document(TABLE_NAME_KEY, table));
    return outcome.getDeletedCount() == 1;
}
private List<Document> guessTableFields(SchemaTableName schemaTableName) { String schemaName = schemaTableName.getSchemaName(); String tableName = schemaTableName.getTableName(); MongoDatabase db = client.getDatabase(schemaName); Document doc = db.getCollection(tableName).find().first(); if (doc == null) { // no records at the collection return ImmutableList.of(); } ImmutableList.Builder<Document> builder = ImmutableList.builder(); for (String key : doc.keySet()) { Object value = doc.get(key); Optional<TypeSignature> fieldType = guessFieldType(value); if (fieldType.isPresent()) { Document metadata = new Document(); metadata.append(FIELDS_NAME_KEY, key); metadata.append(FIELDS_TYPE_KEY, fieldType.get().toString()); metadata.append(FIELDS_HIDDEN_KEY, key.equals("_id") && fieldType.get().equals(OBJECT_ID.getTypeSignature())); builder.add(metadata); } else { log.debug("Unable to guess field type from %s : %s", value == null ? "null" : value.getClass().getName(), value); } } return builder.build(); }
/** * Obtain the current position of the oplog, and record it in the source. */ protected void recordCurrentOplogPosition() { primaryClient.execute("get oplog position", primary -> { MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs"); Document last = oplog.find().sort(new Document("$natural", -1)).limit(1).first(); // may be null source.offsetStructForEvent(replicaSet.replicaSetName(), last); }); }
/**
 * Fetches the metadata document for the given table, creating it on first
 * access by sampling the underlying collection.
 *
 * @param schemaTableName the table to describe
 * @return the stored (or freshly created) metadata document
 * @throws TableNotFoundException when neither metadata nor the underlying
 *         collection exists
 */
private Document getTableMetadata(SchemaTableName schemaTableName)
        throws TableNotFoundException
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    MongoCollection<Document> schema = client.getDatabase(schemaName).getCollection(schemaCollection);

    Document existing = schema.find(new Document(TABLE_NAME_KEY, tableName)).first();
    if (existing != null) {
        return existing;
    }
    if (!collectionExists(client.getDatabase(schemaName), tableName)) {
        throw new TableNotFoundException(schemaTableName);
    }
    // first sighting of this collection: infer its fields and persist them
    Document metadata = new Document(TABLE_NAME_KEY, tableName)
            .append(FIELDS_KEY, guessTableFields(schemaTableName));
    schema.createIndex(new Document(TABLE_NAME_KEY, 1), new IndexOptions().unique(true));
    schema.insertOne(metadata);
    return metadata;
}
/** * Add field use_raw_configuration_schema to endpointProfile that used to support devices using * SDK version 0.9.0 */ public void transform() { //mongo MongoClient client = new MongoClient(host); MongoDatabase database = client.getDatabase(dbName); MongoCollection<Document> endpointProfile = database.getCollection("endpoint_profile"); endpointProfile.updateMany(new Document(), eq("$set", eq("use_raw_schema", false))); //cassandra Cluster cluster = Cluster.builder().addContactPoint(host).build(); Session session = cluster.connect(dbName); session.execute("ALTER TABLE ep_profile ADD use_raw_schema boolean"); session.close(); cluster.close(); } }
/**
 * Copy the collection, sending to the recorder a record for each document.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 * @param collectionId the identifier of the collection to be copied; may not be null
 * @param timestamp the timestamp in milliseconds at which the copy operation was started
 * @return number of documents that were copied
 * @throws InterruptedException if the thread was interrupted while the copy operation was running
 */
protected long copyCollection(MongoClient primary, CollectionId collectionId, long timestamp) throws InterruptedException {
    RecordsForCollection factory = recordMakers.forCollection(collectionId);
    MongoCollection<Document> sourceCollection = primary
            .getDatabase(collectionId.dbName())
            .getCollection(collectionId.name());
    long copied = 0;
    try (MongoCursor<Document> documents = sourceCollection.find().iterator()) {
        // stop early when the connector is asked to shut down
        while (running.get() && documents.hasNext()) {
            Document document = documents.next();
            logger.trace("Found existing doc in {}: {}", collectionId, document);
            copied += factory.recordObject(collectionId, document, timestamp);
        }
    }
    return copied;
}
/**
 * Builds a consumer that applies the given update document to the item with
 * the given {@code _id} in this test's collection.
 *
 * @param id the {@code _id} of the document to update
 * @param document the update document, as a JSON string
 * @return a consumer executing the update against the supplied client
 */
private Consumer<MongoClient> createUpdateOneItem(int id, String document) {
    return client -> {
        Document filter = Document.parse(String.format("{'_id' : %d}", id));
        client.getDatabase(DB_NAME)
                .getCollection(this.getCollectionName())
                .updateOne(filter, Document.parse(document));
    };
}
}
/**
 * Drops every collection in the named database via the replica set primary.
 *
 * @param primary the primary node helper used to run the operation
 * @param dbName the database to empty
 */
public static void cleanDatabase(MongoPrimary primary, String dbName) {
    primary.execute("clean-db", mongo -> {
        MongoDatabase database = mongo.getDatabase(dbName);
        for (String name : database.listCollectionNames()) {
            logger.info("Removing collection '{}' from database '{}'", name, dbName);
            database.getCollection(name).drop();
        }
    });
}
}
// Fetch the oldest entry in the oplog: forward $natural scan, limited to one document.
// NOTE(review): fragment — the enclosing method signature is outside this view.
MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
Document firstEvent = oplog.find().sort(new Document("$natural", 1)).limit(1).first(); // may be null
return SourceInfo.extractEventTimestamp(firstEvent);
// Nested document: a single key can be read with map-style subscript syntax.
mongoQueryRunner.getMongoClient().getDatabase("test").getCollection("tmp_map8").insertOne(new Document(
        ImmutableMap.of("col", new Document(ImmutableMap.of("key1", "value1", "key2", "value2")))));
assertQuery("SELECT col['key1'] FROM test.tmp_map8", "SELECT 'value1'");
// Nested document selected whole: rendered as its JSON text.
mongoQueryRunner.getMongoClient().getDatabase("test").getCollection("tmp_map9").insertOne(new Document(
        ImmutableMap.of("col", new Document(ImmutableMap.of("key1", "value1", "key2", "value2")))));
assertQuery("SELECT col FROM test.tmp_map9", "SELECT '{ \"key1\" : \"value1\", \"key2\" : \"value2\" }'");
// Array of nested documents.
// NOTE(review): no assertQuery follows this insert — confirm whether an assertion is missing.
mongoQueryRunner.getMongoClient().getDatabase("test").getCollection("tmp_map10").insertOne(new Document(
        ImmutableMap.of("col", ImmutableList.of(new Document(ImmutableMap.of("key1", "value1", "key2", "value2")),
                new Document(ImmutableMap.of("key3", "value3", "key4", "value4"))))));
// Scalar value rendered as text.
mongoQueryRunner.getMongoClient().getDatabase("test").getCollection("tmp_map11").insertOne(new Document(
        ImmutableMap.of("col", 10)));
assertQuery("SELECT col FROM test.tmp_map11", "SELECT '10'");
// Array containing a null element.
mongoQueryRunner.getMongoClient().getDatabase("test").getCollection("tmp_map12").insertOne(new Document(
        ImmutableMap.of("col", Arrays.asList(10, null, 11))));
assertQuery("SELECT col FROM test.tmp_map12", "SELECT '[10, null, 11]'");
/**
 * Builds a consumer that inserts the default test document (flat fields plus
 * one nested sub-document) under the given {@code _id} into this test's
 * collection.
 *
 * @param id the {@code _id} for the inserted document
 * @return a consumer executing the insert against the supplied client
 */
private Consumer<MongoClient> createInsertItemDefault(int id) {
    String nested = "'nested': {"
            + "'dataStr': 'hello',"
            + "'dataInt': 123,"
            + "'dataLong': 80000000000,"
            + "'dataBoolean': true,"
            + "'dataByte': -1"
            + "}";
    String json = "{"
            + "'_id': " + id + ","
            + "'dataStr': 'hello',"
            + "'dataInt': 123,"
            + "'dataLong': 80000000000,"
            + "'dataBoolean': true,"
            + "'dataByte': -1,"
            + "'dataArrayOfStr': ['a','c','e'],"
            + nested
            + "}";
    return client -> client.getDatabase(DB_NAME)
            .getCollection(this.getCollectionName())
            .insertOne(Document.parse(json));
}
/**
 * Round-trips a single document through the containerized MongoDB instance.
 */
@Test
public void simpleMongoDbTest() {
    MongoClient mongoClient = new MongoClient(mongo.getContainerIpAddress(), mongo.getMappedPort(MONGO_PORT));
    MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("testCollection");

    collection.insertOne(new Document("name", "foo").append("value", 1));

    Document fetched = collection.find(new Document("name", "foo")).first();
    assertEquals("A record can be inserted into and retrieved from MongoDB", 1, fetched.get("value"));
}
/**
 * Use the given primary to read the oplog.
 *
 * <p>Opens a tailable-await cursor starting just after the last recorded
 * offset and feeds each event to {@code handleOplogEvent} until either the
 * reader is stopped or the handler signals failure.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 */
protected void readOplog(MongoClient primary) {
    BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName());
    logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(), oplogStart);
    // Include none of the cluster-internal operations and only those events since the previous timestamp ...
    MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
    Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
            Filters.exists("fromMigrate", false)); // skip internal movements across shards
    FindIterable<Document> results = oplog.find(filter)
            .sort(new Document("$natural", 1)) // force forwards collection scan
            .oplogReplay(true) // tells Mongo to not rely on indexes
            .cursorType(CursorType.TailableAwait); // tail and await new data
    // Read as much of the oplog as we can ...
    ServerAddress primaryAddress = primary.getAddress();
    // try-with-resources guarantees the server-side cursor is released
    try (MongoCursor<Document> cursor = results.iterator()) {
        while (running.get() && cursor.hasNext()) {
            if (!handleOplogEvent(primaryAddress, cursor.next())) {
                // Something happened, and we're supposed to stop reading
                return;
            }
        }
    }
}
/**
 * Loads the documents found at the classpath location and stores them in
 * {@code dbName.collectionName}, dropping the collection first so the load
 * starts from an empty state.
 *
 * @param dbName the target database
 * @param collectionName the target collection
 * @param pathOnClasspath classpath location of the documents to load
 */
protected void storeDocuments(String dbName, String collectionName, String pathOnClasspath) {
    primary().execute("storing documents", mongo -> {
        // fixed duplicated word ("from from") in the debug message
        Testing.debug("Storing in '" + dbName + "." + collectionName + "' documents loaded from '" + pathOnClasspath + "'");
        MongoDatabase db1 = mongo.getDatabase(dbName);
        MongoCollection<Document> coll = db1.getCollection(collectionName);
        coll.drop();
        storeDocuments(coll, pathOnClasspath);
    });
}
// Resolve the two notification collections from the configured database.
// NOTE(review): fragment — the enclosing method signature is outside this view.
MongoDatabase database = client.getDatabase(dbName);
MongoCollection<Document> notification = database.getCollection("notification");
MongoCollection<Document> enpNotification = database.getCollection("endpoint_notification");
/**
 * Recreates the named collection in the test database with the supplied
 * validator attached (MODERATE level, ERROR action).
 *
 * @param validator the validation rules for the collection
 * @param collectionName the collection to drop and recreate
 * @return the database holding the freshly created collection
 */
private MongoDatabase addValidation(final Document validator, final String collectionName) {
    ValidationOptions validation = new ValidationOptions()
            .validator(validator)
            .validationLevel(ValidationLevel.MODERATE)
            .validationAction(ValidationAction.ERROR);

    MongoDatabase database = getMongoClient().getDatabase(TEST_DB_NAME);
    // drop first so stale documents cannot violate the new rules
    database.getCollection(collectionName).drop();
    database.createCollection(collectionName, new CreateCollectionOptions().validationOptions(validation));
    return database;
}