// NOTE(review): fragment of a larger test method. Enables the test framework's debug
// output, then declares expected DDL counts — presumably one CREATE DATABASE event and
// five CREATE TABLE events compared against emitted records later in the (unseen) method;
// confirm against the full test body.
Testing.Debug.enable(); int numCreateDatabase = 1; int numCreateTables = 5;
recordConsumer.accept(record); if (Testing.Debug.isEnabled()) { Testing.debug("Consumed record " + recordsConsumed + " / " + numberOfRecords + " (" + (numberOfRecords - recordsConsumed) + " more)");
// NOTE(review): fragment of a larger test method. Turns on debug logging and sets up
// expected schema-change counts (1 database creation, 5 table creations) that the
// remainder of the unseen method presumably asserts on — TODO confirm in the full source.
Testing.Debug.enable(); int numCreateDatabase = 1; int numCreateTables = 5;
protected void assertExpectedRecords(Path path, int batchCount, int recordsPerBatch) throws IOException { assertThat(Files.exists(path)).isTrue(); if (Testing.Debug.isEnabled()) { String content = IoUtil.read(path.toFile()); Testing.debug("expected results file '" + path + "':");
recordConsumer.accept(record); if (Testing.Debug.isEnabled()) { Testing.debug("Consumed record " + recordsConsumed + " / " + numberOfRecords + " (" + (numberOfRecords - recordsConsumed) + " more)");
// NOTE(review): fragment of a larger test method. Debug output is enabled and two
// counters are initialized; their values (1 and 5) are presumably matched against
// snapshot DDL events further down in the unseen method — verify there.
Testing.Debug.enable(); int numCreateDatabase = 1; int numCreateTables = 5;
/**
 * Appends the given command document to the JSON array stored in the results file,
 * rewriting the file in place with the enlarged array.
 *
 * @param results path to an existing expected-results file; must already exist
 * @param command the document to append; must not be {@code null}
 * @throws IOException if the file cannot be read or rewritten
 */
protected void appendCommand(Path results, Document command) throws IOException {
    assertThat(command).isNotNull();
    assertThat(Files.exists(results)).isTrue();
    // Load the current array, add the new command, and write the whole array back.
    final Array documents = readResults(results.toFile());
    documents.add(command);
    try (OutputStream out = new FileOutputStream(results.toFile())) {
        ArrayWriter.prettyWriter().write(documents, out);
    }
    // When debug output is on, dump the rewritten file so failures are easy to diagnose.
    if (Testing.Debug.isEnabled()) {
        final String updated = IoUtil.read(results.toFile());
        Testing.debug("expected results file '" + results + "' after appending command:");
        Testing.debug(updated);
    }
}
// Per-test setup: configures the consume timeout, resets the connector test framework,
// enables debug output, deletes any stale database-history file from a prior run, and
// starts the Oracle connector restricted to the configured table whitelist. The trailing
// Thread.sleep(2000) presumably lets the connector finish initializing before the test
// body runs — NOTE(review): a fixed sleep is timing-dependent; confirm whether a
// readiness check is available instead.
@Before public void before() throws Exception { setConsumeTimeout(TestHelper.defaultMessageConsumerPollTimeout(), TimeUnit.SECONDS); initializeConnectorTestFramework(); Testing.Debug.enable(); Testing.Files.delete(TestHelper.DB_HISTORY_PATH); Configuration config = TestHelper.defaultConfig() .with(OracleConnectorConfig.TABLE_WHITELIST, getTableWhitelist()) .build(); start(OracleConnector.class, config); assertConnectorIsRunning(); Thread.sleep(2000); }
protected void assertExpectedRecords(Path path, int batchCount, int recordsPerBatch) throws IOException { assertThat(Files.exists(path)).isTrue(); if (Testing.Debug.isEnabled()) { String content = IoUtil.read(path.toFile()); Testing.debug("expected results file '" + path + "':");
@Test @SkipLongRunning public void shouldStartClusterAndAllowAsynchronousProductionAndAutomaticConsumersToUseIt() throws Exception { Testing.Debug.enable(); final String topicName = "topicA"; final CountDownLatch completion = new CountDownLatch(2);
@Test public void shouldStartClusterAndAllowInteractiveProductionAndAutomaticConsumersToUseIt() throws Exception { Testing.Debug.enable(); final String topicName = "topicA"; final CountDownLatch completion = new CountDownLatch(1);
// NOTE(review): fragment of a larger test method. Enables debug logging and declares the
// expected counts of database- and table-creation events (1 and 5); the comparison using
// these locals happens later in the unseen method — confirm in the full source.
Testing.Debug.enable(); int numCreateDatabase = 1; int numCreateTables = 5;
@Test @SkipLongRunning public void shouldStartClusterAndAllowProducersAndConsumersToUseIt() throws Exception { Testing.Debug.enable(); final String topicName = "topicA"; final CountDownLatch completion = new CountDownLatch(2); final int numMessages = 100; final AtomicLong messagesRead = new AtomicLong(0); // Start a cluster and create a topic ... cluster.addBrokers(1).startup(); cluster.createTopics(topicName); // Consume messages asynchronously ... Stopwatch sw = Stopwatch.reusable().start(); cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> { messagesRead.incrementAndGet(); return true; }); // Produce some messages asynchronously ... cluster.useTo().produceIntegers(topicName, numMessages, 1, completion::countDown); // Wait for both to complete ... if (completion.await(10, TimeUnit.SECONDS)) { sw.stop(); Testing.debug("Both consumer and producer completed normally in " + sw.durations()); } else { Testing.debug("Consumer and/or producer did not completed normally"); } assertThat(messagesRead.get()).isEqualTo(numMessages); }
// Per-test setup for the MongoDB unwrap-transformation tests: silences debug/print
// output, stops any connector left over from a previous test, resets the framework, and
// creates a fresh UnwrapFromMongoDbEnvelope transformation with an empty configuration.
// The connector configuration whitelists only this test's collection (getCollectionName()
// is presumably overridden per test class — confirm in subclasses). The target database
// is wiped via TestHelper.cleanDatabase BEFORE the connector starts, so each test begins
// from an empty collection. Statement order here is deliberate; do not reorder.
@Before public void beforeEach() { Debug.disable(); Print.disable(); stopConnector(); initializeConnectorTestFramework(); transformation = new UnwrapFromMongoDbEnvelope<>(); transformation.configure(Collections.emptyMap()); // Use the DB configuration to define the connector's configuration ... Configuration config = TestHelper.getConfiguration().edit() .with(MongoDbConnectorConfig.POLL_INTERVAL_MS, 10) .with(MongoDbConnectorConfig.COLLECTION_WHITELIST, DB_NAME + "." + this.getCollectionName()) .with(MongoDbConnectorConfig.LOGICAL_NAME, "mongo") .build(); // Set up the replication context for connections ... context = new MongoDbTaskContext(config); // Cleanup database TestHelper.cleanDatabase(primary(), DB_NAME); // Start the connector ... start(MongoDbConnector.class, config); }
/**
 * Adds {@code command} to the array of documents persisted in {@code results} and
 * rewrites that file with the updated array.
 *
 * @param results path to an existing expected-results file
 * @param command the non-null document to append
 * @throws IOException if reading or rewriting the file fails
 */
protected void appendCommand(Path results, Document command) throws IOException {
    assertThat(command).isNotNull();
    assertThat(Files.exists(results)).isTrue();
    // Read all previously stored documents and append the new command to them.
    Array docs = readResults(results.toFile());
    docs.add(command);
    // Overwrite the file with the pretty-printed, enlarged array.
    try (OutputStream outputStream = new FileOutputStream(results.toFile())) {
        ArrayWriter.prettyWriter().write(docs, outputStream);
    }
    // With debug enabled, echo the file's new contents for troubleshooting.
    if (Testing.Debug.isEnabled()) {
        String fileText = IoUtil.read(results.toFile());
        Testing.debug("expected results file '" + results + "' after appending command:");
        Testing.debug(fileText);
    }
}
// Per-test setup for the SQL Server connector tests: resets the test framework, enables
// debug output, removes any stale database-history file, and starts the connector with
// SNAPSHOT_MODE=INITIAL so an initial snapshot runs first. Thread.sleep(1000) presumably
// gives the connector time to finish starting — NOTE(review): fixed sleeps are flaky;
// confirm whether a readiness hook exists. The second closing brace ends the enclosing
// class, whose declaration is outside this view.
@Before public void before() throws Exception { initializeConnectorTestFramework(); Testing.Debug.enable(); Testing.Files.delete(TestHelper.DB_HISTORY_PATH); Configuration config = TestHelper.defaultConfig() .with(SqlServerConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL) .build(); start(SqlServerConnector.class, config); assertConnectorIsRunning(); Thread.sleep(1000); } }
/**
 * Per-test setup: silences the framework's debug and print output, then installs the
 * shared test configuration used to reach the database.
 */
@Before
public void beforeEach() {
    // The two disable calls toggle independent flags; order is irrelevant.
    Testing.Debug.disable();
    Testing.Print.disable();
    useConfiguration(TestHelper.getConfiguration());
}
/**
 * Per-test setup: turns off verbose framework output, stops any connector still running
 * from a previous test, and resets the connector test framework to a clean state.
 */
@Before
public void beforeEach() {
    // Debug/Print toggles are independent flags; order between them does not matter.
    Testing.Print.disable();
    Testing.Debug.disable();
    // Stop leftovers first, then reinitialize the framework for this test.
    stopConnector();
    initializeConnectorTestFramework();
}