/**
 * Get a new set of properties for producers that want to talk to this server.
 *
 * @param clientId the optional identifier for the client; may be null if not needed
 * @return the mutable producer properties
 * @see #getConsumerProperties(String, String, OffsetResetStrategy)
 */
public Properties getProducerProperties(String clientId) {
    Properties producerProps = new Properties();
    producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList());
    // Require acknowledgement from the leader only
    producerProps.setProperty(ProducerConfig.ACKS_CONFIG, "1");
    if (clientId != null) {
        producerProps.setProperty(ProducerConfig.CLIENT_ID_CONFIG, clientId);
    }
    return producerProps;
}
/**
 * Get a new set of properties for consumers that want to talk to this server.
 *
 * @param groupId the group ID for the consumer; may not be null
 * @param clientId the optional identifier for the client; may be null if not needed
 * @param autoOffsetReset how to pick a starting offset when there is no initial offset in ZooKeeper or if an offset is
 *            out of range; may be null for the default to be used
 * @return the mutable consumer properties
 * @throws IllegalArgumentException if {@code groupId} is null
 * @see #getProducerProperties(String)
 */
public Properties getConsumerProperties(String groupId, String clientId, OffsetResetStrategy autoOffsetReset) {
    if (groupId == null) {
        throw new IllegalArgumentException("The groupId is required");
    }
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList());
    props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    // Disable auto-commit so callers control exactly when offsets are committed
    props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.FALSE.toString());
    if (autoOffsetReset != null) {
        // Use a locale-independent lowercase conversion: the broker expects the ASCII config values
        // ("earliest", "latest", "none"), and the default-locale toLowerCase() can yield different
        // characters under some locales (e.g. the Turkish dotless 'ı' for 'I').
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
                autoOffsetReset.toString().toLowerCase(java.util.Locale.ROOT));
    }
    if (clientId != null) {
        props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
    }
    return props;
}
.withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList()) .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder") .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
@Test(expected = ParsingException.class) public void shouldStopOnUnparseableSQL() throws Exception { // Create the empty topic ... kafka.createTopic(topicName, 1, 1); // Create invalid records final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null, "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}"); final Configuration intruderConfig = Configuration.create() .withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList()) .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder") .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class) .withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class) .build(); try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) { producer.send(invalidSQL).get(); } testHistoryTopicContent(false); }
.with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList()) .with(KafkaDatabaseHistory.TOPIC, "dummytopic") .with(DatabaseHistory.NAME, "my-db-history")
private void testHistoryTopicContent(boolean skipUnparseableDDL) { .with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList()) .with(KafkaDatabaseHistory.TOPIC, topicName) .with(DatabaseHistory.NAME, "my-db-history")