@Override
protected FlinkKafkaConsumerBase<Row> createKafkaConsumer(
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema) {
    return new FlinkKafkaConsumer011<>(topic, deserializationSchema, properties);
}
}
/**
 * If set to true, the Flink producer will wait for all outstanding messages in the Kafka buffers
 * to be acknowledged by the Kafka producer on a checkpoint.
 * This way, the producer can guarantee that messages in the Kafka buffers are part of the checkpoint.
 *
 * @param flush Flag indicating the flushing mode (true = flush on checkpoint)
 */
public void setFlushOnCheckpoint(boolean flush) {
    producer.setFlushOnCheckpoint(flush);
}
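For context, a minimal usage sketch of this setter on a producer sink; the broker address, topic name, and schema below are illustrative assumptions, and constructor overloads vary across connector versions:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;

FlinkKafkaProducer010<String> myProducer = new FlinkKafkaProducer010<>(
    "localhost:9092",           // assumed broker list
    "my-topic",                 // assumed target topic
    new SimpleStringSchema());

// flush pending records on every checkpoint so buffered messages are part of it
myProducer.setFlushOnCheckpoint(true);
// then attach the sink via stream.addSink(myProducer)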
@Override
protected FlinkKafkaConsumerBase<Row> createKafkaConsumer(
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema) {
    return new FlinkKafkaConsumer010<>(topic, deserializationSchema, properties);
}
}
@Override
protected void commit(KafkaTransactionState transaction) {
    if (transaction.isTransactional()) {
        try {
            // commit the pending Kafka transaction; return the producer to the
            // pool afterwards, whether or not the commit succeeds
            transaction.producer.commitTransaction();
        } finally {
            recycleTransactionalProducer(transaction.producer);
        }
    }
}
@Override
protected void recoverAndAbort(KafkaTransactionState transaction) {
    if (transaction.isTransactional()) {
        try (
            FlinkKafkaProducer<byte[], byte[]> producer =
                initTransactionalProducer(transaction.transactionalId, false)) {
            // initTransactions() with the same transactional.id fences the old
            // producer and aborts its unfinished transaction
            producer.initTransactions();
        }
    }
}
@Override
protected FlinkKafkaConsumerBase<Row> createKafkaConsumer(
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema) {
    return new FlinkKafkaConsumer09<>(topic, deserializationSchema, properties);
}
}
@Test
public void testClosePartitionDiscovererWithCancellation() throws Exception {
    final DummyPartitionDiscoverer testPartitionDiscoverer = new DummyPartitionDiscoverer();
    final TestingFlinkKafkaConsumer<String> consumer =
        new TestingFlinkKafkaConsumer<>(testPartitionDiscoverer, 100L);

    testNormalConsumerLifecycle(consumer);
    assertTrue("partitionDiscoverer should be closed when consumer is closed", testPartitionDiscoverer.isClosed());
}
@Override
protected FlinkKafkaConsumerBase<Row> createKafkaConsumer(
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema) {
    return new FlinkKafkaConsumer<Row>(topic, deserializationSchema, properties);
}
}
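For reference, a minimal sketch of how such a consumer is typically constructed and wired into a job; the broker address, group id, and topic name are illustrative assumptions:

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

Properties properties = new Properties();
properties.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker
properties.setProperty("group.id", "example-group");           // assumed group id

FlinkKafkaConsumer<String> consumer =
    new FlinkKafkaConsumer<>("example-topic", new SimpleStringSchema(), properties);
env.addSource(consumer).print();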
@Override
public KafkaTransactionContext deserialize(
        KafkaTransactionContext reuse,
        DataInputView source) throws IOException {
    return deserialize(source);
}
@Override
public KafkaTransactionState deserialize(
        KafkaTransactionState reuse,
        DataInputView source) throws IOException {
    return deserialize(source);
}
@Override
protected void recoverAndAbort(FlinkKafkaProducer.KafkaTransactionState transaction) {
    if (transaction.isTransactional()) {
        try (
            FlinkKafkaInternalProducer<byte[], byte[]> producer =
                initTransactionalProducer(transaction.transactionalId, false)) {
            producer.initTransactions();
        }
    }
}
@Override
protected Optional<FlinkKafkaProducer.KafkaTransactionContext> initializeUserContext() {
    if (semantic != FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
        return Optional.empty();
    }

    Set<String> transactionalIds = generateNewTransactionalIds();
    resetAvailableTransactionalIdsPool(transactionalIds);
    return Optional.of(new FlinkKafkaProducer.KafkaTransactionContext(transactionalIds));
}
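The transactional-id pool above is only set up under EXACTLY_ONCE; a hedged sketch of constructing such a producer follows (the topic, schema, and properties are assumptions, and available constructor overloads vary across Flink versions):

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;

Properties properties = new Properties();
properties.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker
// keep the transaction timeout within the broker's transaction.max.timeout.ms
properties.setProperty("transaction.timeout.ms", "900000");

FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<>(
    "example-topic",                                           // assumed default topic
    new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
    properties,
    FlinkKafkaProducer.Semantic.EXACTLY_ONCE);                 // enables the id pool above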
@Override
public FlinkKafkaConsumerBase<T> setStartFromTimestamp(long startupOffsetsTimestamp) {
    // the purpose of this override is just to publicly expose the method for Kafka 0.10+;
    // the base class doesn't publicly expose it since not all Kafka versions support the functionality
    return super.setStartFromTimestamp(startupOffsetsTimestamp);
}
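A one-line usage sketch; the timestamp value is an illustrative assumption:

// start each partition from the first record whose timestamp is at or after
// the given epoch-millis value
consumer.setStartFromTimestamp(1609459200000L);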
/**
 * Defines whether the producer should fail on errors, or only log them.
 * If this is set to true, exceptions will only be logged; if set to false,
 * exceptions will eventually be thrown and cause the streaming program to
 * fail (and enter recovery).
 *
 * @param logFailuresOnly The flag to indicate logging-only on exceptions.
 */
public void setLogFailuresOnly(boolean logFailuresOnly) {
    producer.setLogFailuresOnly(logFailuresOnly);
}
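A short sketch of the usual pairing with the flush setter above, reusing the hypothetical myProducer from the earlier sketch; failing on errors plus flushing on checkpoints is what gives the sink at-least-once behavior:

// fail the job on send errors instead of only logging them...
myProducer.setLogFailuresOnly(false);
// ...and combine with flushing on checkpoints for at-least-once delivery
myProducer.setFlushOnCheckpoint(true);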
@Override
public FlinkKafkaProducer.KafkaTransactionState deserialize(
        FlinkKafkaProducer.KafkaTransactionState reuse,
        DataInputView source) throws IOException {
    return deserialize(source);
}
@Override
public FlinkKafkaProducer.KafkaTransactionContext deserialize(
        FlinkKafkaProducer.KafkaTransactionContext reuse,
        DataInputView source) throws IOException {
    return deserialize(source);
}
private void setupConsumer(FlinkKafkaConsumerBase<String> consumer) throws Exception {
    setupConsumer(
        consumer,
        false,
        null,
        false,
        0,
        1);
}
@Override
protected List<String> getAllTopics() throws WakeupException {
    checkState();
    return allTopics;
}
@Override
public <T extends Serializable> ListState<T> getSerializableListState(String stateName) throws Exception {
    // return empty state for the legacy 1.2 Kafka consumer state
    return new TestingListState<>();
}