private KafkaOffsetBackingStore createKafkaOffsetBackingStore(WorkerConfig workerConfig) {
    KafkaOffsetBackingStore kafkaOffsetBackingStore = new KafkaOffsetBackingStore();
    kafkaOffsetBackingStore.configure(workerConfig);
    kafkaOffsetBackingStore.start();
    return kafkaOffsetBackingStore;
}
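A minimal sketch of calling this helper, assuming an older Kafka Connect API in which KafkaOffsetBackingStore still has a no-argument constructor (as used throughout these examples); the property values are illustrative placeholders:

// Sketch only: property values are placeholders; adjust for your cluster.
Map<String, String> props = new HashMap<>();
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", "connect-cluster");
props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
props.put("config.storage.topic", "connect-configs");
props.put("offset.storage.topic", "connect-offsets");
props.put("status.storage.topic", "connect-status");

KafkaOffsetBackingStore store = createKafkaOffsetBackingStore(new DistributedConfig(props));
try {
    // read or write offsets through the store here
} finally {
    store.stop(); // the helper starts the store, so the caller is responsible for stopping it
}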
@Override
public void configure(final WorkerConfig config) {
    String topic = config.getString(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG);
    if (topic == null || topic.trim().length() == 0)
        throw new ConfigException("Offset storage topic must be specified");

    data = new HashMap<>();

    Map<String, Object> originals = config.originals();
    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);

    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

    Map<String, Object> adminProps = new HashMap<>(originals);
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .compacted()
            .partitions(config.getInt(DistributedConfig.OFFSET_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.OFFSET_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();

    offsetLog = createKafkaBasedLog(topic, producerProps, consumerProps, consumedCallback, topicDescription, adminProps);
}
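The configure(...) implementation above pulls three offset-storage settings out of the worker config and fails fast if the topic name is blank. A sketch of the corresponding worker properties (the key names are the real DistributedConfig constants; the values are examples):

// Offset-storage settings read by configure() above; values are examples.
Map<String, String> offsetStorageProps = new HashMap<>();
offsetStorageProps.put("offset.storage.topic", "connect-offsets");  // required, must not be blank
offsetStorageProps.put("offset.storage.partitions", "25");          // used when the topic is auto-created
offsetStorageProps.put("offset.storage.replication.factor", "3");   // likewise only used for topic creation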
void setOffsets(Stream<OffsetInfo> offsetInfoStream) {
    // Set offsets for each connector
    offsetInfoStream
        .collect(groupingBy(v -> v.connectorId))
        .forEach(
            (connectorId, v) -> {
                logger.info("Writing offsets for " + connectorId);
                OffsetStorageWriter offsetWriter =
                    new OffsetStorageWriter(
                        offsetBackingStore, connectorId, internalConverter, internalConverter);
                v.forEach(
                    offsetInfo -> {
                        Map<String, Object> partition =
                            TopicPartitionSerDe.asMap(
                                new TopicPartition(offsetInfo.topic, offsetInfo.partition.intValue()));
                        Map<String, Long> offset = MirusSourceTask.offsetMap(offsetInfo.offset);
                        offsetWriter.offset(partition, offset);
                    });
                try {
                    offsetBackingStore.start();
                    offsetWriter.beginFlush();
                    offsetWriter.doFlush(null).get();
                    offsetBackingStore.stop();
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException("Unable to flush offsets for " + connectorId, e);
                }
            });
}
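A sketch of driving setOffsets(...); the OffsetInfo constructor shown is an assumption inferred from the fields the method reads (connectorId, topic, partition, offset), not a confirmed signature:

// Hypothetical OffsetInfo construction: the (connectorId, topic, partition, offset)
// argument order is inferred from the field accesses above, not from the class itself.
Stream<OffsetInfo> offsets = Stream.of(
    new OffsetInfo("mirus-source", "events", 0L, 1042L),
    new OffsetInfo("mirus-source", "events", 1L, 977L));
offsetSetter.setOffsets(offsets);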
private static MirusOffsetTool newOffsetTool(Args args) throws IOException {
    // This needs to be the admin topic properties.
    // By default these are in the worker properties file, as this has the admin, producer, and
    // consumer settings. Separating these might be wise - also useful for storing state in the
    // source cluster if it proves necessary.
    final Map<String, String> properties =
        !args.propertiesFile.isEmpty()
            ? Utils.propsToStringMap(Utils.loadProps(args.propertiesFile))
            : Collections.emptyMap();
    final DistributedConfig config = new DistributedConfig(properties);
    final KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // Avoid initializing the entire Kafka Connect plugin system by assuming the
    // internal.[key|value].converter is org.apache.kafka.connect.json.JsonConverter
    final Converter internalConverter = new JsonConverter();
    internalConverter.configure(config.originalsWithPrefix("internal.key.converter."), true);

    final OffsetSetter offsetSetter = new OffsetSetter(internalConverter, offsetBackingStore);
    final OffsetFetcher offsetFetcher = new OffsetFetcher(config, internalConverter);
    final OffsetSerDe offsetSerDe = OffsetSerDeFactory.create(args.format);

    return new MirusOffsetTool(args, offsetFetcher, offsetSetter, offsetSerDe);
}
    workerConfig = new MemoryConfig(offsetBackingStoreProperties);
} else if (StreamOptions.KAFKA_BACKING_STORE().getValue().equals(bs)) {
    offsetBackingStore = new KafkaOffsetBackingStore();
    workerConfig = new KafkaConfig(offsetBackingStoreProperties);
} else {
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time time = new SystemTime();
    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // No advertised URL is available in this embedded setup, so fall back to a random
    // UUID as the worker ID.
    String workerId = UUID.randomUUID().toString();
    worker = new Worker(workerId, time, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    // advertisedUrl = "" because there is no REST server here; this may have side effects elsewhere.
    herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, "");

    this.connectorConfigs = connectorConfigs;
    shutdownHook = new ShutdownHook();
}
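A usage sketch for ConnectEmbedded; the start()/stop() lifecycle methods are an assumption suggested by the shutdown hook registered in the constructor, since only the constructor is shown above:

// Assumed lifecycle methods (start/stop); only the constructor appears in the snippet above.
Properties workerProps = new Properties();    // worker settings as in the other examples
Properties connectorProps = new Properties(); // one Properties object per connector
ConnectEmbedded connect = new ConnectEmbedded(workerProps, connectorProps);
connect.start();
// ... run the embedded Connect cluster ...
connect.stop();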
public KafkaConnectDistributed(Map<String, String> workerProps) {
    Time time = new SystemTime();
    ConnectorFactory connectorFactory = new ConnectorFactory();
    DistributedConfig config = new DistributedConfig(workerProps);

    rest = new RestServer(config);
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    Worker worker = new Worker(workerId, time, connectorFactory, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    DistributedHerder herder = new DistributedHerder(config, time, worker, statusBackingStore,
        configBackingStore, advertisedUrl.toString());
    connect = new Connect(herder, rest);

    try {
        connect.start();
    } catch (Throwable t) {
        LOGGER.error("Failed to start Connect", t);
        rest.stop();
        connect.stop();
    }
}
String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
offsetBackingStore.configure(config);
String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();
KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
offsetBackingStore.configure(configWithClientIdSuffix(workerProps, "offset"));