private void readConnectorStatus(String key, byte[] value) {
    String connector = parseConnectorStatusKey(key);
    if (connector == null || connector.isEmpty()) {
        log.warn("Discarding record with invalid connector status key {}", key);
        return;
    }

    if (value == null) {
        log.trace("Removing status for connector {}", connector);
        remove(connector);
        return;
    }

    ConnectorStatus status = parseConnectorStatus(connector, value);
    if (status == null)
        return;

    synchronized (this) {
        log.trace("Received connector {} status update {}", connector, status);
        CacheEntry<ConnectorStatus> entry = getOrAdd(connector);
        entry.put(status);
    }
}
private void readTaskStatus(String key, byte[] value) {
    ConnectorTaskId id = parseConnectorTaskId(key);
    if (id == null) {
        log.warn("Discarding record with invalid task status key {}", key);
        return;
    }

    if (value == null) {
        log.trace("Removing task status for {}", id);
        remove(id);
        return;
    }

    TaskStatus status = parseTaskStatus(id, value);
    if (status == null) {
        log.warn("Failed to parse task status with key {}", key);
        return;
    }

    synchronized (this) {
        log.trace("Received task {} status update {}", id, status);
        CacheEntry<TaskStatus> entry = getOrAdd(id);
        entry.put(status);
    }
}
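// The two readers above key status records by connector name and task id. As a point of reference
// only, here is a minimal sketch of how such keys could be parsed; the "status-connector-" /
// "status-task-" prefixes and the helper class below are assumptions made for illustration,
// not code taken from this source.
final class StatusKeyParsing {
    private static final String CONNECTOR_STATUS_PREFIX = "status-connector-";
    private static final String TASK_STATUS_PREFIX = "status-task-";

    // Returns the connector name, or null if the key does not carry the connector status prefix.
    static String parseConnectorStatusKey(String key) {
        if (key == null || !key.startsWith(CONNECTOR_STATUS_PREFIX))
            return null;
        return key.substring(CONNECTOR_STATUS_PREFIX.length());
    }

    // Returns {connectorName, taskId} for a key such as "status-task-my-sink-0", or null if malformed.
    static String[] parseTaskStatusKey(String key) {
        if (key == null || !key.startsWith(TASK_STATUS_PREFIX))
            return null;
        String rest = key.substring(TASK_STATUS_PREFIX.length());
        int sep = rest.lastIndexOf('-'); // the task id follows the last '-'
        if (sep <= 0 || sep == rest.length() - 1)
            return null;
        return new String[] {rest.substring(0, sep), rest.substring(sep + 1)};
    }
}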
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time time = new SystemTime();
    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // Without the REST server there is no advertised URL, so fall back to a random UUID as the worker id.
    String workerId = UUID.randomUUID().toString();
    worker = new Worker(workerId, time, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    // Pass an empty advertised URL ("") since the REST server is not started; this should not break the herder.
    herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, "");
    this.connectorConfigs = connectorConfigs;

    shutdownHook = new ShutdownHook();
}
@Override
public void configure(final WorkerConfig config) {
    this.topic = config.getString(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG);
    if (this.topic == null || this.topic.trim().length() == 0)
        throw new ConfigException("Must specify topic for connector status.");

    Map<String, Object> originals = config.originals();

    Map<String, Object> producerProps = new HashMap<>(originals);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.RETRIES_CONFIG, 0); // we handle retries in this class

    Map<String, Object> consumerProps = new HashMap<>(originals);
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

    Map<String, Object> adminProps = new HashMap<>(originals);
    NewTopic topicDescription = TopicAdmin.defineTopic(topic)
            .compacted()
            .partitions(config.getInt(DistributedConfig.STATUS_STORAGE_PARTITIONS_CONFIG))
            .replicationFactor(config.getShort(DistributedConfig.STATUS_STORAGE_REPLICATION_FACTOR_CONFIG))
            .build();

    Callback<ConsumerRecord<String, byte[]>> readCallback = new Callback<ConsumerRecord<String, byte[]>>() {
        @Override
        public void onCompletion(Throwable error, ConsumerRecord<String, byte[]> record) {
            read(record);
        }
    };
    this.kafkaLog = createKafkaBasedLog(topic, producerProps, consumerProps, readCallback, topicDescription, adminProps);
}
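// configure() reads the status topic settings from the worker config. A minimal sketch of those
// settings follows; the broker address, group id, topic name, and sizing values are placeholders
// chosen for illustration, not values from this source.
import java.util.HashMap;
import java.util.Map;

final class StatusStoreWorkerProps {
    static Map<String, String> example() {
        Map<String, String> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092");     // placeholder broker address
        props.put("group.id", "connect-cluster");             // placeholder Connect group id
        props.put("status.storage.topic", "connect-status");  // DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG
        props.put("status.storage.partitions", "5");          // STATUS_STORAGE_PARTITIONS_CONFIG
        props.put("status.storage.replication.factor", "3");  // STATUS_STORAGE_REPLICATION_FACTOR_CONFIG
        return props;
    }
}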
StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter);
statusBackingStore.configure(config);
public KafkaConnectDistributed(Map<String, String> workerProps) {
    Time time = new SystemTime();
    ConnectorFactory connectorFactory = new ConnectorFactory();
    DistributedConfig config = new DistributedConfig(workerProps);

    rest = new RestServer(config);
    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    Worker worker = new Worker(workerId, time, connectorFactory, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    DistributedHerder herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore,
            advertisedUrl.toString());
    connect = new Connect(herder, rest);

    try {
        connect.start();
    } catch (Throwable t) {
        LOGGER.error("Failed to start Connect", t);
        rest.stop();
        connect.stop();
    }
}
StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
statusBackingStore.configure(configWithClientIdSuffix(workerProps, "status"));