/**
 * Creates a new byte-array consumer for the given brokers.
 *
 * <p>Exists as an instance-level seam so tests can override consumer construction;
 * the real work is delegated to {@link KafkaUtils#newConsumer}.
 *
 * @param brokerList comma-separated Kafka bootstrap servers
 * @param groupId consumer group id, or {@code null} for no group membership
 * @param config optional extra settings passed through to the consumer, may be {@code null}
 * @return a freshly constructed consumer
 */
@VisibleForTesting
Consumer<byte[], byte[]> newConsumer(
    String brokerList, @Nullable String groupId, @Nullable Settings config) {
  return KafkaUtils.newConsumer(brokerList, groupId, config);
}
/**
 * Creates a new Kafka producer sending raw byte-array keys and values.
 *
 * <p>Caller-supplied settings are converted to {@link Properties} first; the
 * bootstrap servers and byte-array serializers are then applied on top. The
 * {@code acks} setting is defaulted to {@code "1"} only when the caller did
 * not configure it explicitly.
 *
 * @param brokerList comma-separated Kafka bootstrap servers
 * @param config settings converted into producer properties
 * @return a freshly constructed producer
 */
static Producer<byte[], byte[]> newProducer(String brokerList, Settings config) {
  final Properties props = toProperties(config);
  props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
  final String byteArraySerializer = ByteArraySerializer.class.getName();
  props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, byteArraySerializer);
  props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, byteArraySerializer);
  // ~ respect an explicitly configured acks value; otherwise default to "1"
  if (props.getProperty(ProducerConfig.ACKS_CONFIG) == null) {
    props.setProperty(ProducerConfig.ACKS_CONFIG, "1");
  }
  return new KafkaProducer<>(props);
}
@Override public Writer<Pair<byte[], byte[]>> openWriter(int partitionId) { String cacheKey = brokers; Producer<byte[], byte[]> producer = PRODUCERS.get(cacheKey); if (producer == null) { // ~ ok, let's create a new producer (this may take some time) final Producer<byte[], byte[]> p = KafkaUtils.newProducer(brokers, config); // ~ now, let's try to store it in our global cache final Producer<byte[], byte[]> p1 = PRODUCERS.putIfAbsent(cacheKey, p); if (p1 == null) { producer = p; } else { // ~ looks like somebody managed to create concurrently a new // producer in between and store it quicker into the global cache producer = p1; // ~ must close the created one to avoid leaking resources! p.close(); } } final List<PartitionInfo> partitions = producer.partitionsFor(topic); return new ProducerWriter(producer, topic, partitionId % partitions.size()); }
/**
 * Opens an unbounded reader over a single topic partition.
 *
 * <p>A dedicated group-less consumer is created and manually assigned the
 * partition. Positioning follows {@code startOffset}: a positive value seeks
 * to that exact offset, zero seeks to the beginning, and a negative value
 * performs no seek (leaving the consumer's default position).
 *
 * @return a reader consuming from the assigned partition until {@code stopReadingAtStamp}
 * @throws IOException if the reader cannot be opened
 */
@Override
public UnboundedReader<Pair<byte[], byte[]>, Long> openReader() throws IOException {
  final Consumer<byte[], byte[]> consumer = KafkaUtils.newConsumer(brokerList, null, config);
  final List<TopicPartition> assignment = Collections.singletonList(topicPartition);
  consumer.assign(assignment);
  if (startOffset > 0) {
    consumer.seek(topicPartition, startOffset);
  } else if (startOffset == 0) {
    consumer.seekToBeginning(assignment);
  }
  return new ConsumerReader(consumer, topicPartition, stopReadingAtStamp);
}
/**
 * Creates a new Kafka consumer for raw byte-array keys and values.
 *
 * <p>Caller-supplied settings are converted to {@link Properties} first; the
 * bootstrap servers and byte-array deserializers are then applied on top. A
 * group id is set only when provided, and a unique client id is autogenerated
 * (with a warning) when the caller did not configure one.
 *
 * @param brokerList comma-separated Kafka bootstrap servers
 * @param groupId consumer group id, or {@code null} for no group membership
 * @param config optional settings converted into consumer properties, may be {@code null}
 * @return a freshly constructed consumer
 */
static Consumer<byte[], byte[]> newConsumer(
    String brokerList, @Nullable String groupId, @Nullable Settings config) {
  Properties ps = toProperties(config);
  ps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
  // BUGFIX: keys were previously deserialized with StringDeserializer, which
  // contradicts the declared Consumer<byte[], byte[]> type and the matching
  // producer's ByteArraySerializer — callers reading keys would hit a
  // ClassCastException at runtime. Use ByteArrayDeserializer for both.
  ps.setProperty(
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  ps.setProperty(
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  if (groupId != null) {
    ps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  }
  // ~ ensure a stable, unique client id so broker-side metrics/quotas are attributable
  if (ps.getProperty(ConsumerConfig.CLIENT_ID_CONFIG) == null) {
    final String name = "euphoria.client-id-" + UUID.randomUUID().toString();
    LOG.warn("Autogenerating name of consumer's {} to {}", ConsumerConfig.CLIENT_ID_CONFIG, name);
    ps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, name);
  }
  return new KafkaConsumer<>(ps);
}