void close() {
  // Read the field exactly once into a local; presumably this guards against
  // the field being cleared concurrently between the check and the call —
  // TODO(review): confirm against how `connector` is reset elsewhere.
  ZookeeperConsumerConnector snapshot = connector;
  if (snapshot != null) {
    snapshot.shutdown();
  }
}
}
ExecutorService compute() {
  // A single stream gets a dedicated single-thread executor; more streams get
  // a fixed pool with one thread per stream.
  ExecutorService result;
  if (streams == 1) {
    result = Executors.newSingleThreadExecutor();
  } else {
    result = Executors.newFixedThreadPool(streams);
  }

  // The old consumer API takes a topic -> stream-count map.
  Map<String, Integer> streamsPerTopic = new LinkedHashMap<>(1);
  streamsPerTopic.put(topic, streams);

  // Submit one processor task per message stream, wrapped so failures are
  // surfaced via guardFailures rather than silently killing the worker.
  for (KafkaStream<byte[], byte[]> stream
      : connector.get().createMessageStreams(streamsPerTopic).get(topic)) {
    result.execute(guardFailures(new KafkaStreamProcessor(stream, collector, metrics)));
  }
  return result;
}
ConsumerConnector buildConnector(Properties properties) {
  // Wrap the raw properties in a ConsumerConfig before handing them to the
  // ZooKeeper-based connector.
  ConsumerConfig config = new ConsumerConfig(properties);
  return new ZookeeperConsumerConnector(config);
}
KafkaCollector(Builder builder, Lazy<AsyncSpanConsumer> consumer) { Map<String, Integer> topicCountMap = new LinkedHashMap<>(1); topicCountMap.put(builder.topic, builder.streams); // Settings below correspond to "Old Consumer Configs" // http://kafka.apache.org/documentation.html Properties props = new Properties(); props.put("zookeeper.connect", builder.zookeeper); props.put("group.id", builder.groupId); props.put("fetch.message.max.bytes", String.valueOf(builder.maxMessageSize)); // Same default as zipkin-scala, and keeps tests from hanging props.put("auto.offset.reset", "smallest"); connector = (ZookeeperConsumerConnector) createJavaConsumerConnector(new ConsumerConfig(props)); pool = builder.streams == 1 ? Executors.newSingleThreadExecutor() : Executors.newFixedThreadPool(builder.streams); for (KafkaStream<byte[], byte[]> stream : connector.createMessageStreams(topicCountMap).get(builder.topic)) { pool.execute(new KafkaStreamProcessor(stream, consumer, builder.metrics)); } }
@Override public void close() {
  // Stop accepting new stream-processing tasks (already-running tasks are
  // allowed to finish), then shut the consumer connector down.
  pool.shutdown();
  connector.shutdown();
}
}
KafkaCollector(Builder builder, Lazy<AsyncSpanConsumer> consumer) { Map<String, Integer> topicCountMap = new LinkedHashMap<>(1); topicCountMap.put(builder.topic, builder.streams); // Settings below correspond to "Old Consumer Configs" // http://kafka.apache.org/documentation.html Properties props = new Properties(); props.put("zookeeper.connect", builder.zookeeper); props.put("group.id", builder.groupId); props.put("fetch.message.max.bytes", String.valueOf(builder.maxMessageSize)); // Same default as zipkin-scala, and keeps tests from hanging props.put("auto.offset.reset", "smallest"); connector = (ZookeeperConsumerConnector) createJavaConsumerConnector(new ConsumerConfig(props)); pool = builder.streams == 1 ? Executors.newSingleThreadExecutor() : Executors.newFixedThreadPool(builder.streams); for (KafkaStream<byte[], byte[]> stream : connector.createMessageStreams(topicCountMap).get(builder.topic)) { pool.execute(new KafkaStreamProcessor(stream, consumer, builder.metrics)); } }
@Override public void close() {
  // Stop accepting new stream-processing tasks (already-running tasks are
  // allowed to finish), then shut the consumer connector down.
  pool.shutdown();
  connector.shutdown();
}
}