// NOTE(review): fragment — the enclosing method, the remainder of the retry loop, and
// the `if` that matches the trailing `} else {` are outside this view, so the braces
// here do not balance. `offsetManager` is read but never assigned in the visible code;
// presumably it comes from `metadataResponse.coordinator()` in a missing success
// branch — confirm against the full source.
while (true) { try {
    // Bootstrap: connect to a well-known broker to discover the group's offset manager.
    channel = new BlockingChannel("localhost", 9092, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
    channel.connect();
    int correlationId = 0;
    // Ask the bootstrap broker which broker coordinates offsets for `myGroup`.
    channel.send( new ConsumerMetadataRequest(myGroup, ConsumerMetadataRequest.CurrentVersion(), correlationId++, myClientid));
    ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer());
    // Drop the bootstrap connection and reconnect directly to the coordinator.
    channel.disconnect();
    channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
    channel.connect();
    break;
} else {
/**
 * Runs {@code callback} against this broker's channel, connecting on demand and
 * always tearing the connection down afterwards — including when the callback or
 * the connect itself throws.
 *
 * @param callback the action to perform with the connected channel
 * @param <T>      the callback's result type
 * @return whatever the callback produced
 */
public <T> T execute(Callback<T> callback) {
    try {
        ensureConnected();
        return callback.doWithChannel(channel);
    } finally {
        disconnectIfConnected();
    }
}

/** Opens the channel unless it is already connected. */
private void ensureConnected() {
    if (channel.isConnected()) {
        return;
    }
    channel.connect();
}

/** Closes the channel if (and only if) it is currently connected. */
private void disconnectIfConnected() {
    if (channel.isConnected()) {
        channel.disconnect();
    }
}
public static BrokerChannel forBroker(String host, int port) { BlockingChannel channel = new BlockingChannel(host, port, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000); // todo: make this configurable return new BrokerChannel(channel); }
// NOTE(review): fragment — the enclosing method, the try/catch that binds `e`, and the
// close of the `if` are outside this view; braces do not balance here.
channel.send(fetchRequest.underlying());
OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().buffer());
OffsetMetadataAndError result = fetchResponse.offsets().get(testPartition);
if (null == result) {
    // No entry for the requested partition: drop the channel and re-establish it.
    channel.disconnect();
    reconnect();
    // NOTE(review): disconnect() immediately after reconnect() looks wrong — it likely
    // belongs to a different error branch lost in this fragment; verify against the
    // full source before relying on this path.
    channel.disconnect();
    LOGGER.error("Error fetching offset: ", e);
/**
 * Fetches metadata for the given topics over {@code channel} and indexes the
 * successfully described topics by name.
 *
 * @param channel an already-connected broker channel
 * @param topics  the topic names to describe
 * @return topic name -> topic view model, for topics whose metadata carried no error
 */
private Map<String, TopicVO> getTopicMetadata(BlockingChannel channel, String... topics) {
    final TopicMetadataRequest metadataRequest =
        new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics));
    LOG.debug("Sending topic metadata request: {}", metadataRequest);
    channel.send(metadataRequest);

    final kafka.api.TopicMetadataResponse rawResponse =
        kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received topic metadata response: {}", rawResponse);

    // Keep only error-free topics, convert each to a view model, and key by name.
    final Map<String, TopicVO> topicsByName = new TopicMetadataResponse(rawResponse)
        .topicsMetadata().stream()
        .filter(metadata -> metadata.errorCode() == ErrorMapping.NoError())
        .map(this::processTopicMetadata)
        .collect(Collectors.toMap(TopicVO::getName, topic -> topic));
    return topicsByName;
}
// NOTE(review): fragment — the OffsetCommitRequest constructor that this trailing
// argument belongs to, the loop body that would bind `partitionErrorCode`, and the
// `if` matching the `else` branches are all outside this view; the empty `for` body
// followed directly by `} else if` cannot compile as shown.
(short) 1 /* version */); // version 1 and above commit to Kafka, version 0 commits to ZooKeeper
try {
    channel.send(commitRequest.underlying());
    OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer());
    if (commitResponse.hasError()) {
        // Walk the per-partition error codes; `partitionErrorCode` is presumably
        // derived from `partitionErrorCodeRaw` in code truncated from this fragment.
        for (Object partitionErrorCodeRaw : commitResponse.errors().values()) {
        } else if (partitionErrorCode == ErrorMapping.NotCoordinatorForConsumerCode() || partitionErrorCode == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) {
            // Coordinator moved or is unavailable: drop the channel and rediscover it.
            channel.disconnect();
            reconnect();
        } else {
            channel.disconnect();
            reconnect();
/**
 * Asks the broker on {@code channel} which broker coordinates offsets for the
 * given consumer group.
 *
 * @param channel an already-connected broker channel
 * @param groupId the consumer group to look up
 * @return the coordinator broker's id, or {@code null} when the lookup returned an error
 */
private Integer offsetManagerBroker(BlockingChannel channel, String groupId) {
    final ConsumerMetadataRequest metadataRequest =
        new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());
    LOG.debug("Sending consumer metadata request: {}", metadataRequest);
    channel.send(metadataRequest);

    final ConsumerMetadataResponse metadataResponse =
        ConsumerMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received consumer metadata response: {}", metadataResponse);

    if (metadataResponse.errorCode() != ErrorMapping.NoError()) {
        return null;
    }
    return metadataResponse.coordinator().id();
}
// NOTE(review): fragment — the enclosing method and the `if` matching `} else {` are
// outside this view; `offsetManager` is read without a visible assignment (presumably
// `metadataResponse.coordinator()` in missing code), and the commit-request handling
// below is truncated mid-statement. Braces do not balance as shown.
BlockingChannel channel = new BlockingChannel(brokerHost, brokerPort, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
channel.connect();
// Locate the offset-manager (coordinator) broker for `group`.
channel.send(new ConsumerMetadataRequest(group, ConsumerMetadataRequest.CurrentVersion(), correlationId++, clientId));
ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer());
channel.disconnect();
// Reconnect directly to the coordinator before committing offsets.
channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
channel.connect();
} else {
(short) 1 /* version */); // version 1 and above commit to Kafka, version 0 commits to ZooKeeper
try {
    channel.send(commitRequest.underlying());
    OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer());
    if (commitResponse.hasError()) {
        // NOTE(review): two consecutive disconnect() calls inside the error loop look
        // like pasted-together error handling; verify against the full source.
        for (final Object partitionErrorCode : commitResponse.errors().values()) {
            channel.disconnect();
            channel.disconnect();
// NOTE(review): fragment — the enclosing method and the `if` matching `} else {` are
// outside this view; `offsetManager` is read without a visible assignment (presumably
// `metadataResponse.coordinator()` in missing code), `clientId);` is the tail of a
// truncated OffsetFetchRequest construction, and the error handling below is cut off.
BlockingChannel channel = new BlockingChannel(brokerHost, brokerPort, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
channel.connect();
// Locate the offset-manager (coordinator) broker for `group`.
channel.send(new ConsumerMetadataRequest(group, ConsumerMetadataRequest.CurrentVersion(), correlationId++, clientId));
ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer());
channel.disconnect();
// Reconnect directly to the coordinator before fetching committed offsets.
channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
channel.connect();
} else {
clientId);
try {
    channel.send(fetchRequest.underlying());
    OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().buffer());
    OffsetMetadataAndError result = fetchResponse.offsets().get(partitionToRead);
    // NOTE(review): `result` is dereferenced without a null check here, unlike the
    // sibling fetch path that guards `null == result` — confirm intent upstream.
    short offsetFetchErrorCode = result.error();
    if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
        // NOTE(review): double disconnect() with no reconnect between them looks like
        // truncated error handling; verify against the full source.
        channel.disconnect();
        channel.disconnect();
/**
 * Issues an offset request for all of the given partitions of {@code topic}
 * against the specified broker, retrying per the configured retry template.
 *
 * @param brokerId         id of the broker to query
 * @param topic            topic whose partition offsets are requested
 * @param requestInfo      offset request details applied to every listed partition
 * @param brokerPartitions the partitions to include in the request
 * @return the broker's offset response
 */
private OffsetResponse sendOffsetRequest(Integer brokerId, TopicVO topic,
                                         PartitionOffsetRequestInfo requestInfo,
                                         List<TopicPartitionVO> brokerPartitions) {
    // Every partition shares the same request info; key each by (topic, partition).
    final Map<TopicAndPartition, PartitionOffsetRequestInfo> infoByPartition =
        brokerPartitions.stream()
            .collect(Collectors.toMap(
                partition -> new TopicAndPartition(topic.getName(), partition.getId()),
                partition -> requestInfo));
    final OffsetRequest offsetRequest =
        new OffsetRequest(infoByPartition, (short) 0, clientId());
    LOG.debug("Sending offset request: {}", offsetRequest);

    return retryTemplate.execute(context ->
        brokerChannel(brokerId).execute(brokerChannel -> {
            brokerChannel.send(offsetRequest.underlying());
            final kafka.api.OffsetResponse rawResponse =
                kafka.api.OffsetResponse.readFrom(brokerChannel.receive().buffer());
            LOG.debug("Received offset response: {}", rawResponse);
            return new OffsetResponse(rawResponse);
        }));
}