// Fragment (presumably inside a loop over broker indices i): start broker i with its own temp
// log dir, track it, and append its actual bound host:port to the comma-separated bootstrap string.
// NOTE(review): this copy resolves the port via boundPort(securityProtocol) — the older
// SocketServer API; sibling copies in this file use boundPort(listenerName). Confirm the Kafka
// version this variant targets.
KafkaServer kafkaServer = getKafkaServer(i, tmpKafkaDirs.get(i)); brokers.add(kafkaServer); brokerConnectionString += hostAndPortToUrlString(KAFKA_HOST, kafkaServer.socketServer().boundPort(securityProtocol)); brokerConnectionString += ",";
public static BrokerChannel forBroker(String host, int port) { BlockingChannel channel = new BlockingChannel(host, port, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000); // todo: make this configurable return new BrokerChannel(channel); }
/**
 * Runs the supplied callback against this broker's channel, connecting first if
 * needed and always disconnecting afterwards (even if the callback throws).
 *
 * @param callback the work to perform with a connected channel
 * @param <T> the callback's result type
 * @return whatever the callback returns
 */
public <T> T execute(Callback<T> callback) {
    try {
        ensureConnected();
        return callback.doWithChannel(channel);
    } finally {
        tearDownConnection();
    }
}

/** Connects the channel unless it is already connected. */
private void ensureConnected() {
    if (!channel.isConnected()) {
        channel.connect();
    }
}

/** Disconnects the channel if it is currently connected. */
private void tearDownConnection() {
    if (channel.isConnected()) {
        channel.disconnect();
    }
}
// Fragment: locate the offset coordinator for group `myGroup`, then reconnect to it.
// Opens a channel to an arbitrary bootstrap broker (localhost:9092), sends a
// ConsumerMetadataRequest, reads the ConsumerMetadataResponse, tears that channel down,
// and opens a fresh channel directly to the reported offset manager before breaking out
// of the retry loop.
// NOTE(review): snippet is truncated — the trailing `} else {` has no matching `if`
// visible here, and the catch for the `try` is missing; verify the error/retry branch
// against the full file.
while (true) { try { channel = new BlockingChannel("localhost", 9092, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); int correlationId = 0; channel.send( new ConsumerMetadataRequest(myGroup, ConsumerMetadataRequest.CurrentVersion(), correlationId++, myClientid)); ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer()); channel.disconnect(); channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); break; } else {
/**
 * Resolves the id of the broker that coordinates offsets for the given consumer group.
 *
 * @param channel an already-connected channel to any broker
 * @param groupId the consumer group to look up
 * @return the coordinator's broker id, or {@code null} when the lookup returned an error
 */
private Integer offsetManagerBroker(BlockingChannel channel, String groupId) {
    final ConsumerMetadataRequest metadataRequest =
        new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());
    LOG.debug("Sending consumer metadata request: {}", metadataRequest);
    channel.send(metadataRequest);

    final ConsumerMetadataResponse metadataResponse =
        ConsumerMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received consumer metadata response: {}", metadataResponse);

    if (metadataResponse.errorCode() != ErrorMapping.NoError()) {
        // No coordinator available for this group right now.
        return null;
    }
    return metadataResponse.coordinator().id();
}
// Fragment: send an offset-fetch request and read the committed offset for testPartition;
// when no result comes back, drop the channel and reconnect (presumably the coordinator moved).
// NOTE(review): snippet is truncated — `e` is not declared in the visible code (the enclosing
// catch block is missing) and the second disconnect/log pair appears to belong to a separate
// error branch; verify against the full file before relying on this flow.
channel.send(fetchRequest.underlying()); OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().buffer()); OffsetMetadataAndError result = fetchResponse.offsets().get(testPartition); if (null == result) { channel.disconnect(); reconnect(); channel.disconnect(); LOGGER.error("Error fetching offset: ", e);
/**
 * Extracts the common name (CN) from the session principal's distinguished name,
 * falling back to the full principal name when no {@code CN=} component is present.
 *
 * @param session the Kafka request session carrying the authenticated principal
 * @return the CN value (text after {@code CN=} up to the next comma), or the whole
 *         principal name if {@code CN=} does not occur
 */
private String getName(RequestChannel.Session session) {
    final String principalName = session.principal().getName();
    final int cnStart = principalName.indexOf("CN=");
    if (cnStart < 0) {
        // No CN component — use the principal name verbatim.
        return principalName;
    }
    final String afterCn = principalName.substring(cnStart + 3);
    final int commaPos = afterCn.indexOf(",");
    // A comma at position 0 (empty CN) keeps the full remainder, matching prior behavior.
    return commaPos > 0 ? afterCn.substring(0, commaPos) : afterCn;
} }
/**
 * Authorize access to a Kafka privilege.
 *
 * <p>Converts the Kafka resource plus the requesting client's host address into
 * authorizables, maps the Kafka operation to an action, and asks the auth
 * provider whether the session's user may perform it under all active roles.
 *
 * @param session   the request session (supplies the principal and client address)
 * @param operation the Kafka operation being attempted
 * @param resource  the Kafka resource being accessed
 * @return {@code true} if access is granted
 */
public boolean authorize(RequestChannel.Session session, Operation operation, Resource resource) {
    final String clientHost = session.clientAddress().getHostAddress();
    final List<Authorizable> authorizables =
        ConvertUtil.convertResourceToAuthorizable(clientHost, resource);
    final KafkaAction action = actionFactory.getActionByName(operation.name());
    final Set<KafkaAction> actions = Sets.newHashSet(action);
    final Subject subject = new Subject(getName(session));
    return authProvider.hasAccess(subject, authorizables, actions, ActiveRoleSet.ALL);
}
// Fragment: locate the group's offset coordinator via a bootstrap broker, reconnect to the
// coordinator, then fetch the committed offset for partitionToRead; on
// NotCoordinatorForConsumerCode the channel is dropped (coordinator moved).
// NOTE(review): snippet is truncated and stitched — the dangling `} else {`, the stray
// `clientId);` and the duplicated disconnect have no visible surrounding context; verify the
// retry/error handling against the full file.
BlockingChannel channel = new BlockingChannel(brokerHost, brokerPort, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); channel.send(new ConsumerMetadataRequest(group, ConsumerMetadataRequest.CurrentVersion(), correlationId++, clientId)); ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer()); channel.disconnect(); channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); } else { clientId); try { channel.send(fetchRequest.underlying()); OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().buffer()); OffsetMetadataAndError result = fetchResponse.offsets().get(partitionToRead); short offsetFetchErrorCode = result.error(); if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) { channel.disconnect(); channel.disconnect();
// Fragment (presumably inside a loop over broker indices i): start broker i with its own temp
// log dir, track it, and append its actual bound host:port (resolved by listener name) to the
// comma-separated bootstrap connection string. Trailing comma is appended after every broker —
// assumed to be trimmed or tolerated by the consumer of this string; TODO confirm.
KafkaServer kafkaServer = getKafkaServer(i, tmpKafkaDirs.get(i)); brokers.add(kafkaServer); brokerConnectionString += hostAndPortToUrlString(KAFKA_HOST, kafkaServer.socketServer().boundPort(listenerName)); brokerConnectionString += ",";
/**
 * Fetches metadata for the given topics from the broker behind the supplied channel.
 *
 * <p>Topics whose metadata came back with an error code are silently dropped from
 * the result.
 *
 * @param channel a connected broker channel
 * @param topics  the topic names to query
 * @return map from topic name to its view object, errored topics excluded
 */
private Map<String, TopicVO> getTopicMetadata(BlockingChannel channel, String... topics) {
    final TopicMetadataRequest metadataRequest =
        new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics));
    LOG.debug("Sending topic metadata request: {}", metadataRequest);
    channel.send(metadataRequest);

    final kafka.api.TopicMetadataResponse underlyingResponse =
        kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received topic metadata response: {}", underlyingResponse);

    return new TopicMetadataResponse(underlyingResponse).topicsMetadata().stream()
        .filter(metadata -> ErrorMapping.NoError() == metadata.errorCode())
        .map(this::processTopicMetadata)
        .collect(Collectors.toMap(TopicVO::getName, vo -> vo));
}
// Fragment: commit offsets via a version-1 OffsetCommitRequest (Kafka-backed storage; version 0
// would commit to ZooKeeper), then inspect per-partition error codes — coordinator-moved /
// coordinator-unavailable errors trigger disconnect + reconnect, as does the generic error branch.
// NOTE(review): snippet is truncated — the constructor this `(short) 1` argument closes, the body
// of the for-loop, and the declaration of `partitionErrorCode` are all outside the visible code;
// verify against the full file.
(short) 1 /* version */); // version 1 and above commit to Kafka, version 0 commits to ZooKeeper try { channel.send(commitRequest.underlying()); OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer()); if (commitResponse.hasError()) { for (Object partitionErrorCodeRaw : commitResponse.errors().values()) { } else if (partitionErrorCode == ErrorMapping.NotCoordinatorForConsumerCode() || partitionErrorCode == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) { channel.disconnect(); reconnect(); } else { channel.disconnect(); reconnect();
/**
 * Authorizes the session for the given operation on the given resource.
 *
 * <p>Super users are always allowed; all other principals are delegated to the
 * underlying binding.
 *
 * @param session   request session carrying the authenticated principal
 * @param operation the Kafka operation being attempted
 * @param resource  the Kafka resource being accessed
 * @return {@code true} if access is granted
 */
@Override
public boolean authorize(RequestChannel.Session session, Operation operation, Resource resource) {
    // Guard debug statements so the string concatenation (session/operation/resource
    // toString() plus joins) is only paid when DEBUG logging is actually enabled —
    // authorize() sits on the hot path of every request.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Authorizing Session: " + session + " for Operation: " + operation + " on Resource: " + resource);
    }
    final KafkaPrincipal user = session.principal();
    if (isSuperUser(user)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Allowing SuperUser: " + user + " in " + session + " for Operation: " + operation + " on Resource: " + resource);
        }
        return true;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("User: " + user + " is not a SuperUser");
    }
    return binding.authorize(session, operation, resource);
}
// Fragment: locate the group's offset coordinator via a bootstrap broker, reconnect directly to
// it, then commit offsets with a version-1 OffsetCommitRequest (Kafka-backed storage) and scan
// the per-partition error codes, dropping the channel on error.
// NOTE(review): snippet is truncated and stitched — the dangling `} else {`, the orphaned
// `(short) 1 ...` constructor tail, and the duplicated disconnect lack their surrounding
// statements; verify the control flow against the full file.
BlockingChannel channel = new BlockingChannel(brokerHost, brokerPort, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); channel.send(new ConsumerMetadataRequest(group, ConsumerMetadataRequest.CurrentVersion(), correlationId++, clientId)); ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer()); channel.disconnect(); channel = new BlockingChannel(offsetManager.host(), offsetManager.port(), BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */); channel.connect(); } else { (short) 1 /* version */); // version 1 and above commit to Kafka, version 0 commits to ZooKeeper try { channel.send(commitRequest.underlying()); OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer()); if (commitResponse.hasError()) { for (final Object partitionErrorCode : commitResponse.errors().values()) { channel.disconnect(); channel.disconnect();
// Fragment (presumably inside a loop over broker indices i): start broker i with its own temp
// log dir, track it, and append its actual bound host:port (resolved by listener name) to the
// comma-separated bootstrap connection string. Trailing comma is appended after every broker —
// assumed to be trimmed or tolerated by the consumer of this string; TODO confirm.
KafkaServer kafkaServer = getKafkaServer(i, tmpKafkaDirs.get(i)); brokers.add(kafkaServer); brokerConnectionString += hostAndPortToUrlString(KAFKA_HOST, kafkaServer.socketServer().boundPort(listenerName)); brokerConnectionString += ",";
/**
 * Sends an OffsetRequest for the given topic's partitions to the specified broker
 * and returns the wrapped response. The call is retried through the retry template
 * and a fresh broker channel is used per attempt.
 *
 * @param brokerId         id of the broker leading the partitions
 * @param topic            the topic whose partition offsets are requested
 * @param requestInfo      offset request parameters applied to every partition
 * @param brokerPartitions the partitions led by {@code brokerId}
 * @return the broker's offset response
 */
private OffsetResponse sendOffsetRequest(Integer brokerId, TopicVO topic,
                                         PartitionOffsetRequestInfo requestInfo,
                                         List<TopicPartitionVO> brokerPartitions) {
    // Every partition shares the same PartitionOffsetRequestInfo.
    final OffsetRequest offsetRequest = new OffsetRequest(
        brokerPartitions.stream()
            .collect(Collectors.toMap(
                part -> new TopicAndPartition(topic.getName(), part.getId()),
                part -> requestInfo)),
        (short) 0,
        clientId());

    LOG.debug("Sending offset request: {}", offsetRequest);

    return retryTemplate.execute(context ->
        brokerChannel(brokerId).execute(channel -> {
            channel.send(offsetRequest.underlying());
            final kafka.api.OffsetResponse rawResponse =
                kafka.api.OffsetResponse.readFrom(channel.receive().buffer());
            LOG.debug("Received offset response: {}", rawResponse);
            return new OffsetResponse(rawResponse);
        }));
}
// Fragment (presumably inside a loop over broker indices i): start broker i with its own temp
// log dir, track it, and append its actual bound host:port (resolved by listener name) to the
// comma-separated bootstrap connection string. Trailing comma is appended after every broker —
// assumed to be trimmed or tolerated by the consumer of this string; TODO confirm.
KafkaServer kafkaServer = getKafkaServer(i, tmpKafkaDirs.get(i)); brokers.add(kafkaServer); brokerConnectionString += hostAndPortToUrlString(KAFKA_HOST, kafkaServer.socketServer().boundPort(listenerName)); brokerConnectionString += ",";
/**
 * Returns the actual bound PLAINTEXT port of the broker at the given index.
 *
 * @param index position of the broker in the server list
 * @return the port the broker's socket server is listening on
 */
public int getKafkaServerPort(int index) {
    final KafkaServer server = kafkaServer.get(index);
    return server.socketServer().boundPort(SecurityProtocol.PLAINTEXT);
}
// Fragment (presumably inside a loop over broker indices i): start broker i with its own temp
// log dir, track it, and append its actual bound host:port (resolved by listener name) to the
// comma-separated bootstrap connection string. Trailing comma is appended after every broker —
// assumed to be trimmed or tolerated by the consumer of this string; TODO confirm.
KafkaServer kafkaServer = getKafkaServer(i, tmpKafkaDirs.get(i)); brokers.add(kafkaServer); brokerConnectionString += hostAndPortToUrlString(KAFKA_HOST, kafkaServer.socketServer().boundPort(listenerName)); brokerConnectionString += ",";