public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
        LOG.error("Cannot get offset for partition " + partition + ": leader consumer is null");
        return -1;
    }
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
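// Usage sketch (hedged): callers usually pass one of the sentinel timestamps from
// kafka.api.OffsetRequest rather than a wall-clock time. The "manager" variable below is
// hypothetical and stands in for whatever object defines getOffset() above.
long earliest = manager.getOffset("events", 0, kafka.api.OffsetRequest.EarliestTime()); // -2L
long latest = manager.getOffset("events", 0, kafka.api.OffsetRequest.LatestTime());     // -1L
if (earliest >= 0 && latest >= 0) {
    System.out.println("events-0 backlog: " + (latest - earliest) + " messages");
}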
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    // The API implies that this will always return all of the offsets, so it seems a
    // partition cannot have more than Integer.MAX_VALUE - 1 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available,
    // so if segments have been dropped by retention, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(
            ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'");
    }
    return offsetResponse.offsets(topicName, partitionId);
}
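// Interpretation sketch (hedged): the broker returns the offsets in descending order, so
// the first element is the log-end offset and the last is the base offset of the oldest
// segment still on disk. The topic and partition below are illustrative.
long[] offsets = findAllOffsets(consumer, "events", 0);
long latest = offsets[0];                    // next offset to be written
long earliest = offsets[offsets.length - 1]; // oldest offset still readable
System.out.println("events-0 readable range: [" + earliest + ", " + latest + ")");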
while (true) {
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            partitionToRequestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    response = consumer.getOffsetsBefore(request);
    // ... (rest of the retry loop is elided in this excerpt; presumably the caller inspects
    // response for per-partition errors and breaks once all offsets have been resolved)
SimpleConsumer consumer = createBrokerConsumer(context, brokers.get(i));
log.info(String.format("Fetching metadata from broker %s with client id %s for %d topic(s) %s",
        brokers.get(i), consumer.clientId(), metaRequestTopics.size(), metaRequestTopics));
try {
    for (int iter = 0; iter < NUM_TRIES_TOPIC_METADATA; iter++) {
        // ... (the metadata request/response handling is elided in this excerpt;
        // the warn below runs once an attempt has failed with exception e)
        log.warn(String.format(
                "Fetching topic metadata with client id %s for topics [%s] from broker [%s] failed, iter[%s]",
                consumer.clientId(), metaRequestTopics, brokers.get(i), iter), e);
        try {
            Thread.sleep((long) (Math.random() * (iter + 1) * 1000)); // randomized backoff, grows with iter
private static long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition,
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
                response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
        return -1;
    }
    long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    return offsets[0];
}
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    // Unlike the Integer.MAX_VALUE variant above, this request caps the result at 10,000
    // offsets, so it only returns all of them for partitions with fewer than 10,000 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available,
    // so if segments have been dropped by retention, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 10000);
    OffsetRequest offsetRequest = new OffsetRequest(
            ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        LOGGER.warn(format("Offset response has error: %d", errorCode));
        throw new RakamException("could not fetch data from Kafka, error code is '" + errorCode + "'",
                HttpResponseStatus.INTERNAL_SERVER_ERROR);
    }
    long[] offsets = offsetResponse.offsets(topicName, partitionId);
    return offsets;
}
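// Design note: capping maxNumOffsets at 10,000 bounds the response size, but it can
// silently truncate the list for partitions with more segments than that, in which case
// the last element is no longer the earliest available offset.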
@Test(expected = RuntimeException.class)
public void testWithThreeRetries() {
    List<Object> mocks = new ArrayList<Object>();
    Configuration configuration = EasyMock.createMock(Configuration.class);
    mocks.add(configuration);
    EasyMock.expect(configuration.get(EasyMock.anyString())).andReturn(DUMMY_VALUE).anyTimes();
    JobContext jobContext = EasyMock.createMock(JobContext.class);
    mocks.add(jobContext);
    EasyMock.expect(jobContext.getConfiguration()).andReturn(configuration).anyTimes();
    SimpleConsumer simpleConsumer = EasyMock.createMock(SimpleConsumer.class);
    mocks.add(simpleConsumer);
    EasyMock.expect(simpleConsumer.clientId()).andReturn(DUMMY_VALUE)
            .times(EtlInputFormat.NUM_TRIES_TOPIC_METADATA + 1);
    Exception ex = new RuntimeException("No TopicMeta");
    EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject())).andThrow(ex)
            .times(EtlInputFormat.NUM_TRIES_TOPIC_METADATA);
    simpleConsumer.close();
    EasyMock.expectLastCall().andVoid().anyTimes();
    EasyMock.replay(mocks.toArray());
    EtlInputFormat inputFormat = new EtlInputFormatForUnitTest();
    EtlInputFormatForUnitTest.consumerType = EtlInputFormatForUnitTest.ConsumerType.MOCK;
    EtlInputFormatForUnitTest.consumer = simpleConsumer;
    List<TopicMetadata> actualTopicMetadatas =
            inputFormat.getKafkaMetadata(jobContext, new ArrayList<String>());
    EasyMock.verify(mocks.toArray());
}
private SimpleConsumer mockSimpleConsumer(TopicMetadataResponse metadataResponse,
        OffsetResponse offsetResponse, FetchResponse fetchResponse) {
    SimpleConsumer simpleConsumer = EasyMock.createMock(SimpleConsumer.class);
    mocks.add(simpleConsumer);
    EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject()))
            .andReturn(metadataResponse).times(1);
    EasyMock.expect(simpleConsumer.getOffsetsBefore((OffsetRequest) EasyMock.anyObject()))
            .andReturn(offsetResponse).anyTimes();
    simpleConsumer.close();
    EasyMock.expectLastCall().andVoid().anyTimes();
    EasyMock.expect(simpleConsumer.clientId()).andReturn(KAFKA_CLIENT_ID).times(1);
    EasyMock.expect(simpleConsumer.fetch((FetchRequest) EasyMock.anyObject()))
            .andReturn(fetchResponse).times(1);
    EasyMock.expect(simpleConsumer.host()).andReturn("dummyHost").anyTimes();
    EasyMock.expect(simpleConsumer.port()).andReturn(8888).anyTimes();
    return simpleConsumer;
}
private SimpleConsumer mockConsumerThrowsExceptionForOffsetRangeCall(TopicMetadataResponse metadataResponse) {
    SimpleConsumer simpleConsumer = EasyMock.createMock(SimpleConsumer.class);
    mocks.add(simpleConsumer);
    EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject()))
            .andReturn(metadataResponse).times(1);
    EasyMock.expect(simpleConsumer.getOffsetsBefore((OffsetRequest) EasyMock.anyObject()))
            .andThrow(new RuntimeException()).times(3);
    EasyMock.expect(simpleConsumer.clientId()).andReturn(KAFKA_CLIENT_ID).times(1);
    simpleConsumer.close();
    EasyMock.expectLastCall().andVoid().times(2);
    EasyMock.expect(simpleConsumer.host()).andReturn("dummyHost").times(4);
    EasyMock.expect(simpleConsumer.port()).andReturn(8888).times(4);
    return simpleConsumer;
}
EasyMock.expect(simpleConsumer.clientId()).andReturn(DUMMY_VALUE).times(2);
EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject()))
        .andThrow(new RuntimeException("No TopicMD"));
public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
        //LOG.error("Error consumer is null get offset from partition:" + partition);
        return -1;
    }
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
private long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition,
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
                response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
        return -1;
    }
    long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    return offsets[0];
}
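// Usage sketch (hedged): a common reason to fetch the head offset is lag monitoring.
// "committedOffset" is assumed to come from wherever this consumer checkpoints
// (ZooKeeper, a database, etc.); it is not part of the snippet above.
TopicAndPartition tp = new TopicAndPartition("events", 0);
long latest = getLatestOffset(consumer, tp);
if (latest >= 0) {
    logger.info("Lag for {}: {} messages", tp, latest - committedOffset);
}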
/**
 * Makes a call to kafka to fetch messages.
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
    FetchRequest request = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
            .maxWait(MAX_WAIT)
            .build();
    return consumer.fetch(request);
}
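// Usage sketch (hedged): with the old SimpleConsumer API the caller walks the returned
// message set by hand. MessageAndOffset and ByteBufferMessageSet come from kafka.message
// and kafka.javaapi; "nextOffset" is a hypothetical cursor kept by the caller.
FetchResponse response = fetchMessages(consumer, nextOffset);
for (MessageAndOffset messageAndOffset :
        response.messageSet(topicPart.getTopic(), topicPart.getPartition())) {
    java.nio.ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes); // copy out the message body
    nextOffset = messageAndOffset.nextOffset(); // offset to request on the next fetch
}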
private long getOffsets(Node leader, String topic, int partitionId, long time) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    SimpleConsumer consumer = new SimpleConsumer(leader.host(), leader.port(),
            10000, 1024, "Kafka-zk-simpleconsumer");
    try {
        PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(time, 10000);
        OffsetRequest offsetRequest = new OffsetRequest(
                ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
                kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
        OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
        if (offsetResponse.hasError()) {
            short errorCode = offsetResponse.errorCode(topic, partitionId);
            log.warn(format("Offset response has error: %d", errorCode));
            throw new ApiException("could not fetch data from Kafka, error code is '" + errorCode
                    + "'. Exception message: " + offsetResponse.toString());
        }
        long[] offsets = offsetResponse.offsets(topic, partitionId);
        return offsets[0];
    } finally {
        consumer.close(); // close on every path, not just on success
    }
}
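// Design note: the SimpleConsumer constructor arguments are (host, port, soTimeoutMs,
// bufferSize, clientId). A 1024-byte buffer is fine for offset requests, whose responses
// are tiny, but a consumer used for fetching messages would need a much larger buffer.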
/**
 * Fetches messages from the given topic, partition and offset using the provided {@link SimpleConsumer}.
 *
 * @return A {@link ByteBufferMessageSet} of the given topic, partition for messages fetched
 *
 * @throws OffsetOutOfRangeException if the given offset is out of range.
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition.
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with error.
 */
public static ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, String topic, int partition,
                                                 int fetchSize, long requestOffset) throws KafkaException {
    FetchRequest req = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topic, partition, requestOffset, fetchSize)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
        throw Errors.forCode(fetchResponse.errorCode(topic, partition)).exception();
    }
    return fetchResponse.messageSet(topic, partition);
}
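// Caller sketch (hedged): because hasError() is translated through Errors.forCode(...),
// callers can catch specific unchecked subclasses. "getEarliestOffset" is a hypothetical
// helper wrapping an OffsetRequest with kafka.api.OffsetRequest.EarliestTime(), as shown earlier.
ByteBufferMessageSet messages;
try {
    messages = fetchMessages(consumer, topic, partition, fetchSize, requestOffset);
} catch (OffsetOutOfRangeException e) {
    // Retention deleted the requested offset; restart from the earliest one still available.
    requestOffset = getEarliestOffset(consumer, topic, partition);
    messages = fetchMessages(consumer, topic, partition, fetchSize, requestOffset);
}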