/**
 * Retrieve the behaviour of "auto.offset.reset" from the config properties.
 * A partition needs to fall back to "auto.offset.reset" as the default offset when
 * we can't find offsets in ZK to start from in {@link StartupMode#GROUP_OFFSETS} startup mode.
 *
 * @param config kafka consumer properties
 * @return either OffsetRequest.LatestTime() or OffsetRequest.EarliestTime()
 */
private static long getInvalidOffsetBehavior(Properties config) {
    final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
    if (val.equals("largest") || val.equals("latest")) {
        // "largest" is the Kafka 0.8 value, "latest" is the Kafka 0.9 value
        return OffsetRequest.LatestTime();
    } else {
        return OffsetRequest.EarliestTime();
    }
}
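As a usage sketch (the property value and call site are illustrative, not from the original source), any value other than "largest"/"latest" resolves to the earliest sentinel:

Properties config = new Properties();
config.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
// "smallest" (Kafka 0.8) and "earliest" (Kafka 0.9) both fall through to the
// else branch, yielding OffsetRequest.EarliestTime(), i.e. the sentinel -2L;
// OffsetRequest.LatestTime() is the sentinel -1L.
long fallbackOffset = getInvalidOffsetBehavior(config);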
@Override
protected long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
        Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
/**
 * For a set of partitions, if a partition is set with the special offsets {@link OffsetRequest#EarliestTime()}
 * or {@link OffsetRequest#LatestTime()}, replace them with actual offsets requested via a Kafka consumer.
 *
 * @param consumer The consumer connected to the lead broker
 * @param partitions The list of partitions we need offsets for
 */
private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
        SimpleConsumer consumer,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
        if (part.getOffset() == OffsetRequest.EarliestTime() || part.getOffset() == OffsetRequest.LatestTime()) {
            requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(part.getOffset(), 1));
        }
    }
    requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
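A small hedged helper sketch (not in the original source) that makes the membership test above explicit; it relies only on the fact that the two sentinels are plain long constants:

// Hypothetical helper: true when the stored offset is one of the two
// request-time sentinels rather than a concrete partition position.
// kafka.api.OffsetRequest defines LatestTime() == -1L and EarliestTime() == -2L.
private static boolean isSentinelOffset(long offset) {
    return offset == OffsetRequest.EarliestTime() || offset == OffsetRequest.LatestTime();
}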
@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
        Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
} else {
    if (jsonOffset == null) {
        lastCommittedOffset = consumer.getOffset(config.topic, partition, kafka.api.OffsetRequest.LatestTime());
    } else {
        lastCommittedOffset = jsonOffset;
    }
}
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

    // The API implies that this will always return all of the offsets, so it seems
    // a partition cannot have more than Integer.MAX_VALUE - 1 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available,
    // so if segments have been dropped, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(
        ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
        kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);

    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'");
    }
    return offsetResponse.offsets(topicName, partitionId);
}
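An illustrative caller (the topic name and the ordering note are assumptions based on the legacy API's documented behaviour): the broker returns segment boundary offsets in descending order, so the two ends of the array give the log-end and earliest-available offsets.

long[] offsets = findAllOffsets(consumer, "events", 0);   // "events" is illustrative
long logEndOffset = offsets[0];                           // highest boundary comes first
long earliestOffset = offsets[offsets.length - 1];        // oldest segment still retained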
} else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
    partition.setOffset(OffsetRequest.LatestTime());
} else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) {
    Long committedOffset = zookeeperOffsetHandler.getCommittedOffset(partition.getKafkaTopicPartition());
    offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
} else if (offsetCriteria.isSmallest()) {
    offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(
        topicAndPartition,
        new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1
        )
    );
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    OffsetResponse response;
    try {
        response = consumer.getOffsetsBefore(request);
    } catch (Exception e) {
        ensureNotInterrupted(e);
        log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
        return -1;
    }
    if (response.hasError()) {
        log.error(
            "error fetching data Offset from the Broker [%s]. reason: [%s]",
            leaderBroker.host(), response.errorCode(topic, partitionId)
        );
        return -1;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}
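Illustrative call sites for the method above (the error handling is a sketch, not from the original source); note that -1 here is this method's own failure sentinel, distinct from the request-time sentinels sent to the broker.

long earliestAvailable = getOffset(true);  // sends EarliestTime(): first retained offset
long logEnd = getOffset(false);            // sends LatestTime(): next offset to be written
if (earliestAvailable < 0 || logEnd < 0) {
    // lookup failed; retry against the (possibly re-elected) leader or give up
}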
private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
    TopicAndPartition topicAndPartition =
        new TopicAndPartition(topicPartition.getTopic(), topicPartition.getPartition());
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));

    final String clientName = getClientName(topicPartition);
    OffsetRequest request =
        new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Error fetching offset data. Reason: "
            + response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
    }
    long[] offsets = response.offsets(topicPartition.getTopic(), topicPartition.getPartition());
    // LatestTime() yields the log-end offset (the next offset to be written),
    // so the offset of the last existing message is one less.
    return offsets[0] - 1;
}
public static BaseRichSpout getSpout(StormBenchConfig conf) {
    String topic = conf.topic;
    String consumerGroup = conf.consumerGroup;
    String zkHost = conf.zkHost;

    BrokerHosts brokerHosts = new ZkHosts(zkHost);
    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, "", consumerGroup);
    spoutConfig.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
    spoutConfig.ignoreZkOffsets = true;
    spoutConfig.startOffsetTime = OffsetRequest.LatestTime();
    return new KafkaSpout(spoutConfig);
}
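A sketch of wiring this spout into a topology (the builder and component names are illustrative assumptions, not from the original source):

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafka-reader", getSpout(conf), 1);
// With ignoreZkOffsets = true and startOffsetTime = LatestTime(), every
// (re)start begins reading from the tail of the topic, not a stored offset.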
private static long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
            response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
        return -1;
    }
    long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    return offsets[0];
}
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

    // The API implies that this will return all of the offsets up to the requested
    // maximum (capped here at 10,000 rather than Integer.MAX_VALUE - 1).
    //
    // This also assumes that the lowest value returned will be the first segment available,
    // so if segments have been dropped, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 10000);
    OffsetRequest offsetRequest = new OffsetRequest(
        ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
        kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);

    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        LOGGER.warn(format("Offset response has error: %d", errorCode));
        throw new RakamException("could not fetch data from Kafka, error code is '" + errorCode + "'",
            HttpResponseStatus.INTERNAL_SERVER_ERROR);
    }
    long[] offsets = offsetResponse.offsets(topicName, partitionId);
    return offsets;
}
new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1);
public static ITridentDataSource getTridentSpout(StormBenchConfig conf, boolean opaque) {
    String topic = conf.topic;
    String consumerGroup = conf.consumerGroup;
    String zkHost = conf.zkHost;

    BrokerHosts brokerHosts = new ZkHosts(zkHost);
    TridentKafkaConfig tridentKafkaConfig = new TridentKafkaConfig(brokerHosts, topic, consumerGroup);
    tridentKafkaConfig.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
    tridentKafkaConfig.ignoreZkOffsets = true;
    tridentKafkaConfig.startOffsetTime = OffsetRequest.LatestTime();
    if (opaque) {
        return new OpaqueTridentKafkaSpout(tridentKafkaConfig);
    } else {
        return new TransactionalTridentKafkaSpout(tridentKafkaConfig);
    }
}
/**
 * Fetches the latest offset in Kafka.
 */
public long fetchLatestOffset() {
    return fetchOffsetBefore(kafka.api.OffsetRequest.LatestTime());
}
/**
 * Gets the maximum (latest) offset for the partition.
 *
 * @return the max offset
 */
private long getMaxOffset() {
    return getOffset(kafka.api.OffsetRequest.LatestTime());
}
public long getLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, kafka.api.PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request =
        new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
        throw new RuntimeException("Error fetching data offset from the broker. Reason: "
            + response.errorCode(topic, partition));
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}
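An end-to-end sketch of calling the method above; the host, port, timeout, buffer size, and names are illustrative assumptions. The legacy kafka.javaapi.consumer.SimpleConsumer must be pointed at the partition's leader broker.

// Constructor args: host, port, socket timeout (ms), receive buffer (bytes), client id.
SimpleConsumer consumer = new SimpleConsumer("broker-1.example.com", 9092, 100000, 64 * 1024, "offset-checker");
try {
    long latest = getLatestOffset(consumer, "events", 0, "offset-checker");
    System.out.println("log-end offset = " + latest);
} finally {
    consumer.close();
}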