Refine search
FetchResponse fetchResponse = _simpleConsumer.fetch( new FetchRequestBuilder().minBytes(100000).maxWait(timeoutMillis) .addFetch(_topic, _partition, startOffset, 500000).build()); if (!fetchResponse.hasError()) { final Iterable<MessageAndOffset> messageAndOffsetIterable = buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset); throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException { FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes) .maxWait(config.fetchWaitMaxMs).build(); FetchResponse fetchResponse = null; SimpleConsumer simpleConsumer = null; fetchResponse = simpleConsumer.fetch(req); } catch (Exception e) { if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException LOG.warn("Network error when fetching messages:", e); if (simpleConsumer != null) { String host = simpleConsumer.host(); int port = simpleConsumer.port(); simpleConsumer = null; throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e); if (fetchResponse.hasError()) { short code = fetchResponse.errorCode(topic, partition); if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) { long startOffset = getOffset(topic, partition, config.startOffsetTime); ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition); return msgs;
consumer = new SimpleConsumer("127.0.0.1", cluster.getKafkaServerPort(0), DEFAULT_SO_TIMEOUT, DEFAULT_BUFFER_SIZE, "foo"); FetchRequest req = new FetchRequestBuilder().addFetch(sinkTopic, 0, 0, DEFAULT_BUFFER_SIZE).build(); FetchResponse resp = consumer.fetch(req); ByteBufferMessageSet ms = resp.messageSet(sinkTopic, 0); assertTrue(ms.validBytes() > 0); } finally { if (consumer != null) { consumer.close();
SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 100000, ""); final FetchRequestBuilder fetchRequestBuilder = new FetchRequestBuilder() .clientId("KafDrop") .maxWait(5000) // todo: make configurable .minBytes(1); .addFetch(topicName, partitionId, currentOffset, 1024 * 1024) .build(); FetchResponse fetchResponse = consumer.fetch(fetchRequest); final ByteBufferMessageSet messageSet = fetchResponse.messageSet(topicName, partitionId); if (messageSet.validBytes() <= 0) break;
/**
 * Issues a single fetch request to Kafka for messages of this reader's
 * topic-partition, starting at the given offset.
 *
 * @param consumer the consumer to fetch through; its client id is reused for the request
 * @param offset   the offset at which to begin fetching
 * @return the broker's response to the fetch request
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
  FetchRequestBuilder requestBuilder = new FetchRequestBuilder()
      .clientId(consumer.clientId())
      .maxWait(MAX_WAIT)
      .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE);
  return consumer.fetch(requestBuilder.build());
}
" readOffset: " + readOffset); for (int i = 0; i < FETCH_RETRIES; i++) { FetchRequest req = new FetchRequestBuilder() .clientId(FETCH_CLIENT_NAME) .replicaId(Request.DebuggingConsumerId()) // this consumerId enable reads from follower .maxWait(FETCH_MAX_WAIT_MS) .minBytes(ConsumerConfig.MinFetchBytes()) .addFetch(topic, partition, readOffset, FETCH_BUFFER_SIZE) .build(); try { FetchResponse response = consumer.fetch(req);
/** Fetches a batch of messages at {@code offset} for this reader's topic-partition via {@code consumer}. */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
  final String topic = topicPart.getTopic();
  final int partition = topicPart.getPartition();
  FetchRequest fetchRequest = new FetchRequestBuilder()
      .clientId(consumer.clientId())
      .addFetch(topic, partition, offset, FETCH_SIZE)
      .maxWait(MAX_WAIT)
      .build();
  return consumer.fetch(fetchRequest);
}
" readOffset: " + readOffset); for (int i = 0; i < FETCH_RETRIES; i++) { FetchRequest req = new FetchRequestBuilder() .clientId(FETCH_CLIENT_NAME) .replicaId(Request.DebuggingConsumerId()) // this consumerId enable reads from follower .maxWait(FETCH_MAX_WAIT_MS) .minBytes(ConsumerConfig.MinFetchBytes()) .addFetch(topic, partition, readOffset, FETCH_BUFFER_SIZE) .build(); try { FetchResponse response = consumer.fetch(req);
ensureConsumer(previousLeader); FetchRequest request = new FetchRequestBuilder() .clientId(clientId) .addFetch(topic, partitionId, offset, FETCH_SIZE) .maxWait(timeoutMs) .minBytes(1) .build(); response = consumer.fetch(request); if (response == null || response.hasError()) { short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode(); log.warn("fetch %s - %s with offset %s encounters error: [%s]", topic, partitionId, offset, errorCode); return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) : EMPTY_MSGS;
/**
 * Fetches a message set from Kafka starting at {@code fetchOffset}, retrying on
 * transient broker errors up to MAX_KAFKA_FETCH_RETRIES times.
 *
 * @param fetchOffset offset to start fetching from; must be non-negative
 * @return the fetched message set for this consumer's topic-partition
 * @throws OffsetOutOfRangeException if the broker reports the requested offset is out of range
 * @throws RuntimeException if fetching keeps failing after MAX_KAFKA_FETCH_RETRIES attempts
 */
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  int failureCount = 0;
  while (true) {
    SimpleConsumer consumer = getConsumer();
    FetchRequest req = new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
        .maxWait(fetchTimeoutMs)
        .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (!fetchResponse.hasError()) {
      return fetchResponse.messageSet(topic, partition);
    }
    short errorCode = fetchResponse.errorCode(topic, partition);
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
        consumer.host(), consumer.port(), topic, partition, errorCode);
    // An out-of-range offset can never succeed on retry, so surface it immediately.
    // (Fix: previously the retry-cap check ran first, so an out-of-range error on the
    // final attempt was masked by a generic RuntimeException instead of the declared
    // OffsetOutOfRangeException.)
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
          "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(
          String.format("Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
              consumer.host(), consumer.port(), topic, partition, errorCode));
    }
    // Drop the (possibly stale) connection so the next attempt re-resolves the leader.
    closeConsumer();
  }
}
if (messageAndOffsetIterator == null) { log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages); FetchRequest req = new FetchRequestBuilder() .clientId("presto-worker-" + Thread.currentThread().getName()) .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE) .build(); FetchResponse fetchResponse = consumer.fetch(req); if (fetchResponse.hasError()) { short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId()); log.warn("Fetch response has error: %d", errorCode); throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'"); messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
/**
 * Fetches a message set from Kafka at {@code fetchOffset}, retrying transient broker
 * errors until MAX_KAFKA_FETCH_RETRIES failures have accumulated.
 *
 * @param fetchOffset non-negative offset to start fetching from
 * @return the message set for this consumer's topic-partition
 * @throws OffsetOutOfRangeException if the broker reports the offset out of range
 */
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  for (int attempts = 0; ; ) {
    SimpleConsumer kafkaConsumer = getConsumer();
    FetchResponse response = kafkaConsumer.fetch(new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
        .maxWait(fetchTimeoutMs)
        .build());
    if (!response.hasError()) {
      return response.messageSet(topic, partition);
    }
    short kafkaError = response.errorCode(topic, partition);
    attempts++;
    if (attempts >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(
          String.format("Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
              kafkaConsumer.host(), kafkaConsumer.port(), topic, partition, kafkaError));
    }
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
        kafkaConsumer.host(), kafkaConsumer.port(), topic, partition, kafkaError);
    // Retrying cannot help an out-of-range offset; propagate it to the caller.
    if (kafkaError == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
          "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    // Tear the connection down so the next iteration reconnects fresh.
    closeConsumer();
  }
}
consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId); FetchRequestBuilder frb = new FetchRequestBuilder(); frb.clientId(clientId); frb.maxWait(maxWait); frb.minBytes(minBytes); frb.addFetch( partition.getKafkaTopicPartition().getTopic(), partition.getKafkaTopicPartition().getPartition(), kafka.api.FetchRequest fetchRequest = frb.build(); LOG.debug("Issuing fetch request {}", fetchRequest); fetchResponse = consumer.fetch(fetchRequest); consumer.close(); } catch (Throwable t) { LOG.warn("Error while closing consumer connection", t); short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition()); final KafkaTopicPartitionState<TopicAndPartition> currentPartition = partitionsIterator.next(); final ByteBufferMessageSet messageSet = fetchResponse.messageSet( currentPartition.getTopic(), currentPartition.getPartition());
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException { FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes) .maxWait(config.fetchWaitMaxMs).build(); FetchResponse fetchResponse = null; SimpleConsumer simpleConsumer = null; fetchResponse = simpleConsumer.fetch(req); } catch (Exception e) { if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException String host = simpleConsumer.host(); int port = simpleConsumer.port(); simpleConsumer = null; throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e); if (fetchResponse.hasError()) { fetchResponseCode = fetchResponse.errorCode(topic, partition); if (fetchResponseCode == ErrorMapping.OffsetOutOfRangeCode()) { ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition); return msgs;
final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes(); final String clientName = getClientName(topicPartition); kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName) .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset, MAX_MESSAGE_SIZE_BYTES) .build(); FetchResponse response = consumer.fetch(request); if (response.hasError()) { consumer.close(); int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()); MessageAndOffset messageAndOffset = response.messageSet( topicPartition.getTopic(), topicPartition.getPartition()).iterator().next(); byte[] keyBytes = null;
/**
 * Consumes messages from partition 0 of {@code topic} with the legacy SimpleConsumer API
 * and prints each fetched batch via {@code printMessages}.
 *
 * @param topic           the topic to read from (partition 0 only)
 * @param nbMessageToRead number of fetch iterations to perform (see NOTE below)
 * @throws UnsupportedEncodingException if payload decoding in {@code printMessages} fails
 */
public void consumeMessagesWithOldApi(String topic, int nbMessageToRead) throws UnsupportedEncodingException {
    // NOTE(review): buffer size of 2 bytes looks suspiciously small for a consumer — confirm intended.
    SimpleConsumer simpleConsumer = new SimpleConsumer(kafkaHostname, kafkaPort, 30000, 2, "test");
    System.out.println("Testing single fetch");
    // Request up to 100 bytes from partition 0 starting at offset 0.
    kafka.api.FetchRequest req = new FetchRequestBuilder()
        .clientId("test")
        .addFetch(topic, 0, 0L, 100)
        .build();
    // NOTE(review): the same request (always offset 0) is re-issued each iteration, and numRead
    // counts fetches rather than individual messages — presumably fine for this test, but verify.
    while (numRead != nbMessageToRead) {
        FetchResponse fetchResponse = simpleConsumer.fetch(req);
        printMessages(fetchResponse.messageSet(topic, 0));
        numRead++;
    }
}
/**
 * Low-level consumer demo: issues one multi-partition fetch against broker kafka0:9092
 * and prints every message returned for partition 0 of {@code topic1}.
 */
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    // One request covering partitions 0, 1 and 2 with differing max byte counts.
    FetchRequestBuilder builder = new FetchRequestBuilder().clientId(clientID);
    builder.addFetch(topic, 0, 0L, 50);
    builder.addFetch(topic, 1, 0L, 5000);
    builder.addFetch(topic, 2, 0L, 1000000);
    FetchResponse fetchResponse = simpleConsumer.fetch(builder.build());
    ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset entry : messageSet) {
        ByteBuffer payload = entry.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + entry.offset() + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
/**
 * Fetches messages from the given topic, partition and offset using the provided {@link SimpleConsumer}.
 *
 * @return A {@link ByteBufferMessageSet} containing the messages fetched for the topic-partition
 *
 * @throws OffsetOutOfRangeException if the given offset is out of range.
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition.
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with error.
 */
public static ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, String topic, int partition,
                                                 int fetchSize, long requestOffset) throws KafkaException {
  FetchRequest fetchRequest = new FetchRequestBuilder()
      .clientId(consumer.clientId())
      .addFetch(topic, partition, requestOffset, fetchSize)
      .build();
  FetchResponse response = consumer.fetch(fetchRequest);
  if (!response.hasError()) {
    return response.messageSet(topic, partition);
  }
  // Map the Kafka error code to its typed exception and raise it.
  throw Errors.forCode(response.errorCode(topic, partition)).exception();
}
/**
 * Issues a single fetch for the given topic, partition and offset through {@code consumer}.
 *
 * @return the fetched {@link ByteBufferMessageSet} for the topic-partition
 *
 * @throws OffsetOutOfRangeException if the given offset is out of range.
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition.
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with error.
 */
public static ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, String topic, int partition,
                                                 int fetchSize, long requestOffset) throws KafkaException {
  FetchRequestBuilder builder = new FetchRequestBuilder();
  builder.clientId(consumer.clientId());
  builder.addFetch(topic, partition, requestOffset, fetchSize);
  FetchResponse fetchResponse = consumer.fetch(builder.build());
  if (fetchResponse.hasError()) {
    short error = fetchResponse.errorCode(topic, partition);
    throw Errors.forCode(error).exception();
  }
  return fetchResponse.messageSet(topic, partition);
}
public static void main(String[] args) throws Exception { generateData(); SimpleConsumer simpleConsumer = new SimpleConsumer(kafka.examples.KafkaProperties.kafkaServerURL, kafka.examples.KafkaProperties.kafkaServerPort, kafka.examples.KafkaProperties.connectionTimeOut, FetchRequest req = new FetchRequestBuilder() .clientId(kafka.examples.KafkaProperties.clientId) .addFetch(kafka.examples.KafkaProperties.topic2, 0, 0L, 100) .build(); FetchResponse fetchResponse = simpleConsumer.fetch(req); printMessages((ByteBufferMessageSet) fetchResponse.messageSet(kafka.examples.KafkaProperties.topic2, 0)); }}); }}; req = new FetchRequestBuilder() .clientId(kafka.examples.KafkaProperties.clientId) .addFetch(kafka.examples.KafkaProperties.topic2, 0, 0L, 100) .addFetch(KafkaProperties.topic3, 0, 0L, 100) .build(); fetchResponse = simpleConsumer.fetch(req); int fetchReq = 0; for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) { for (Integer offset : entry.getValue()) { System.out.println("Response from fetch request no: " + ++fetchReq); printMessages((ByteBufferMessageSet) fetchResponse.messageSet(topic, offset));