@Override
public void close() throws IOException {
  int numOfConsumersNotClosed = 0;
  for (SimpleConsumer consumer : this.activeConsumers.values()) {
    if (consumer != null) {
      try {
        consumer.close();
      } catch (Exception e) {
        log.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()));
        numOfConsumersNotClosed++;
      }
    }
  }
  this.activeConsumers.clear();
  if (numOfConsumersNotClosed > 0) {
    throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
  }
}
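A minimal sketch, not taken from the original code, of how the activeConsumers map that close() iterates might be populated, caching one SimpleConsumer per broker; the helper name, timeout, buffer size, and client id below are illustrative assumptions.

private final Map<String, SimpleConsumer> activeConsumers = new HashMap<String, SimpleConsumer>();

// Hypothetical helper: lazily create and cache one SimpleConsumer per broker host:port,
// so close() above can later shut them all down.
private SimpleConsumer getConsumerForBroker(String host, int port) {
  String key = host + ":" + port;
  SimpleConsumer consumer = this.activeConsumers.get(key);
  if (consumer == null) {
    // soTimeout (ms), buffer size, and client id are assumed values, not from the original code.
    consumer = new SimpleConsumer(host, port, 30000, 64 * 1024, "exampleClientId");
    this.activeConsumers.put(key, consumer);
  }
  return consumer;
}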
if (simpleConsumer != null) {
  String host = simpleConsumer.host();
  int port = simpleConsumer.port();
  // Drop the broken connection before rethrowing so a later attempt reconnects.
  simpleConsumer = null;
  throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , "
      + e.getMessage(), e);
}
private String generateLogWarnForSkippedTopics(Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo,
    SimpleConsumer consumer) {
  StringBuilder sb = new StringBuilder();
  sb.append("The following topics will be skipped due to failure in fetching latest offsets from leader "
      + consumer.host() + ":" + consumer.port());
  for (TopicAndPartition topicAndPartition : offsetInfo.keySet()) {
    sb.append(" " + topicAndPartition.topic());
  }
  return sb.toString();
}
protected OffsetResponse getLatestOffsetResponse(SimpleConsumer consumer,
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo, JobContext context) {
  for (int i = 0; i < NUM_TRIES_FETCH_FROM_LEADER; i++) {
    try {
      OffsetResponse offsetResponse =
          consumer.getOffsetsBefore(new OffsetRequest(offsetInfo, kafka.api.OffsetRequest.CurrentVersion(),
              CamusJob.getKafkaClientName(context)));
      if (offsetResponse.hasError()) {
        throw new RuntimeException("offsetResponse has error.");
      }
      return offsetResponse;
    } catch (Exception e) {
      log.warn("Fetching offset from leader " + consumer.host() + ":" + consumer.port() + " has failed "
          + (i + 1) + " time(s). Reason: " + e.getMessage() + " " + (NUM_TRIES_FETCH_FROM_LEADER - i - 1)
          + " retries left.");
      if (i < NUM_TRIES_FETCH_FROM_LEADER - 1) {
        try {
          // Back off for a random interval that grows with the retry count.
          Thread.sleep((long) (Math.random() * (i + 1) * 1000));
        } catch (InterruptedException e1) {
          log.error("Caught interrupted exception between retries of getting latest offsets. " + e1.getMessage());
        }
      }
    }
  }
  return null;
}
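A minimal sketch, assuming the legacy kafka.javaapi SimpleConsumer client, of how the offsetInfo map passed to getLatestOffsetResponse could be assembled with one latest-offset request per topic-partition; the class and helper names below are illustrative, not from the original code.

import java.util.HashMap;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;

public class LatestOffsetRequestExample {
  // Hypothetical helper: request a single latest offset for the given topic-partition.
  static Map<TopicAndPartition, PartitionOffsetRequestInfo> latestOffsetInfo(String topic, int partition) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    // LatestTime() (-1L) asks the leader for its log-end offset; 1 = return a single offset.
    offsetInfo.put(new TopicAndPartition(topic, partition),
        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    return offsetInfo;
  }
}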
private SimpleConsumer mockConsumerThrowsExceptionForOffsetRangeCall(TopicMetadataResponse metadataResponse) {
  SimpleConsumer simpleConsumer = EasyMock.createMock(SimpleConsumer.class);
  mocks.add(simpleConsumer);
  EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject()))
      .andReturn(metadataResponse).times(1);
  EasyMock.expect(simpleConsumer.getOffsetsBefore((OffsetRequest) EasyMock.anyObject()))
      .andThrow(new RuntimeException()).times(3);
  EasyMock.expect(simpleConsumer.clientId()).andReturn(KAFKA_CLIENT_ID).times(1);
  simpleConsumer.close();
  EasyMock.expectLastCall().andVoid().times(2);
  EasyMock.expect(simpleConsumer.host()).andReturn("dummyHost").times(4);
  EasyMock.expect(simpleConsumer.port()).andReturn(8888).times(4);
  return simpleConsumer;
}
private SimpleConsumer mockSimpleConsumer(TopicMetadataResponse metadataResponse, OffsetResponse offsetResponse,
    FetchResponse fetchResponse) {
  SimpleConsumer simpleConsumer = EasyMock.createMock(SimpleConsumer.class);
  mocks.add(simpleConsumer);
  EasyMock.expect(simpleConsumer.send((TopicMetadataRequest) EasyMock.anyObject()))
      .andReturn(metadataResponse).times(1);
  EasyMock.expect(simpleConsumer.getOffsetsBefore((OffsetRequest) EasyMock.anyObject()))
      .andReturn(offsetResponse).anyTimes();
  simpleConsumer.close();
  EasyMock.expectLastCall().andVoid().anyTimes();
  EasyMock.expect(simpleConsumer.clientId()).andReturn(KAFKA_CLIENT_ID).times(1);
  EasyMock.expect(simpleConsumer.fetch((FetchRequest) EasyMock.anyObject())).andReturn(fetchResponse).times(1);
  EasyMock.expect(simpleConsumer.host()).andReturn("dummyHost").anyTimes();
  EasyMock.expect(simpleConsumer.port()).andReturn(8888).anyTimes();
  return simpleConsumer;
}
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  int failureCount = 0;
  while (true) {
    SimpleConsumer consumer = getConsumer();
    FetchRequest req = new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
        .maxWait(fetchTimeoutMs)
        .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (!fetchResponse.hasError()) {
      return fetchResponse.messageSet(topic, partition);
    }
    short errorCode = fetchResponse.errorCode(topic, partition);
    if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(
          String.format("Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
              consumer.host(), consumer.port(), topic, partition, errorCode));
    }
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
        consumer.host(), consumer.port(), topic, partition, errorCode);
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
          "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    closeConsumer();
  }
}
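A hedged sketch of the getConsumer() and closeConsumer() helpers the fetch loop above relies on: connect lazily to the partition leader and drop the connection after a failed fetch so the next attempt reconnects. The field names, timeout value, and leader-discovery details are assumptions, not the original implementation.

private SimpleConsumer consumer;  // assumed field holding the current connection, if any

private SimpleConsumer getConsumer() {
  if (consumer == null) {
    // leaderHost/leaderPort are assumed to come from earlier topic-metadata discovery.
    consumer = new SimpleConsumer(leaderHost, leaderPort, 30000 /* soTimeout ms */, BUFFER_SIZE_BYTES, clientName);
  }
  return consumer;
}

private void closeConsumer() {
  if (consumer != null) {
    consumer.close();
    consumer = null;  // force the next getConsumer() call to reconnect
  }
}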
// Raised when the offset response carries an error code for the partition:
throw new RuntimeException(String.format(
    "Error fetching offset data from broker %s:%d for topic %s, partition %d. Error code: %d",
    consumer.host(), consumer.port(), topic, partition, response.errorCode(topic, partition)));

// Raised when the response contains no offsets for the requested time:
throw new RuntimeException(String.format(
    "Got zero offsets in offset response for time %s from broker %s:%d for topic %s, partition %d",
    timeMillis, consumer.host(), consumer.port(), topic, partition));
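A hedged sketch of the surrounding offset lookup that produces the response checked above: request the offset for a given timestamp and read the first entry when neither error branch fires. Variable names are assumptions.

Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
    new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(timeMillis, 1));
OffsetResponse response = consumer.getOffsetsBefore(
    new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName));
long[] offsets = response.offsets(topic, partition);  // an empty array would trigger the second throw above
long offset = offsets[0];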
" from broker " + consumer.host() + ":" + consumer.port());
" from broker " + consumer.host() + ":" + consumer.port());