private void handleKafkaException(KafkaException kafkaException) { if (kafkaException instanceof TimeoutException) { //This might happen if the producer cannot send data to the Kafka cluster and thus, its internal buffer fills up. LOG.error(TIMEOUT_CONFIG_HINT, kafkaException.getMessage()); } if (KafkaUtils.exceptionIsFatal(kafkaException)) { LOG.error(String.format(ABORT_MSG, writerId, kafkaException.getMessage(), topic, -1L)); sendExceptionRef.compareAndSet(null, kafkaException); } else { LOG.error(ACTION_ABORT, writerId, topic, writeSemantic, kafkaException.getMessage()); sendExceptionRef.compareAndSet(null, kafkaException); } }
/**
 * Dispatches each SASL callback: OAuth bearer token callbacks and SASL extensions
 * callbacks are handled; anything else is rejected.
 *
 * @param callbacks callbacks supplied by the SASL framework
 * @throws IOException if the underlying handler throws a {@link KafkaException}
 *         (the cause is preserved, as required by the CallbackHandler contract)
 * @throws UnsupportedCallbackException for any callback type this handler does not support
 * @throws IllegalStateException if {@code configure} has not been called yet
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    if (!configured()) {
        throw new IllegalStateException("Callback handler not configured");
    }
    for (Callback callback : callbacks) {
        // Braces added on every branch (was brace-less); behavior unchanged.
        if (callback instanceof OAuthBearerTokenCallback) {
            try {
                handleTokenCallback((OAuthBearerTokenCallback) callback);
            } catch (KafkaException e) {
                // CallbackHandler only declares IOException; wrap and keep the cause.
                throw new IOException(e.getMessage(), e);
            }
        } else if (callback instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(callback);
        }
    }
}
/**
 * Dispatches each SASL callback: OAuth bearer token callbacks and SASL extensions
 * callbacks are handled; anything else is rejected.
 *
 * @param callbacks callbacks supplied by the SASL framework
 * @throws IOException if the underlying handler throws a {@link KafkaException}
 *         (the cause is preserved, as required by the CallbackHandler contract)
 * @throws UnsupportedCallbackException for any callback type this handler does not support
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (Callback callback : callbacks) {
        // Bracing made consistent (originally only the second branch was braced).
        if (callback instanceof OAuthBearerTokenCallback) {
            try {
                handleCallback((OAuthBearerTokenCallback) callback);
            } catch (KafkaException e) {
                // CallbackHandler only declares IOException; wrap and keep the cause.
                throw new IOException(e.getMessage(), e);
            }
        } else if (callback instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(callback);
        }
    }
}
// NOTE(review): fragment (the enclosing method continues beyond this view).
// Matching on exception message text is brittle; also toLowerCase() without
// Locale.ROOT is locale-sensitive, and getMessage() can be null for some
// KafkaExceptions, which would NPE here — TODO confirm upstream guarantees.
if (ke.getMessage().toLowerCase().contains("record is corrupt")) { for (TopicPartition tp : _consumer.assignment()) { long position = _consumer.position(tp);
/**
 * Verifies that when KafkaConsumer construction fails (unresolvable bootstrap server),
 * every metrics reporter that was initialized is also closed, and the expected
 * construction-failure message is raised.
 */
@Test
public void testConstructorClose() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());

    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    // try-with-resources (matching the producer-side test) so the consumer is
    // closed rather than leaked if construction ever unexpectedly succeeds.
    try (KafkaConsumer<byte[], byte[]> ignored =
             new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        Assert.fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        // The reporter must have been both initialized and closed exactly once more.
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka consumer", e.getMessage());
    }
}
/**
 * Verifies that a failed KafkaProducer construction (unresolvable bootstrap server)
 * closes every metrics reporter it initialized and surfaces the expected message.
 */
@Test
public void testConstructorFailureCloseResource() {
    final Properties config = new Properties();
    config.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    config.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
    config.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());

    final int initCountBefore = MockMetricsReporter.INIT_COUNT.get();
    final int closeCountBefore = MockMetricsReporter.CLOSE_COUNT.get();
    try (KafkaProducer<byte[], byte[]> ignored =
             new KafkaProducer<>(config, new ByteArraySerializer(), new ByteArraySerializer())) {
        fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        // Exactly one more init and one more close: the reporter was cleaned up.
        assertEquals(initCountBefore + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(closeCountBefore + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka producer", e.getMessage());
    }
}
/**
 * Returns the set of Kafka topic names, each exposed as a table name.
 * The topic list is fetched once via a short-lived consumer and cached in
 * {@code tableNames} for the lifetime of this object.
 *
 * NOTE(review): the lazy initialization is not synchronized — concurrent
 * callers could each create a consumer and race on the cache; confirm this
 * schema object is only used single-threaded.
 *
 * @throws UserException (dataReadError) if listing topics fails
 */
@Override public Set<String> getTableNames() { if (tableNames == null) { try (KafkaConsumer<?, ?> kafkaConsumer = new KafkaConsumer<>(plugin.getConfig().getKafkaConsumerProps())) { tableNames = kafkaConsumer.listTopics().keySet(); } catch(KafkaException e) { throw UserException.dataReadError(e).message("Failed to get tables information").addContext(e.getMessage()) .build(logger); } } return tableNames; } }
/** * Test the case where the client makes a post KIP-74 FetchRequest, but the server replies with only a * partial request. For v3 and later FetchRequests, the implementation of KIP-74 changed the behavior * so that at least one message is always returned. Therefore, this case should not happen, and it indicates * that an internal error has taken place. */ @Test public void testFetchRequestInternalError() { makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("RecordTooLargeException should have been raised"); } catch (KafkaException e) { assertTrue(e.getMessage().startsWith("Failed to make progress reading messages")); // the position should not advance since no data has been returned assertEquals(0, subscriptions.position(tp0).longValue()); } }
private void handleKafkaException(KafkaException kafkaException) { if (kafkaException instanceof TimeoutException) { //This might happen if the producer cannot send data to the Kafka cluster and thus, its internal buffer fills up. LOG.error(TIMEOUT_CONFIG_HINT, kafkaException.getMessage()); } if (KafkaUtils.exceptionIsFatal(kafkaException)) { LOG.error(String.format(ABORT_MSG, writerId, kafkaException.getMessage(), topic, -1L)); sendExceptionRef.compareAndSet(null, kafkaException); } else { LOG.error(ACTION_ABORT, writerId, topic, writeSemantic, kafkaException.getMessage()); sendExceptionRef.compareAndSet(null, kafkaException); } }
/**
 * Polls the shared Kafka consumer once (500 ms timeout) and enqueues every
 * received record onto the pending-message queue. Always returns {@code true}
 * so the task can be rescheduled.
 *
 * NOTE(review): KafkaException is only logged at debug level (easy to miss in
 * production logs) and BlockingQueue#offer's return value is ignored, so
 * records are silently dropped if fPendingMsgs is full — confirm both are
 * intentional best-effort behavior.
 */
@Override public Boolean call() throws Exception { try { ConsumerRecords<String, String> records; synchronized (kConsumer) { records = kConsumer.poll(500); } for (ConsumerRecord<String, String> record : records) { fPendingMsgs.offer(record); } } catch (KafkaException x) { log.debug(fLogTag + ": KafkaException " + x.getMessage()); } catch (java.lang.IllegalStateException | java.lang.IllegalArgumentException x) { log.error(fLogTag + ": Illegal state/arg exception in Kafka consumer; dropping stream. " + x.getMessage()); } return true; } };
// NOTE(review): this span appears garbled/duplicated — statements repeat after
// unconditional `throw`s (unreachable code), several `if` conditions are missing
// their bodies/braces, and the braces do not balance, so it cannot compile as-is.
// It resembles the Kafka Streams assignor's configure() validation (TaskManager /
// assignmentErrorCode lookup); restore the original structure from upstream
// rather than patching line by line.
if (o == null) { final KafkaException fatalException = new KafkaException("TaskManager is not specified"); log.error(fatalException.getMessage(), fatalException); throw fatalException; log.error(fatalException.getMessage(), fatalException); throw fatalException; if (ai == null) { final KafkaException fatalException = new KafkaException("assignmentErrorCode is not specified"); log.error(fatalException.getMessage(), fatalException); throw fatalException; final KafkaException fatalException = new KafkaException(String.format("%s is not an instance of %s", ai.getClass().getName(), AtomicInteger.class.getName())); log.error(fatalException.getMessage(), fatalException); throw fatalException;