@Override
public void close() {
    // Release the underlying stream; surface I/O failures as Kafka's unchecked exception type.
    try {
        inputStream.close();
    } catch (IOException ioe) {
        throw new KafkaException("Failed to close record stream", ioe);
    }
}
};
/**
 * Records a KafkaException raised during an asynchronous send so it can be
 * rethrown later, logging it first.
 *
 * <p>Only the first exception wins: {@code compareAndSet(null, ...)} keeps the
 * earliest failure and ignores subsequent ones. Note that both the fatal and
 * non-fatal branches store the exception — the branches differ only in how the
 * failure is logged.
 */
private void handleKafkaException(KafkaException kafkaException) {
    if (kafkaException instanceof TimeoutException) {
        //This might happen if the producer cannot send data to the Kafka cluster and thus, its internal buffer fills up.
        LOG.error(TIMEOUT_CONFIG_HINT, kafkaException.getMessage());
    }
    if (KafkaUtils.exceptionIsFatal(kafkaException)) {
        // NOTE(review): the -1L looks like a placeholder for an offset/checkpoint id in ABORT_MSG — confirm.
        LOG.error(String.format(ABORT_MSG, writerId, kafkaException.getMessage(), topic, -1L));
        sendExceptionRef.compareAndSet(null, kafkaException);
    } else {
        LOG.error(ACTION_ABORT, writerId, topic, writeSemantic, kafkaException.getMessage());
        sendExceptionRef.compareAndSet(null, kafkaException);
    }
}
private Integer getNumPartitions(String topic) { try { return this.kafka.partitionsFor(topic).size(); //returns 1 for new topics } catch (KafkaException e) { LOGGER.error("Topic '" + topic + "' name does not exist. Exception: " + e.getLocalizedMessage()); throw e; } }
final KafkaException fatalException = new KafkaException("TaskManager is not specified"); log.error(fatalException.getMessage(), fatalException); throw fatalException; final KafkaException fatalException = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), TaskManager.class.getName())); log.error(fatalException.getMessage(), fatalException); throw fatalException; final KafkaException fatalException = new KafkaException("assignmentErrorCode is not specified"); log.error(fatalException.getMessage(), fatalException); throw fatalException; final KafkaException fatalException = new KafkaException(String.format("%s is not an instance of %s", ai.getClass().getName(), AtomicInteger.class.getName())); log.error(fatalException.getMessage(), fatalException); throw fatalException;
private void assertFatalError(Class<? extends RuntimeException> cause) { assertTrue(transactionManager.hasError()); try { transactionManager.beginAbort(); fail("Should have raised " + cause.getSimpleName()); } catch (KafkaException e) { assertTrue(cause.isAssignableFrom(e.getCause().getClass())); assertTrue(transactionManager.hasError()); } // Transaction abort cannot clear fatal error state try { transactionManager.beginAbort(); fail("Should have raised " + cause.getSimpleName()); } catch (KafkaException e) { assertTrue(cause.isAssignableFrom(e.getCause().getClass())); assertTrue(transactionManager.hasError()); } }
/**
 * Asserts the transaction manager is in an abortable (non-fatal) error state:
 * a commit attempt must throw with the expected cause, but a subsequent abort
 * must clear the error completely.
 */
private void assertAbortableError(Class<? extends RuntimeException> cause) {
    try {
        transactionManager.beginCommit();
        fail("Should have raised " + cause.getSimpleName());
    } catch (KafkaException raised) {
        assertTrue(cause.isAssignableFrom(raised.getCause().getClass()));
        assertTrue(transactionManager.hasError());
    }

    // Unlike a fatal error, an abortable error is cleared by beginAbort().
    assertTrue(transactionManager.hasError());
    transactionManager.beginAbort();
    assertFalse(transactionManager.hasError());
}
/**
 * Returns true when the underlying stream is exhausted. Note this performs a
 * destructive single-byte read: if bytes remain, one of them is consumed.
 */
@Override
protected boolean ensureNoneRemaining() {
    final int nextByte;
    try {
        nextByte = inputStream.read();
    } catch (IOException ioe) {
        throw new KafkaException("Error checking for remaining bytes after reading batch", ioe);
    }
    return nextByte == -1;
}
/**
 * Dispatches each SASL callback to its specific handler, wrapping any
 * KafkaException in an IOException as required by the CallbackHandler contract.
 *
 * @throws IllegalStateException if configure() has not been called yet
 * @throws UnsupportedCallbackException for callback types this handler does not support
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    if (!configured()) {
        throw new IllegalStateException("Callback handler not configured");
    }
    for (Callback callback : callbacks) {
        if (callback instanceof OAuthBearerTokenCallback) {
            try {
                handleTokenCallback((OAuthBearerTokenCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else if (callback instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(callback);
        }
    }
}
/**
 * Verifies that send() on a fenced mock producer throws a KafkaException
 * whose cause is ProducerFencedException.
 */
@Test
public void shouldThrowOnSendIfProducerGotFenced() {
    buildMockProducer(true);
    producer.initTransactions();
    // Simulate another producer with the same transactional id taking over.
    producer.fenceProducer();
    try {
        producer.send(null);
        fail("Should have thrown as producer is fenced off");
    } catch (KafkaException e) {
        assertTrue("The root cause of the exception should be ProducerFenced", e.getCause() instanceof ProducerFencedException);
    }
}
/**
 * Desanitize name that was URL-encoded using {@link #sanitize(String)}. This
 * is used to obtain the desanitized version of node names in ZooKeeper.
 *
 * @param name the sanitized (URL-encoded) name
 * @return the original, decoded name
 */
public static String desanitize(String name) {
    try {
        return URLDecoder.decode(name, StandardCharsets.UTF_8.name());
    } catch (UnsupportedEncodingException impossible) {
        // UTF-8 support is mandated by the JVM spec, so this path is effectively unreachable.
        throw new KafkaException(impossible);
    }
}
/**
 * Routes each SASL callback to the matching handler method, converting any
 * KafkaException into an IOException per the CallbackHandler contract.
 *
 * @throws UnsupportedCallbackException for callback types this handler does not support
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    for (Callback cb : callbacks) {
        if (cb instanceof OAuthBearerTokenCallback) {
            try {
                handleCallback((OAuthBearerTokenCallback) cb);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else if (cb instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) cb);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(cb);
        }
    }
}
fail("Expected an InvalidConfigurationException"); } catch (KafkaException e) { assertEquals(InvalidConfigurationException.class, e.getCause().getClass());
/**
 * Instantiate the class via its no-argument constructor.
 *
 * @param c the class to instantiate; must not be null
 * @return a new instance of {@code c}
 * @throws KafkaException if {@code c} is null, declares no no-argument
 *         constructor, or construction fails for any other reason
 */
public static <T> T newInstance(Class<T> c) {
    if (c == null) {
        throw new KafkaException("class cannot be null");
    }
    try {
        return c.getDeclaredConstructor().newInstance();
    } catch (NoSuchMethodException missingCtor) {
        throw new KafkaException("Could not find a public no-argument constructor for " + c.getName(), missingCtor);
    } catch (ReflectiveOperationException | RuntimeException constructionFailure) {
        throw new KafkaException("Could not instantiate class " + c.getName(), constructionFailure);
    }
}
/**
 * Verifies that when KafkaConsumer construction fails (unresolvable bootstrap
 * servers), the resources it already initialized are released: every
 * MetricsReporter that was init()-ed is also close()-d.
 */
@Test
public void testConstructorClose() {
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    // Intentionally invalid so the constructor fails part-way through.
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        Assert.fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        // Exactly one reporter was created and it must have been closed on failure.
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka consumer", e.getMessage());
    }
}
/**
 * Wraps {@code delegate} so that any throwable escaping it is recorded in the
 * {@code failure} holder (as a failed CheckResult) instead of silently killing
 * the worker thread. Interrupts are treated as a normal shutdown signal.
 */
Runnable guardFailures(final Runnable delegate) {
    return () -> {
        try {
            delegate.run();
        } catch (InterruptException e) {
            // Interrupts are normal on shutdown, intentionally swallow
        } catch (KafkaException e) {
            // Unwrap configuration problems so the recorded failure shows the root cause.
            // Note: reassigns the catch parameter; ConfigException is presumably a
            // KafkaException subtype, making the cast safe — verify against the Kafka version in use.
            if (e.getCause() instanceof ConfigException) e = (KafkaException) e.getCause();
            LOG.error("Kafka worker exited with exception", e);
            failure.set(CheckResult.failed(e));
        } catch (RuntimeException e) {
            LOG.error("Kafka worker exited with exception", e);
            failure.set(CheckResult.failed(e));
        } catch (Error e) {
            // Errors are wrapped because CheckResult.failed apparently expects an exception type.
            LOG.error("Kafka worker exited with error", e);
            failure.set(CheckResult.failed(new RuntimeException(e)));
        }
    };
}
}
/**
 * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only
 * possible to update the RecordBatch header.
 */
public void closeForRecordAppends() {
    // Guard clause: already closed, nothing to release.
    if (appendStream == CLOSED_STREAM) {
        return;
    }
    try {
        appendStream.close();
    } catch (IOException e) {
        throw new KafkaException(e);
    } finally {
        // Mark closed even if close() threw, so further appends are rejected.
        appendStream = CLOSED_STREAM;
    }
}
if (ke.getMessage().toLowerCase().contains("record is corrupt")) { for (TopicPartition tp : _consumer.assignment()) { long position = _consumer.position(tp);
throw (ProducerFencedException) kafkaException.getCause(); } else { throw new StreamsException(
private SecurityStore createKeystore(String type, String path, Password password, Password keyPassword) { if (path == null && password != null) { throw new KafkaException("SSL key store is not specified, but key store password is specified."); } else if (path != null && password == null) { throw new KafkaException("SSL key store is specified, but key store password is not specified."); } else if (path != null && password != null) { return new SecurityStore(type, path, password, keyPassword); } else return null; // path == null, clients may use this path with brokers that don't require client auth }
/**
 * Verifies that when KafkaProducer construction fails (unresolvable bootstrap
 * servers), the resources it already initialized are released: every
 * MetricsReporter that was init()-ed is also close()-d.
 */
@Test
public void testConstructorFailureCloseResource() {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    // Intentionally unresolvable host so the constructor fails part-way through.
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
    props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try (KafkaProducer<byte[], byte[]> ignored = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
        fail("should have caught an exception and returned");
    } catch (KafkaException e) {
        // Exactly one reporter was created and it must have been closed on failure.
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka producer", e.getMessage());
    }
}