/**
 * Closes the underlying record stream, converting any {@link IOException}
 * into an unchecked {@link KafkaException} so callers need not handle it.
 */
@Override
public void close() {
    try {
        inputStream.close();
    } catch (IOException ioe) {
        throw new KafkaException("Failed to close record stream", ioe);
    }
} };
/**
 * Desanitize a name that was URL-encoded using {@link #sanitize(String)}.
 * This is used to obtain the desanitized version of node names in ZooKeeper.
 *
 * @param name the URL-encoded name
 * @return the decoded name
 * @throws KafkaException if UTF-8 is not supported — cannot happen on a
 *         compliant JVM, where UTF-8 is a mandatory charset
 */
public static String desanitize(String name) {
    String utf8 = StandardCharsets.UTF_8.name();
    try {
        return URLDecoder.decode(name, utf8);
    } catch (UnsupportedEncodingException impossible) {
        throw new KafkaException(impossible);
    }
}
@Override public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) { try { // Set input buffer (uncompressed) to 16 KB (none by default) and output buffer (compressed) to // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller passes a small // number of bytes to write (potentially a single byte) return new BufferedOutputStream(new GZIPOutputStream(buffer, 8 * 1024), 16 * 1024); } catch (Exception e) { throw new KafkaException(e); } }
@Override
public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
    // The zstd stream is constructed reflectively via a MethodHandle, whose
    // invoke() declares Throwable — wrap anything it throws.
    try {
        Object stream = ZstdConstructors.OUTPUT.invoke(buffer);
        return (OutputStream) stream;
    } catch (Throwable t) {
        throw new KafkaException(t);
    }
}
@Override
void handleFailure(Throwable throwable) {
    // Complete the aggregate future with the failure as its sole element so
    // that waiting callers are unblocked.
    KafkaException wrapped =
        new KafkaException("Failed to find brokers to send ListGroups", throwable);
    all.complete(Collections.singletonList(wrapped));
} }, nowMetadata);
@Override
public InputStream wrapForInput(ByteBuffer inputBuffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
    // Magic v0 batches get special handling, signalled via the stream's third flag.
    boolean isMagicV0 = messageVersion == RecordBatch.MAGIC_VALUE_V0;
    try {
        return new KafkaLZ4BlockInputStream(inputBuffer, decompressionBufferSupplier, isMagicV0);
    } catch (Throwable t) {
        throw new KafkaException(t);
    }
} },
/**
 * Go through incomplete batches and abort them.
 */
private void abortBatches() {
    // Forced-close path: fail every in-flight batch with a descriptive cause.
    abortBatches(new KafkaException("Producer is closed forcefully."));
}
/**
 * Unregisters the given MBean from the platform MBean server, if present.
 * Wraps any JMX failure in an unchecked {@link KafkaException}.
 */
private void unregister(KafkaMbean mbean) {
    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
    try {
        // Unregistering an absent name throws, so check registration first.
        if (mbeanServer.isRegistered(mbean.name())) {
            mbeanServer.unregisterMBean(mbean.name());
        }
    } catch (JMException e) {
        throw new KafkaException("Error unregistering mbean", e);
    }
}
/**
 * Reads {@code size} bytes from the file channel at {@code position} and
 * converts them into an in-memory record batch.
 *
 * @param size        the number of bytes to read
 * @param description used in the error message if the read comes up short
 * @throws KafkaException if the bytes cannot be read
 */
private RecordBatch loadBatchWithSize(int size, String description) {
    FileChannel channel = fileRecords.channel();
    ByteBuffer batchBuffer = ByteBuffer.allocate(size);
    try {
        Utils.readFullyOrFail(channel, batchBuffer, position, description);
        // Reset to the start so the whole batch is visible to the parser.
        batchBuffer.rewind();
        return toMemoryRecordBatch(batchBuffer);
    } catch (IOException e) {
        throw new KafkaException("Failed to load record batch at position " + position + " from " + fileRecords, e);
    }
}
/**
 * Re-registers the given MBean: removes any existing registration under the
 * same name, then registers the new instance.
 */
private void reregister(KafkaMbean mbean) {
    unregister(mbean);
    MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
    try {
        mbeanServer.registerMBean(mbean, mbean.name());
    } catch (JMException e) {
        throw new KafkaException("Error registering mbean " + mbean.name(), e);
    }
}
@Override
public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
    // The snappy stream is constructed reflectively via a MethodHandle, whose
    // invoke() declares Throwable — wrap anything it throws.
    try {
        Object stream = SnappyConstructors.INPUT.invoke(new ByteBufferInputStream(buffer));
        return (InputStream) stream;
    } catch (Throwable t) {
        throw new KafkaException(t);
    }
} },
@Override
protected T makeNext() {
    // Pull the next batch from the log stream; null signals exhaustion.
    try {
        T nextBatch = logInputStream.nextBatch();
        return nextBatch == null ? allDone() : nextBatch;
    } catch (IOException e) {
        throw new KafkaException(e);
    }
} }
@Override
protected Record readNext(long baseOffset, long firstTimestamp, int baseSequence, Long logAppendTime) {
    try {
        return DefaultRecord.readFrom(inputStream, baseOffset, firstTimestamp, baseSequence, logAppendTime);
    } catch (EOFException e) {
        // The stream ended before the declared batch size was consumed:
        // the batch header over-stated its length.
        throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
    } catch (IOException e) {
        throw new KafkaException("Failed to decompress record stream", e);
    }
}
/**
 * Fails fast if the transaction manager is not in a state where a send is
 * permitted: a prior fatal/abortable error, a missing producer id, or a
 * transactional producer outside an open transaction.
 *
 * @throws KafkaException        if a previous request failed with errors
 * @throws IllegalStateException if transactional preconditions are not met
 */
public synchronized void failIfNotReadyForSend() {
    if (hasError()) {
        throw new KafkaException("Cannot perform send because at least one previous transactional or " +
                "idempotent request has failed with errors.", lastError);
    }
    // Idempotent (non-transactional) producers have no further preconditions.
    if (!isTransactional())
        return;
    if (!hasProducerId()) {
        throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " +
                "when transactions are enabled.");
    }
    if (currentState != State.IN_TRANSACTION)
        throw new IllegalStateException("Cannot call send in state " + currentState);
}
@Override
public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize, MemoryPool memoryPool) throws KafkaException {
    try {
        PlaintextTransportLayer transportLayer = new PlaintextTransportLayer(key);
        // Authenticator construction is deferred until the channel needs it.
        Supplier<Authenticator> authenticatorCreator =
            () -> new PlaintextAuthenticator(configs, transportLayer, listenerName);
        MemoryPool pool = memoryPool != null ? memoryPool : MemoryPool.NONE;
        return new KafkaChannel(id, transportLayer, authenticatorCreator, maxReceiveSize, pool);
    } catch (Exception e) {
        log.warn("Failed to create channel due to ", e);
        throw new KafkaException(e);
    }
}
@Override
public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
    onSendCount++;
    // Test hook: optionally simulate an interceptor failure.
    if (throwExceptionOnSend)
        throw new KafkaException("Injected exception in AppendProducerInterceptor.onSend");
    String appendedValue = record.value().concat(appendStr);
    return new ProducerRecord<>(record.topic(), record.partition(), record.key(), appendedValue);
}
// Adding a partition after a fatal error must raise KafkaException.
@Test(expected = KafkaException.class)
public void testMaybeAddPartitionToTransactionAfterFatalError() {
    final long producerId = 13131L;
    final short producerEpoch = 1;
    doInitTransactions(producerId, producerEpoch);
    transactionManager.transitionToFatalError(new KafkaException());
    transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0));
}
// Adding a partition after an abortable error must raise KafkaException.
@Test(expected = KafkaException.class)
public void testMaybeAddPartitionToTransactionAfterAbortableError() {
    final long producerId = 13131L;
    final short producerEpoch = 1;
    doInitTransactions(producerId, producerEpoch);
    transactionManager.beginTransaction();
    transactionManager.transitionToAbortableError(new KafkaException());
    transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0));
}
// An idempotent (non-transactional) producer in the fatal-error state must
// refuse sends with KafkaException.
@Test(expected = KafkaException.class)
public void testFailIfNotReadyForSendIdempotentProducerFatalError() {
    TransactionManager idempotentManager = new TransactionManager();
    idempotentManager.transitionToFatalError(new KafkaException());
    idempotentManager.failIfNotReadyForSend();
}
// A partition still pending when an abortable error occurs must not accept
// sends, and the manager must report the abortable error.
@Test
public void testIsSendToPartitionAllowedWithPendingPartitionAfterAbortableError() {
    final long producerId = 13131L;
    final short producerEpoch = 1;
    doInitTransactions(producerId, producerEpoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    transactionManager.transitionToAbortableError(new KafkaException());
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    assertTrue(transactionManager.hasAbortableError());
}