/**
 * Populates and returns this replicator's stats snapshot.
 *
 * <p>Fix: the original computed {@code stats.connected} from the {@code producer}
 * field directly and only later took a local snapshot for the connection fields,
 * so a concurrent producer swap could yield an inconsistent stats object
 * (e.g. {@code connected == true} with {@code outboundConnection == null}).
 * All fields now derive from a single snapshot of the reference.
 *
 * @return the shared {@code stats} instance, refreshed in place
 */
public ReplicatorStats getStats() {
    // Single snapshot: the producer field may be replaced concurrently.
    ProducerImpl producer = this.producer;
    stats.replicationBacklog = cursor.getNumberOfEntriesInBacklog();
    stats.connected = producer != null && producer.isConnected();
    stats.replicationDelayInSeconds = getReplicationDelayInSeconds();
    if (producer != null) {
        stats.outboundConnection = producer.getConnectionId();
        stats.outboundConnectedSince = producer.getConnectedSince();
    } else {
        stats.outboundConnection = null;
        stats.outboundConnectedSince = null;
    }
    return stats;
}
// NOTE(review): fragment — the anonymous SendCallback body continues beyond this excerpt.
// Future completed with the MessageId once the broker acknowledges the send.
CompletableFuture<MessageId> future = new CompletableFuture<>();
sendAsync(message, new SendCallback() {
    // Next callback in a chained (batched) send list; null until linked — TODO confirm against full class.
    SendCallback nextCallback = null;
    // Monotonic creation time, presumably used to measure send latency — verify in the omitted body.
    long createdAt = System.nanoTime();
/**
 * Flushes the currently accumulating batch to make room for a message that
 * does not fit, then adds that message to the (now empty) batch container.
 *
 * <p>Fix: corrected the "accomodate" typo in the debug log message.
 *
 * @param msg      message to enqueue once the pending batch has been sent
 * @param callback completion callback associated with {@code msg}
 * @param payload  payload buffer whose reference is released here after the
 *                 container has taken ownership of the message data
 */
private void doBatchSendAndAdd(MessageImpl msg, SendCallback callback, ByteBuf payload) {
    if (log.isDebugEnabled()) {
        log.debug("[{}] [{}] Closing out batch to accommodate large message with size {}", topic,
                producerName, msg.getDataBuffer().readableBytes());
    }
    batchMessageAndSend();
    batchMessageContainer.add(msg, callback);
    payload.release();
}
/**
 * Builds the wire-level Send command for a (possibly batched) message.
 *
 * <p>Fix: the original called {@code getClientCnx()} twice — once for the null
 * check and again to read the protocol version — so a connection dropping
 * between the two calls could throw an NPE. The connection is now read once.
 *
 * @param producerId        id this producer registered with the broker
 * @param sequenceId        sequence id of the message (or first in batch)
 * @param numMessages       number of messages carried in the payload
 * @param msgMetadata       metadata to serialize ahead of the payload
 * @param compressedPayload payload, already compressed if compression is on
 * @return the serialized command buffer
 * @throws IOException if command serialization fails
 */
private ByteBuf sendMessage(long producerId, long sequenceId, int numMessages, MessageMetadata msgMetadata,
        ByteBuf compressedPayload) throws IOException {
    ClientCnx cnx = getClientCnx();
    ChecksumType checksumType;
    // No connection yet (checksum stripped later if needed) or broker supports CRC32C.
    if (cnx == null || cnx.getRemoteEndpointProtocolVersion() >= brokerChecksumSupportedVersion()) {
        checksumType = ChecksumType.Crc32c;
    } else {
        checksumType = ChecksumType.None;
    }
    return Commands.newSend(producerId, sequenceId, numMessages, checksumType, msgMetadata, compressedPayload);
}
// NOTE(review): fragment — this excerpt appears to interleave connection-established and
// connection-failed handling; braces are unbalanced and several names (bqe, cause,
// createProducerTimeout) are declared outside this view. Code left byte-identical.
setClientCnx(cnx);
cnx.registerProducer(producerId, this);
if (getState() == State.Closing || getState() == State.Closed) {
    // Producer was closed while (re)connecting: stop the retry backoff.
    resetBackoff();
    if (!producerCreatedFuture.isDone() && isBatchMessagingEnabled()) {
        resendMessages(cnx);
        if (getState() == State.Closing || getState() == State.Closed) {
            failPendingMessages(cnx(), bqe);
            setState(State.Terminated);
            failPendingMessages(cnx(), (PulsarClientException) cause);
            producerCreatedFuture.completeExceptionally(cause);
            client.cleanupProducer(this);
        } else if (producerCreatedFuture.isDone() || //
                (cause instanceof PulsarClientException && isRetriableError((PulsarClientException) cause)
                        && System.currentTimeMillis() < createProducerTimeout)) {
            // Retriable failure still within the create timeout: schedule a reconnect.
            reconnectLater(cause);
        } else {
            // Non-retriable or timed out: fail the producer permanently.
            setState(State.Failed);
            producerCreatedFuture.completeExceptionally(cause);
            client.cleanupProducer(this);
// NOTE(review): fragment of the producer send path — braces are unbalanced in this excerpt
// and several locals (payload, msg, sequenceId, uncompressedSize) come from outside this
// view. Code left byte-identical; comments only.
if (!isValidProducerState(callback)) {
    return;
if (!canEnqueueRequest(callback)) {
    return;
if (!isBatchMessagingEnabled()) {
    // Non-batched path: compress eagerly and release the raw payload buffer.
    compressedPayload = compressor.encode(payload);
    payload.release();
msgMetadata.setSequenceId(sequenceId);
if (conf.getCompressionType() != CompressionType.NONE) {
    msgMetadata.setCompression(convertCompressionType(conf.getCompressionType()));
msgMetadata.setUncompressedSize(uncompressedSize);
if (isBatchMessagingEnabled()) {
    // Flush first when the batch hit its message-count or byte-size limit.
    if (batchMessageContainer.numMessagesInBatch == maxNumMessagesInBatch
            || batchMessageContainer.currentBatchSizeBytes >= BatchMessageContainer.MAX_MESSAGE_BATCH_SIZE_BYTES) {
        batchMessageAndSend();
doBatchSendAndAdd(msg, callback, payload);
// Single-message path: build the Send command, then recycle the metadata builder.
ByteBuf cmd = sendMessage(producerId, sequenceId, 1, msgMetadata.build(), compressedPayload);
msgMetadata.recycle();
ClientCnx cnx = cnx();
if (isConnected()) {
// NOTE(review): fragment — the body of resendMessages is truncated/scrambled in this
// excerpt (unbalanced braces; messagesToResend is declared outside this view).
private void resendMessages(ClientCnx cnx) {
    // Run on the connection's event loop so writes serialize with other channel I/O.
    cnx.ctx().channel().eventLoop().execute(() -> {
        synchronized (this) {
            if (getState() == State.Closing || getState() == State.Closed) {
                log.debug("[{}] [{}] No pending messages to resend {}", topic, producerName, messagesToResend);
                if (changeToReadyState()) {
                    producerCreatedFuture.complete(ProducerImpl.this);
                    return;
                    // Older brokers cannot validate CRC32C: strip checksums before resending.
                    final boolean stripChecksum = cnx.getRemoteEndpointProtocolVersion() < brokerChecksumSupportedVersion();
                    for (OpSendMsg op : pendingMessages) {
                        stripChecksum(op);
                        if (!changeToReadyState()) {
// NOTE(review): fragment of closeAsync — truncated/scrambled excerpt; cmd, requestId and
// the AlreadyClosedException message text are defined outside this view. Code unchanged.
@Override
public CompletableFuture<Void> closeAsync() {
    // Atomically transition toward Closed unless the producer already is.
    final State currentState = getAndUpdateState(state -> {
        if (state == State.Closed) {
            return state;
    if (getClientCnx() == null || currentState != State.Ready) {
        // No live connection: close locally without a broker round-trip.
        log.info("[{}] [{}] Closed Producer (not connected)", topic, producerName);
        synchronized (this) {
            setState(State.Closed);
            client.cleanupProducer(this);
            PulsarClientException ex = new PulsarClientException.AlreadyClosedException(
    // Connected path: ask the broker to close, then tear down local state.
    ClientCnx cnx = cnx();
    cnx.sendRequestWithId(cmd, requestId).handle((v, exception) -> {
        cnx.removeProducer(producerId);
        setState(State.Closed);
        // Release buffers of messages that will never be acknowledged.
        pendingMessages.forEach(msg -> {
            msg.cmd.release();
// NOTE(review): fragment — batch flush path; construction of `op` and the else branch
// fall outside this excerpt. Code left byte-identical.
ByteBuf compressedPayload = batchMessageContainer.getCompressedBatchMetadataAndPayload();
long sequenceId = batchMessageContainer.sequenceId;
// Build one Send command carrying the entire batch.
ByteBuf cmd = sendMessage(producerId, sequenceId, batchMessageContainer.numMessagesInBatch,
        batchMessageContainer.setBatchAndBuild(), compressedPayload);
if (isConnected()) {
    // Hand the write off to the connection's event loop thread.
    cnx().ctx().channel().eventLoop().execute(WriteInEventLoopCallback.create(this, cnx(), op));
    stats.updateNumMsgsSent(numMessagesInBatch, op.batchSizeByte);
} else {
@Override public boolean isConnected() { for (ProducerImpl producer : producers) { // returns false if any of the partition is not connected if (!producer.isConnected()) { return false; } } return true; }
// NOTE(review): fragment — creates one internal producer per partition; the rest of the
// handle() body is truncated in this excerpt.
for (int partitionIndex = 0; partitionIndex < numPartitions; partitionIndex++) {
    // Each partition gets its own fully-qualified topic name, e.g. "topic-partition-N".
    String partitionName = DestinationName.get(topic).getPartition(partitionIndex).toString();
    ProducerImpl producer = new ProducerImpl(client, partitionName, null, conf,
            new CompletableFuture<Producer>(), partitionIndex);
    producers.add(producer);
    producer.producerCreatedFuture().handle((prod, createException) -> {
        if (createException != null) {
            // Any partition failing to create fails the whole partitioned producer.
            setState(State.Failed);
// Fail every queued message with the given error, then count them as send failures.
// NOTE(review): pendingMessages.size() is read AFTER failPendingMessages — confirm that
// call does not drain the queue, otherwise this records an increment of 0.
failPendingMessages(cnx(), te);
stats.incrementSendFailed(pendingMessages.size());
/**
 * Returns the identifier of the current broker connection, or {@code null}
 * when the producer has no active connection.
 */
public String getConnectionId() {
    if (cnx() == null) {
        return null;
    }
    return connectionId;
}
/**
 * Closes the replicator's outbound producer and marks the replicator Stopped.
 * On failure the close is retried later with backoff on the broker executor.
 *
 * @return a future completed when the underlying producer close finishes
 *         (already completed when there is no producer to close)
 */
private synchronized CompletableFuture<Void> closeProducerAsync() {
    if (producer == null) {
        // Nothing to close: transition straight to Stopped.
        STATE_UPDATER.set(this, State.Stopped);
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = producer.closeAsync();
    future.thenRun(() -> {
        // Only after a successful close do we drop the reference and go Stopped.
        STATE_UPDATER.set(this, State.Stopped);
        this.producer = null;
        // deactivate cursor after successfully close the producer
        this.cursor.setInactive();
    }).exceptionally(ex -> {
        // Close failed: schedule a retry after the next backoff interval.
        long waitTimeMs = backOff.next();
        log.warn(
                "[{}][{} -> {}] Exception: '{}' occured while trying to close the producer. retrying again in {} s",
                topicName, localCluster, remoteCluster, ex.getMessage(), waitTimeMs / 1000.0);
        // BackOff before retrying
        brokerService.executor().schedule(this::closeProducerAsync, waitTimeMs, TimeUnit.MILLISECONDS);
        return null;
    });
    return future;
}
/**
 * Handles a broker send receipt: extracts the acknowledged position and
 * forwards it to the owning producer. When the receipt carries no message id,
 * ledger and entry are reported as -1.
 */
@Override
protected void handleSendReceipt(CommandSendReceipt sendReceipt) {
    checkArgument(state == State.Ready);

    final long producerId = sendReceipt.getProducerId();
    final long sequenceId = sendReceipt.getSequenceId();
    final boolean hasMessageId = sendReceipt.hasMessageId();
    final long ledgerId = hasMessageId ? sendReceipt.getMessageId().getLedgerId() : -1;
    final long entryId = hasMessageId ? sendReceipt.getMessageId().getEntryId() : -1;

    if (log.isDebugEnabled()) {
        log.debug("{} Got receipt for producer: {} -- msg: {} -- id: {}:{}", ctx.channel(), producerId,
                sequenceId, ledgerId, entryId);
    }
    producers.get(producerId).ackReceived(this, sequenceId, ledgerId, entryId);
}
// NOTE(review): fragment — tail of a producer-creation branch; the partitioned arm and
// surrounding method are outside this excerpt.
        producerCreatedFuture);
} else {
    // Non-partitioned topic: single producer, partition index -1.
    producer = new ProducerImpl(PulsarClientImpl.this, topic, producerName, conf, producerCreatedFuture, -1);
/**
 * Keeps the cursor's active flag in sync with the outbound producer:
 * the cursor is marked active while a producer exists and is connected,
 * and inactive otherwise.
 */
public void updateCursorState() {
    boolean producerConnected = producer != null && producer.isConnected();
    if (producerConnected) {
        this.cursor.setActive();
    } else {
        this.cursor.setInactive();
    }
}
/**
 * Returns when the current connection was established, or {@code null}
 * when the producer has no active connection.
 */
public String getConnectedSince() {
    if (cnx() == null) {
        return null;
    }
    return connectedSince;
}
@Override public CompletableFuture<MessageId> sendAsync(Message message) { switch (getState()) { case Ready: case Connecting: break; // Ok case Closing: case Closed: return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Producer already closed")); case Failed: case Uninitialized: return FutureUtil.failedFuture(new PulsarClientException.NotConnectedException()); } int partition = routerPolicy.choosePartition(message); checkArgument(partition >= 0 && partition < numPartitions, "Illegal partition index chosen by the message routing policy"); return producers.get(partition).sendAsync(message); }
// Replicate the entry asynchronously; the callback handles the ack and entry cleanup.
producer.sendAsync(msg, ProducerSendCallback.create(this, entry, msg));
// Record that at least one message was dispatched from this read batch.
atLeastOneMessageSentForReplication = true;