private MessageIdImpl getMessageIdImpl(Message msg) { MessageIdImpl messageId = (MessageIdImpl) msg.getMessageId(); if (messageId instanceof BatchMessageIdImpl) { // messageIds contain MessageIdImpl, not BatchMessageIdImpl messageId = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), getPartitionIndex()); } return messageId; }
/**
 * De-serializes a message id from a byte array.
 *
 * @param data
 *            byte array containing the serialized message id
 * @return the de-serialized messageId object
 * @throws IOException
 *             if the byte array does not contain a valid serialized message id
 */
public static MessageId fromByteArray(byte[] data) throws IOException {
    // Delegate to the implementation class, which picks MessageIdImpl vs
    // BatchMessageIdImpl based on the serialized content.
    return MessageIdImpl.fromByteArray(data);
}
/** * Clear the internal receiver queue and returns the message id of what was the 1st message in the queue that was * not seen by the application */ private MessageIdImpl clearReceiverQueue() { List<Message> currentMessageQueue = new ArrayList<>(incomingMessages.size()); incomingMessages.drainTo(currentMessageQueue); if (!currentMessageQueue.isEmpty()) { MessageIdImpl nextMessageInQueue = (MessageIdImpl) currentMessageQueue.get(0).getMessageId(); MessageIdImpl previousMessage = new MessageIdImpl(nextMessageInQueue.getLedgerId(), nextMessageInQueue.getEntryId() - 1, nextMessageInQueue.getPartitionIndex()); return previousMessage; } else if (lastDequeuedMessage != null) { // If the queue was empty we need to restart from the message just after the last one that has been dequeued // in the past return lastDequeuedMessage; } else { // No message was received or dequeued by this consumer. Next message would still be the startMessageId return (MessageIdImpl) startMessageId; } }
/**
 * Removes, from both tracking sets, every message id that is on the same partition as
 * {@code msgId} and positioned at or before it (strictly earlier ledger, or same ledger
 * with entry id <= msgId's entry id).
 *
 * @param msgId upper bound (inclusive) of the ids to remove
 * @return total number of entries removed across both sets
 */
public int removeMessagesTill(MessageIdImpl msgId) {
    readLock.lock();
    try {
        // NOTE(review): both removeIf calls use the same predicate — a candidate for a
        // shared local. Also, the sets are mutated while holding only the READ lock;
        // this is safe only if the set implementations are themselves thread-safe and
        // the write lock merely guards swapping currentSet/oldOpenSet — confirm.
        int currentSetRemovedMsgCount = currentSet.removeIf(m -> ((m.getLedgerId() < msgId.getLedgerId()
                || (m.getLedgerId() == msgId.getLedgerId() && m.getEntryId() <= msgId.getEntryId()))
                && m.getPartitionIndex() == msgId.getPartitionIndex()));
        int oldSetRemovedMsgCount = oldOpenSet.removeIf(m -> ((m.getLedgerId() < msgId.getLedgerId()
                || (m.getLedgerId() == msgId.getLedgerId() && m.getEntryId() <= msgId.getEntryId()))
                && m.getPartitionIndex() == msgId.getPartitionIndex()));
        return currentSetRemovedMsgCount + oldSetRemovedMsgCount;
    } finally {
        readLock.unlock();
    }
}
/**
 * Returns (creating if necessary) the non-durable subscription with the given name.
 * The backing non-durable cursor is created only for the first consumer that connects;
 * later consumers reuse the existing subscription.
 *
 * @param subscriptionName name under which the subscription is registered
 * @param startMessageId   position to start reading from; null means MessageId.latest
 */
private CompletableFuture<? extends Subscription> getNonDurableSubscription(String subscriptionName, MessageId startMessageId) {
    CompletableFuture<Subscription> subscriptionFuture = new CompletableFuture<>();
    Subscription subscription = subscriptions.computeIfAbsent(subscriptionName, name -> {
        // Create a new non-durable cursor only for the first consumer that connects
        MessageIdImpl msgId = startMessageId != null ? (MessageIdImpl) startMessageId
                : (MessageIdImpl) MessageId.latest;
        Position startPosition = new PositionImpl(msgId.getLedgerId(), msgId.getEntryId());
        ManagedCursor cursor = null;
        try {
            cursor = ledger.newNonDurableCursor(startPosition);
        } catch (ManagedLedgerException e) {
            subscriptionFuture.completeExceptionally(e);
        }
        // NOTE(review): on cursor-creation failure this still constructs and stores a
        // subscription with a null cursor; it is removed below, but a concurrent caller
        // could observe the broken entry in the map in the meantime — confirm this is
        // acceptable, or avoid inserting the mapping on failure.
        return new PersistentSubscription(this, subscriptionName, cursor);
    });
    // If the future is still pending here, cursor creation succeeded (or the
    // subscription already existed and the lambda never ran).
    if (!subscriptionFuture.isDone()) {
        subscriptionFuture.complete(subscription);
    } else {
        // failed to initialize managed-cursor: clean up created subscription
        subscriptions.remove(subscriptionName);
    }
    return subscriptionFuture;
}
/**
 * Builds a message from a string-form message id and an already-assembled payload.
 *
 * @param msgId      message id in {@code "<ledgerId>:<entryId>"} form
 * @param properties application-defined properties; wrapped unmodifiable, not copied
 * @param payload    message payload; stored as-is (no copy, no connection attached)
 */
public MessageImpl(String msgId, Map<String, String> properties, ByteBuf payload) {
    // Locate the separator once instead of scanning the string twice.
    int separatorIdx = msgId.indexOf(':');
    long ledgerId = Long.parseLong(msgId.substring(0, separatorIdx));
    long entryId = Long.parseLong(msgId.substring(separatorIdx + 1));
    // -1 partition index: this id is not bound to any particular partition.
    this.messageId = new MessageIdImpl(ledgerId, entryId, -1);
    this.cnx = null;
    this.payload = payload;
    this.properties = Collections.unmodifiableMap(properties);
}
// NOTE(review): fragment — the remainder of this method is outside the visible chunk.
boolean markAckForBatchMessage(BatchMessageIdImpl batchMessageId, AckType ackType) {
    // Collapse the batch id to its (ledger, entry, partition) coordinates, since
    // batchMessageAckTracker is keyed by plain MessageIdImpl.
    MessageIdImpl message = new MessageIdImpl(batchMessageId.getLedgerId(), batchMessageId.getEntryId(),
            batchMessageId.getPartitionIndex());
    BitSet bitSet = batchMessageAckTracker.get(message);
    // Drop tracker entries for every message id up to and including this one.
    // NOTE(review): this runs regardless of ackType — confirm that is intended for
    // individual (non-cumulative) acks as well.
    batchMessageAckTracker.keySet().removeIf(m -> (m.compareTo(message) <= 0));
/**
 * Orders message ids by ledger id, then entry id, then partition index.
 */
@Override
public int compareTo(MessageIdImpl other) {
    int byLedger = Long.compare(this.ledgerId, other.ledgerId);
    if (byLedger != 0) {
        return byLedger;
    }
    int byEntry = Long.compare(this.entryId, other.entryId);
    if (byEntry != 0) {
        return byEntry;
    }
    return Integer.compare(this.getPartitionIndex(), other.getPartitionIndex());
}
@Override
public byte[] toByteArray() {
    // there is no message batch so we pass -1 as the batch index
    return toByteArray(-1);
}
// end of enclosing class
}
// Fragment: tail of a stream/lambda pipeline. Copies each message id's
// (partition, ledger, entry) coordinates into the protobuf builder and
// materializes the resulting list.
builder.setPartition(messageId.getPartitionIndex());
builder.setLedgerId(messageId.getLedgerId());
builder.setEntryId(messageId.getEntryId());
return builder.build();
}).collect(Collectors.toList());
// NOTE(review): fragment — the remainder of this method is outside the visible chunk.
private CompletableFuture<Void> sendAcknowledge(MessageId messageId, AckType ackType) {
    MessageIdImpl msgId = (MessageIdImpl) messageId;
    // Build the wire-level Ack command for this (ledger, entry) position.
    // NOTE(review): last argument is null — presumably an optional validation error;
    // confirm against Commands.newAck.
    final ByteBuf cmd = Commands.newAck(consumerId, msgId.getLedgerId(), msgId.getEntryId(), ackType, null);
/**
 * Assigns the broker-provided position to the pending message, or — when this holder
 * contains a batch — a per-index {@link BatchMessageIdImpl} to every message in it.
 */
void setMessageId(long ledgerId, long entryId, int partitionIndex) {
    if (msg == null) {
        // Batch case: each message gets the shared coordinates plus its own batch index.
        for (int i = 0; i < msgs.size(); i++) {
            msgs.get(i).setMessageId(new BatchMessageIdImpl(ledgerId, entryId, partitionIndex, i));
        }
        return;
    }
    msg.setMessageId(new MessageIdImpl(ledgerId, entryId, partitionIndex));
}
/**
 * Routes each message id to the consumer owning its partition and asks that consumer
 * to redeliver it. Note: ids are removed from the caller-supplied set as they are
 * routed, so the set is consumed by this call.
 */
@Override
public void redeliverUnacknowledgedMessages(Set<MessageIdImpl> messageIds) {
    for (ConsumerImpl consumer : consumers) {
        Set<MessageIdImpl> idsForPartition = new HashSet<>();
        messageIds.removeIf(id -> {
            boolean sameParition = id.getPartitionIndex() == consumer.getPartitionIndex();
            if (sameParition) {
                idsForPartition.add(id);
            }
            return sameParition;
        });
        consumer.redeliverUnacknowledgedMessages(idsForPartition);
    }
}
/**
 * Record the event that one message has been processed by the application.
 *
 * Periodically, it sends a Flow command to notify the broker that it can push more messages
 */
protected synchronized void messageProcessed(Message msg) {
    ClientCnx currentCnx = cnx();
    ClientCnx msgCnx = ((MessageImpl) msg).getCnx();
    // Record the last dequeued position BEFORE the stale-connection check, so that
    // clearReceiverQueue() can resume correctly even for messages from the old queue.
    lastDequeuedMessage = (MessageIdImpl) msg.getMessageId();
    if (msgCnx != currentCnx) {
        // The processed message did belong to the old queue that was cleared after reconnection.
        return;
    }
    increaseAvailablePermits(currentCnx);
    stats.updateNumMsgsReceived(msg);
    if (conf.getAckTimeoutMillis() != 0) {
        // reset timer for messages that are received by the client
        MessageIdImpl id = (MessageIdImpl) msg.getMessageId();
        if (id instanceof BatchMessageIdImpl) {
            // Track batch messages by their plain (ledger, entry, partition) coordinates
            id = new MessageIdImpl(id.getLedgerId(), id.getEntryId(), getPartitionIndex());
        }
        unAckedMessageTracker.add(id);
    }
}
// Fragment: serialize the start message id coordinates into the protobuf message,
// then return the builder to its pool once the immutable message is built.
builder.setLedgerId(startMessageId.getLedgerId());
builder.setEntryId(startMessageId.getEntryId());
startMessageIdData = builder.build();
builder.recycle();
/**
 * Builds a message from its wire-level representation, as received on {@code cnx}.
 *
 * @param messageId      protobuf message id carrying (ledger, entry) coordinates
 * @param msgMetadata    wire metadata; its properties (if any) are copied out
 * @param payload        network buffer holding the payload; copied, see below
 * @param partitionIndex partition this message was consumed from
 * @param cnx            connection the message arrived on
 */
MessageImpl(MessageIdData messageId, MessageMetadata msgMetadata, ByteBuf payload, int partitionIndex,
        ClientCnx cnx) {
    this.msgMetadataBuilder = MessageMetadata.newBuilder(msgMetadata);
    this.messageId = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), partitionIndex);
    this.cnx = cnx;

    // Need to make a copy since the passed payload is using a ref-count buffer that we don't know when could
    // release, since the Message is passed to the user. Also, the passed ByteBuf is coming from network and is
    // backed by a direct buffer which we could not expose as a byte[]
    this.payload = Unpooled.copiedBuffer(payload);

    if (msgMetadata.getPropertiesCount() == 0) {
        this.properties = Collections.emptyMap();
    } else {
        Map<String, String> props = Maps.newTreeMap();
        for (KeyValue entry : msgMetadata.getPropertiesList()) {
            props.put(entry.getKey(), entry.getValue());
        }
        this.properties = Collections.unmodifiableMap(props);
    }
}
/**
 * Acknowledges a message on a partitioned topic by delegating to the consumer that
 * owns the message's partition. Cumulative acks are rejected, since a cumulative
 * position is not meaningful across independent partitions.
 */
@Override
protected CompletableFuture<Void> doAcknowledge(MessageId messageId, AckType ackType) {
    checkArgument(messageId instanceof MessageIdImpl);

    // Guard clauses: consumer must be usable and the ack type supported.
    if (getState() != State.Ready) {
        return FutureUtil.failedFuture(new PulsarClientException("Consumer already closed"));
    }
    if (ackType == AckType.Cumulative) {
        return FutureUtil.failedFuture(new PulsarClientException.NotSupportedException(
                "Cumulative acknowledge not supported for partitioned topics"));
    }

    int partition = ((MessageIdImpl) messageId).getPartitionIndex();
    ConsumerImpl consumer = consumers.get(partition);
    return consumer.doAcknowledge(messageId, ackType);
}
// Fragment: collapse the batch's coordinates to a plain MessageIdImpl key.
MessageIdImpl batchMessage = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(),
        getPartitionIndex());
// Set all bits in [0, batchSize) — one bit per message in the batch.
// NOTE(review): presumably a set bit means "not yet acked"; confirm against the
// surrounding method.
bitSet.set(0, batchSize);
// Fragment: pull the subscription mode and optional start position out of the
// Subscribe command.
final boolean isDurable = subscribe.getDurable();
// Reconstruct the consumer-supplied start position; null when the command carries none.
final MessageIdImpl startMessageId = subscribe.hasStartMessageId()
        ? new MessageIdImpl(subscribe.getStartMessageId().getLedgerId(),
                subscribe.getStartMessageId().getEntryId(), subscribe.getStartMessageId().getPartition())
        : null;
/**
 * De-serializes a message id from its wire-format byte array.
 *
 * Returns a {@link BatchMessageIdImpl} when the serialized data carries a batch index,
 * otherwise a plain {@link MessageIdImpl}.
 *
 * @param data byte array containing the serialized message id; must not be null
 * @return the de-serialized message id
 * @throws IOException if the data does not contain a complete, valid message id
 */
public static MessageId fromByteArray(byte[] data) throws IOException {
    checkNotNull(data);
    ByteBufCodedInputStream inputStream = ByteBufCodedInputStream.get(Unpooled.wrappedBuffer(data, 0, data.length));
    PulsarApi.MessageIdData.Builder builder = PulsarApi.MessageIdData.newBuilder();

    PulsarApi.MessageIdData idData;
    try {
        idData = builder.mergeFrom(inputStream, null).build();
    } catch (UninitializedMessageException e) {
        throw new IOException(e);
    } finally {
        // Recycle the pooled stream/builder on every path; previously they leaked when
        // parsing threw.
        inputStream.recycle();
        builder.recycle();
    }

    MessageIdImpl messageId;
    if (idData.hasBatchIndex()) {
        messageId = new BatchMessageIdImpl(idData.getLedgerId(), idData.getEntryId(), idData.getPartition(),
                idData.getBatchIndex());
    } else {
        messageId = new MessageIdImpl(idData.getLedgerId(), idData.getEntryId(), idData.getPartition());
    }
    idData.recycle();
    return messageId;
}