/**
 * Creates a new {@link PulsarClient} connected to the given Pulsar endpoint.
 *
 * @param serviceUrl
 *            the url of the Pulsar endpoint to be used
 * @param conf
 *            the client configuration
 * @return a new pulsar client object
 * @throws PulsarClientException.InvalidServiceURL
 *             if the serviceUrl is invalid
 */
public static PulsarClient create(String serviceUrl, ClientConfiguration conf) throws PulsarClientException {
    return new PulsarClientImpl(serviceUrl, conf);
}
@Override
public CompletableFuture<Producer> createProducerAsync(final String topic, final ProducerConfiguration conf) {
    // Delegate to the fuller overload, letting the broker assign the producer name.
    return createProducerAsync(topic, conf, null);
}
@Override
void connectionFailed(PulsarClientException exception) {
    // Give up only when the subscribe deadline has passed AND this thread is the
    // one that actually completed the future (completeExceptionally returns false
    // if the future was already completed by a concurrent success or failure).
    boolean pastDeadline = System.currentTimeMillis() > subscribeTimeout;
    if (pastDeadline && subscribeFuture.completeExceptionally(exception)) {
        setState(State.Failed);
        client.cleanupConsumer(this);
    }
}
public ClientCnx(PulsarClientImpl pulsarClient) { super(30, TimeUnit.SECONDS); this.pendingLookupRequestSemaphore = new Semaphore(pulsarClient.getConfiguration().getConcurrentLookupRequest(), true); this.authentication = pulsarClient.getConfiguration().getAuthentication(); this.eventLoopGroup = pulsarClient.eventLoopGroup(); this.maxNumberOfRejectedRequestPerConnection = pulsarClient.getConfiguration() .getMaxNumberOfRejectedRequestPerConnection(); this.state = State.None; }
long requestId = client.newRequestId(); client.timer().newTimeout(batchMessageAndSendTask, conf.getBatchingMaxPublishDelayMs(), TimeUnit.MILLISECONDS); failPendingMessages(cnx(), (PulsarClientException) cause); producerCreatedFuture.completeExceptionally(cause); client.cleanupProducer(this); } else if (producerCreatedFuture.isDone() || // (cause instanceof PulsarClientException && isRetriableError((PulsarClientException) cause) setState(State.Failed); producerCreatedFuture.completeExceptionally(cause); client.cleanupProducer(this);
public ProducerImpl(PulsarClientImpl client, String topic, String producerName, ProducerConfiguration conf, CompletableFuture<Producer> producerCreatedFuture, int partitionIndex) { super(client, topic, conf, producerCreatedFuture); this.producerId = client.newProducerId(); this.producerName = producerName; this.partitionIndex = partitionIndex; this.pendingMessages = Queues.newArrayBlockingQueue(conf.getMaxPendingMessages()); this.pendingCallbacks = Queues.newArrayBlockingQueue(conf.getMaxPendingMessages()); this.semaphore = new Semaphore(conf.getMaxPendingMessages(), true); this.compressor = CompressionCodecProvider .getCompressionCodec(convertCompressionType(conf.getCompressionType())); if (conf.getSendTimeoutMs() > 0) { sendTimeout = client.timer().newTimeout(this, conf.getSendTimeoutMs(), TimeUnit.MILLISECONDS); } this.createProducerTimeout = System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs(); if (conf.getBatchingEnabled()) { this.maxNumMessagesInBatch = conf.getBatchingMaxMessages(); this.batchMessageContainer = new BatchMessageContainer(maxNumMessagesInBatch, convertCompressionType(conf.getCompressionType()), topic, producerName); } else { this.maxNumMessagesInBatch = 1; this.batchMessageContainer = null; } if (client.getConfiguration().getStatsIntervalSeconds() > 0) { stats = new ProducerStats(client, conf, this); } else { stats = ProducerStats.PRODUCER_STATS_DISABLED; } grabCnx(); }
/**
 * Queries the broker at {@code socketAddress} for the partition metadata of
 * {@code destination}.
 *
 * @param socketAddress broker address to query
 * @param destination destination (topic) whose partition count is requested
 * @return a future completed with the {@link PartitionedTopicMetadata}, or
 *         completed exceptionally on connection, lookup, or parse failure
 */
private CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(InetSocketAddress socketAddress,
        DestinationName destination) {

    CompletableFuture<PartitionedTopicMetadata> partitionFuture = new CompletableFuture<PartitionedTopicMetadata>();

    client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> {
        long requestId = client.newRequestId();
        ByteBuf request = Commands.newPartitionMetadataRequest(destination.toString(), requestId);
        clientCnx.newLookup(request, requestId).thenAccept(lookupDataResult -> {
            try {
                partitionFuture.complete(new PartitionedTopicMetadata(lookupDataResult.partitions));
            } catch (Exception e) {
                // BUGFIX: the original format string declared only two %s
                // placeholders for three arguments, so e.getMessage() was
                // silently dropped from the failure message.
                partitionFuture.completeExceptionally(new PulsarClientException.LookupException(
                        format("Failed to parse partition-response redirect=%s , partitions %s : %s",
                                lookupDataResult.redirect, lookupDataResult.partitions, e.getMessage())));
            }
        }).exceptionally((e) -> {
            log.warn("[{}] failed to get Partitioned metadata : {}", destination.toString(),
                    e.getCause().getMessage(), e);
            partitionFuture.completeExceptionally(e);
            return null;
        });
    }).exceptionally(connectionException -> {
        // Could not even establish the connection: propagate the failure as-is.
        partitionFuture.completeExceptionally(connectionException);
        return null;
    });

    return partitionFuture;
}
log.info("Pulsar client config: {}", w.writeValueAsString(pulsarClient.getConfiguration())); } catch (IOException e) { log.error("Failed to dump config info: {}", e); } finally { statTimeout = pulsarClient.timer().newTimeout(stat, statsIntervalSeconds, TimeUnit.SECONDS); statTimeout = pulsarClient.timer().newTimeout(stat, statsIntervalSeconds, TimeUnit.SECONDS);
batchMessageAckTracker.clear(); unAckedMessageTracker.close(); client.cleanupConsumer(this); return CompletableFuture.completedFuture(null); long requestId = client.newRequestId(); ByteBuf cmd = Commands.newCloseConsumer(consumerId, requestId); unAckedMessageTracker.close(); closeFuture.complete(null); client.cleanupConsumer(this); } else { closeFuture.completeExceptionally(exception);
public ConnectionPool(final PulsarClientImpl client, EventLoopGroup eventLoopGroup) { this.eventLoopGroup = eventLoopGroup; this.maxConnectionsPerHosts = client.getConfiguration().getConnectionsPerBroker(); bootstrap.option(ChannelOption.TCP_NODELAY, client.getConfiguration().isUseTcpNoDelay()); bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); bootstrap.handler(new ChannelInitializer<SocketChannel>() {
String partitionName = DestinationName.get(topic).getPartition(partitionIndex).toString(); ConsumerImpl consumer = new ConsumerImpl(client, partitionName, subscription, internalConfig, client.externalExecutorProvider().getExecutor(), partitionIndex, new CompletableFuture<Consumer>()); consumers.add(consumer); consumer.subscribeFuture().handle((cons, subscribeException) -> { setState(State.Failed); subscribeFail.compareAndSet(null, subscribeException); client.cleanupConsumer(this); client.cleanupConsumer(this); return null; });
synchronized (this) { setState(State.Closed); client.cleanupProducer(this); PulsarClientException ex = new PulsarClientException.AlreadyClosedException( "Producer was already closed"); long requestId = client.newRequestId(); ByteBuf cmd = Commands.newCloseProducer(producerId, requestId); client.cleanupProducer(this); } else { closeFuture.completeExceptionally(exception);
SubscriptionMode subscriptionMode, MessageId startMessageId) { super(client, topic, subscription, conf, conf.getReceiverQueueSize(), listenerExecutor, subscribeFuture); this.consumerId = client.newConsumerId(); this.subscriptionMode = subscriptionMode; this.startMessageId = startMessageId; AVAILABLE_PERMITS_UPDATER.set(this, 0); this.subscribeTimeout = System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs(); this.partitionIndex = partitionIndex; this.receiverQueueRefillThreshold = conf.getReceiverQueueSize() / 2; this.priorityLevel = conf.getPriorityLevel(); this.batchMessageAckTracker = new ConcurrentSkipListMap<>(); if (client.getConfiguration().getStatsIntervalSeconds() > 0) { stats = new ConsumerStats(client, conf, this); } else {
closeFuture.complete(null); log.info("[{}] Closed Partitioned Producer", topic); client.cleanupProducer(this); } else { setState(State.Failed);
/**
 * Synchronously creates a reader on {@code topic} positioned at {@code startMessageId}
 * by blocking on {@link #createReaderAsync}.
 *
 * @throws PulsarClientException if reader creation fails or the thread is interrupted
 */
@Override
public Reader createReader(String topic, MessageId startMessageId, ReaderConfiguration conf)
        throws PulsarClientException {
    try {
        return createReaderAsync(topic, startMessageId, conf).get();
    } catch (ExecutionException e) {
        // Unwrap and rethrow the async failure as a PulsarClientException.
        Throwable cause = e.getCause();
        if (cause instanceof PulsarClientException) {
            throw (PulsarClientException) cause;
        }
        throw new PulsarClientException(cause);
    } catch (InterruptedException e) {
        // Restore the interrupt flag before converting to a client exception.
        Thread.currentThread().interrupt();
        throw new PulsarClientException(e);
    }
}
/**
 * Synchronously closes this client by blocking on {@link #closeAsync}.
 *
 * @throws PulsarClientException if the close fails or the thread is interrupted
 */
@Override
public void close() throws PulsarClientException {
    try {
        closeAsync().get();
    } catch (ExecutionException e) {
        // Unwrap and rethrow the async failure as a PulsarClientException.
        Throwable t = e.getCause();
        if (t instanceof PulsarClientException) {
            throw (PulsarClientException) t;
        } else {
            throw new PulsarClientException(t);
        }
    } catch (InterruptedException e) {
        // BUGFIX: restore the thread's interrupt status before translating the
        // exception, matching the handling in createReader(). Swallowing the
        // flag hides the interruption from callers higher up the stack.
        Thread.currentThread().interrupt();
        throw new PulsarClientException(e);
    }
}
private void receiveMessageFromConsumer(ConsumerImpl consumer) { consumer.receiveAsync().thenAccept(message -> { // Process the message, add to the queue and trigger listener or async callback messageReceived(message); if (incomingMessages.size() >= maxReceiverQueueSize || (incomingMessages.size() > sharedQueueResumeThreshold && !pausedConsumers.isEmpty())) { // mark this consumer to be resumed later: if No more space left in shared queue, // or if any consumer is already paused (to create fair chance for already paused consumers) pausedConsumers.add(consumer); } else { // Schedule next receiveAsync() if the incoming queue is not full. Use a different thread to avoid // recursion and stack overflow client.eventLoopGroup().execute(() -> { receiveMessageFromConsumer(consumer); }); } }); }
CompletableFuture<InetSocketAddress> addressFuture = new CompletableFuture<InetSocketAddress>(); client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> { long requestId = client.newRequestId(); ByteBuf request = Commands.newLookup(destination.toString(), authoritative, requestId); clientCnx.newLookup(request, requestId).thenAccept(lookupDataResult -> {
log.info("Pulsar client config: {}", w.writeValueAsString(pulsarClient.getConfiguration())); } catch (IOException e) { log.error("Failed to dump config info: {}", e); } finally { statTimeout = pulsarClient.timer().newTimeout(stat, statsIntervalSeconds, TimeUnit.SECONDS); statTimeout = pulsarClient.timer().newTimeout(stat, statsIntervalSeconds, TimeUnit.SECONDS);
long requestId = client.newRequestId(); client.cleanupConsumer(this); } else {