/** Exposes the metrics of the wrapped Kafka producer, keyed by {@link MetricName}. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
/** Delegates to the underlying Kafka producer's metric registry. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
/** Returns the wrapped producer's full metrics map (read-only view per Kafka's contract). */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
// Snapshot of the delegate producer's current metrics, keyed by MetricName.
Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
/** * Tests that partitions list is determinate and correctly provided to custom partitioner. */ @SuppressWarnings("unchecked") @Test public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception { FlinkKafkaPartitioner<String> mockPartitioner = mock(FlinkKafkaPartitioner.class); RuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class); when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0); when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1); // out-of-order list of 4 partitions List<PartitionInfo> mockPartitionsList = new ArrayList<>(4); mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 3, null, null, null)); mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 1, null, null, null)); mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 0, null, null, null)); mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 2, null, null, null)); final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), mockPartitioner); producer.setRuntimeContext(mockRuntimeContext); final KafkaProducer mockProducer = producer.getMockKafkaProducer(); when(mockProducer.partitionsFor(anyString())).thenReturn(mockPartitionsList); when(mockProducer.metrics()).thenReturn(null); producer.open(new Configuration()); verify(mockPartitioner, times(1)).open(0, 1); producer.invoke("foobar", SinkContextUtil.forTimestamp(0)); verify(mockPartitioner, times(1)).partition( "foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] {0, 1, 2, 3}); }
/** Forwards to the wrapped producer so callers can inspect its metrics. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
/** Surfaces the inner Kafka producer's metrics without modification. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
/** Pass-through accessor for the delegate producer's metric map. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
/** Returns whatever metrics the underlying Kafka client currently reports. */
@Override
public Map<MetricName, ? extends Metric> metrics() {
    return kafkaProducer.metrics();
}
@Override public String getStat() { Map<MetricName,? extends Metric> metrics = producer.metrics(); StringBuilder sb = new StringBuilder(); // add kafka producer stats, which are rates for( Map.Entry<MetricName,? extends Metric> e : metrics.entrySet() ){ sb.append("kafka.").append(e.getKey()).append(": ").append(e.getValue().value()).append('\n'); } return sb.toString(); }
@Override public String getStat() { Map<MetricName,? extends Metric> metrics = producer.metrics(); StringBuilder sb = new StringBuilder(); // add kafka producer stats, which are rates for( Map.Entry<MetricName,? extends Metric> e : metrics.entrySet() ){ sb.append("kafka.").append(e.getKey()).append(": ").append(e.getValue().value()).append('\n'); } // also report our counters sb.append("messages-in-queue4sink: ").append( this.queue4Sink.size() ).append('\n'); sb.append("queued-jobs: ").append( this.jobQueue.size() ).append('\n'); sb.append("active-threads: ").append( this.senders.getActiveCount() ).append('\n'); sb.append("received-messages: ").append( this.receivedCount.get() ).append('\n'); sb.append("sent-messages: ").append( this.sentCount.get() ).append('\n'); sb.append("sent-bytes: ").append( this.sentByteCount.get() ).append('\n'); sb.append("dropped-messages: ").append( this.droppedCount.get() ).append('\n'); sb.append("requeued-messages: ").append( this.requeuedCount.get() ).append('\n'); return sb.toString(); } }
} else { double totalBytes = producer.metrics().get( new MetricName( "buffer-total-bytes", "client-id", props.getProperty("client.id"))).value(); double availableBytes = producer.metrics().get( new MetricName( "buffer-available-bytes", double memoryRate = consumedMemory / totalBytes; if (memoryRate >= 0.5) { double outgoingRate = producer.metrics().get( new MetricName( "outgoing-byte-rate",
@Override public JsonObject getStats() { Map<MetricName, ? extends Metric> metrics = producer.metrics(); JsonObject stats = new JsonObject() .add("seen_messages", seenMessages.get()) .add("failed_to_send", failedToSendMessageExceptions.get()); // Map to Plog v4-style naming for (Map.Entry<String, MetricName> entry: SHORTNAME_TO_METRICNAME.entrySet()) { Metric metric = metrics.get(entry.getValue()); if (metric != null) { stats.add(entry.getKey(), metric.value()); } else { stats.add(entry.getKey(), 0.0); } } // Use default kafka naming, include all producer metrics for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) { double value = metric.getValue().value(); String name = metric.getKey().name().replace("-", "_"); if (value > -Double.MAX_VALUE && value < Double.MAX_VALUE) { stats.add(name, value); } else { stats.add(name, 0.0); } } return stats; }
// Grab the producer's live metrics map; fragment of a larger method not visible here.
Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
// Fetch current producer metrics (keyed by MetricName) for the reporting code that follows.
Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
// Local handle on the producer's metrics; surrounding method not shown in this chunk.
Map<MetricName, ? extends Metric> metrics = this.producer.metrics();