/** Kafka metrics-reporter hook: registers every metric that exists at startup. */
@Override
public void init(List<KafkaMetric> metrics) {
    metrics.forEach(this::addMetric);
}
/** Human-readable summary: routing key plus payload size in bytes. */
@Override
public String toString() {
    return "routingKey: " + getRoutingKey() + ", payload byte size: " + getPayload().length;
}
/**
 * Converts the drained batch into Kafka keyed messages and hands the whole
 * batch to a sender thread, so the queue-drain loop never blocks on Kafka I/O.
 * NOTE(review): assumes every element is a SuroKeyedMessage — the cast is
 * unchecked here; verify upstream enqueue guarantees this.
 */
@Override
protected void write(List<Message> msgList) {
    final List<KeyedMessage<Long, byte[]>> batch =
            new ArrayList<KeyedMessage<Long, byte[]>>(msgList.size());
    for (Message msg : msgList) {
        SuroKeyedMessage suroMsg = (SuroKeyedMessage) msg;
        batch.add(new KeyedMessage<Long, byte[]>(
                suroMsg.getRoutingKey(),
                suroMsg.getKey(),
                suroMsg.getPayload()));
    }
    senders.submit(new Runnable() {
        @Override
        public void run() {
            producer.send(batch);
        }
    });
}
for( Message m : msgList ){ SuroKeyedMessage sKeyedMsg = (SuroKeyedMessage) m; msgCopies.add( new SuroKeyedMessage( sKeyedMsg.getKey(), new Message( m.getRoutingKey(), m.getPayload() )));
/**
 * Queues one message for the Kafka sink, choosing its partition key.
 * Default key is a monotonically increasing id; when a key field is
 * configured for this routing key, the field's hashCode is used instead.
 * Key extraction is best-effort: on any parse failure the sequential id
 * is kept and the error is logged.
 */
@Override
public void writeTo(MessageContainer message) {
    long key = msgId++;
    if (!keyTopicMap.isEmpty()) {
        try {
            Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {});
            Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
            if (keyField != null) {
                key = keyField.hashCode();
            }
        } catch (Exception e) {
            // Pass the throwable so the stack trace is preserved —
            // e.getMessage() alone can be null (e.g. NPE) and loses context.
            log.error("Exception on getting key field: " + e.getMessage(), e);
        }
    }
    enqueue(new SuroKeyedMessage(key, message.getMessage()));
}
/**
 * Queues one message for the Kafka sink, choosing its partition key.
 * Default key is a monotonically increasing id; when a key field is
 * configured for this routing key, the field's hashCode is used instead.
 * Also traces the call and bumps the received-message counter.
 */
@Override
public void writeTo(MessageContainer message) {
    long key = msgId++;
    if (!keyTopicMap.isEmpty()) {
        try {
            Map<String, Object> msgMap = message.getEntity(new TypeReference<Map<String, Object>>() {});
            Object keyField = msgMap.get(keyTopicMap.get(message.getRoutingKey()));
            if (keyField != null) {
                key = keyField.hashCode();
            }
        } catch (Exception e) {
            // Pass the throwable so the stack trace is preserved —
            // e.getMessage() alone can be null (e.g. NPE) and loses context.
            QueuedSink.log.error("Exception on getting key field: " + e.getMessage(), e);
        }
    }
    QueuedSink.log.trace( "KafkaSink writeTo()" );
    receivedCount.incrementAndGet();
    enqueue(new SuroKeyedMessage(key, message.getMessage()));
}
/**
 * Accepts one message: counts it, then either sends immediately (topic
 * metadata already fetched) or parks it on the metadata waiting queue,
 * dropping it when that queue is full.
 */
@Override
public void writeTo(final MessageContainer message) {
    queuedRecords.incrementAndGet();
    DynamicCounter.increment(
            MonitorConfig.builder("queuedRecord")
                    .withTag(TagKey.ROUTING_KEY, message.getRoutingKey())
                    .build());
    runRecordCounterListener();

    // Guard clause: no metadata yet for this topic — queue (or drop) and bail.
    if (!metadataFetchedTopicSet.contains(message.getRoutingKey())) {
        if (!metadataWaitingQueue.offer(message)) {
            dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
        }
        return;
    }
    sendMessage(message);
}
private void sendMessage(final MessageContainer message) { try { List<PartitionInfo> partitionInfos = producer.partitionsFor(message.getRoutingKey()); int partition = retentionPartitioner.getKey(message.getRoutingKey(), partitionInfos); dropMessage(message.getRoutingKey(), e.getClass().getName());
// Records a dropped message: increments a Servo counter tagged with the
// routing key and the drop reason, bumps the local droppedRecords total,
// and notifies the registered record-counter listener (if any).
// The trailing extra brace closes the enclosing class.
private void dropMessage(final String routingKey, final String reason) { DynamicCounter.increment( MonitorConfig .builder("droppedRecord") .withTag(TagKey.ROUTING_KEY, routingKey) .withTag(TagKey.DROPPED_REASON, reason) .build()); droppedRecords.incrementAndGet(); runRecordCounterListener(); } }
initialize( "kafka_" + clientId, queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink,
// Wire up the queued-sink machinery: a per-client queue name, the supplied
// queue (defaulting to an in-memory queue capped at 10000 messages when
// none is injected), and the configured batch size/timeout.
initialize("kafka_" + clientId, queue4Sink == null ? new MemoryQueue4Sink(10000) : queue4Sink, batchSize, batchTimeout);
/** Human-readable summary: routing key plus payload size in bytes. */
@Override
public String toString() {
    final String routingKey = getRoutingKey();
    final int payloadSize = getPayload().length;
    return String.format("routingKey: %s, payload byte size: %d", routingKey, payloadSize);
}
/** Kafka metrics-reporter callback: registers a newly added or updated metric. */
@Override
public void metricChange(KafkaMetric metric) {
    this.addMetric(metric);
}