timestamp = context.timestamp();
/**
 * Records the element together with the sink context's time attributes.
 *
 * <p>Appends a {@code Tuple4<watermark, processingTime, timestamp, value>} to {@code data}.
 * {@code context.timestamp()} may be {@code null} for records without an event-time
 * timestamp; the tuple stores it as-is. The original null-check branched into two
 * identical adds (the else branch passed an explicit {@code null}, which is exactly what
 * {@code context.timestamp()} returns on that path), so the branch is collapsed here.
 *
 * @param value the record to capture
 * @param context sink context supplying watermark, processing time and event-time timestamp
 */
@Override
public void invoke(T value, Context context) throws Exception {
    data.add(
            new Tuple4<>(
                    context.currentWatermark(),
                    context.currentProcessingTime(),
                    context.timestamp(), // may be null; stored as-is
                    value));
}
}
timestamp = context.timestamp();
timestamp = context.timestamp();
timestamp = context.timestamp();
timestamp = context.timestamp();
/**
 * Serializes the record and sends it to Pulsar asynchronously.
 *
 * <p>Fails fast if a previous async send reported an error, optionally attaches the
 * record's event-time timestamp and an extracted key, bumps the pending-record counter
 * when checkpoint flushing is enabled, and wires the send future to the shared
 * success/failure callbacks.
 *
 * @param value the record to publish
 * @param context sink context; {@code timestamp()} may be null when the record carries
 *     no event-time timestamp
 */
@Override
public void invoke(IN value, Context context) throws Exception {
    checkErroneous();

    byte[] payload = schema.serialize(value);

    MessageBuilder builder = MessageBuilder.create();
    Long eventTime = context.timestamp();
    if (eventTime != null) {
        builder = builder.setEventTime(eventTime);
    }
    String key = flinkPulsarKeyExtractor.getKey(value);
    if (key != null) {
        builder = builder.setKey(key);
    }
    Message message = builder.setContent(payload).build();

    // Count in-flight sends so a checkpoint can wait for them to drain.
    if (flushOnCheckpoint) {
        synchronized (pendingRecordsLock) {
            pendingRecords++;
        }
    }

    producer.sendAsync(message)
            .thenApply(successCallback)
            .exceptionally(failureCallback);
}
Bucket<IN, BucketID> onElement(final IN value, final SinkFunction.Context context) throws Exception { final long currentProcessingTime = context.currentProcessingTime(); // setting the values in the bucketer context bucketerContext.update( context.timestamp(), context.currentWatermark(), currentProcessingTime); final BucketID bucketId = bucketAssigner.getBucketId(value, bucketerContext); final Bucket<IN, BucketID> bucket = getOrCreateBucketForBucketId(bucketId); bucket.write(value, currentProcessingTime); // we update the global max counter here because as buckets become inactive and // get removed from the list of active buckets, at the time when we want to create // another part file for the bucket, if we start from 0 we may overwrite previous parts. this.maxPartCounter = Math.max(maxPartCounter, bucket.getPartCounter()); return bucket; }
void onElement(final IN value, final SinkFunction.Context context) throws Exception { final long currentProcessingTime = context.currentProcessingTime(); // setting the values in the bucketer context bucketerContext.update( context.timestamp(), context.currentWatermark(), currentProcessingTime); final BucketID bucketId = bucketAssigner.getBucketId(value, bucketerContext); final Bucket<IN, BucketID> bucket = getOrCreateBucketForBucketId(bucketId); bucket.write(value, currentProcessingTime); // we update the global max counter here because as buckets become inactive and // get removed from the list of active buckets, at the time when we want to create // another part file for the bucket, if we start from 0 we may overwrite previous parts. this.maxPartCounter = Math.max(maxPartCounter, bucket.getPartCounter()); }
/**
 * Assembles the Kafka {@link ProducerRecord} for the given serialized key/value pair.
 *
 * <p>The record's timestamp is taken from the sink context only when
 * {@code writeTimestampToKafka} is enabled; otherwise {@code null} is passed so Kafka
 * assigns its own timestamp.
 *
 * @param context sink context supplying the event-time timestamp (may be null)
 * @param topic target topic
 * @param partition target partition, or null for broker-side partitioning
 * @param keyBytes serialized key, may be null
 * @param valueBytes serialized value
 * @return the producer record to hand to the Kafka producer
 */
@Override
protected ProducerRecord<byte[], byte[]> buildProducerRecord(
        Context context, String topic, Integer partition, byte[] keyBytes, byte[] valueBytes) {
    final Long timestamp = this.writeTimestampToKafka ? context.timestamp() : null;
    return new ProducerRecord<>(topic, partition, timestamp, keyBytes, valueBytes);
}
timestamp = context.timestamp();
timestamp = context.timestamp();
Bucket<IN, BucketID> onElement(final IN value, final SinkFunction.Context context) throws Exception { final long currentProcessingTime = context.currentProcessingTime(); // setting the values in the bucketer context bucketerContext.update( context.timestamp(), context.currentWatermark(), currentProcessingTime); final BucketID bucketId = bucketAssigner.getBucketId(value, bucketerContext); final Bucket<IN, BucketID> bucket = getOrCreateBucketForBucketId(bucketId); bucket.write(value, currentProcessingTime); // we update the global max counter here because as buckets become inactive and // get removed from the list of active buckets, at the time when we want to create // another part file for the bucket, if we start from 0 we may overwrite previous parts. this.maxPartCounter = Math.max(maxPartCounter, bucket.getPartCounter()); return bucket; }