@Override
public void emitDataStream(DataStream<Row> dataStream) {
    dataStream
        .addSink(new JDBCSinkFunction(outputFormat))
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), fieldNames));
}
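// A minimal usage sketch (not part of the snippet above), assuming the flink-jdbc
// JDBCAppendTableSink builder, which produces the JDBCOutputFormat consumed by
// JDBCSinkFunction; the driver, URL, query, and rowStream variable are placeholders.
JDBCAppendTableSink jdbcSink = JDBCAppendTableSink.builder()
    .setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
    .setDBUrl("jdbc:derby:memory:example")
    .setQuery("INSERT INTO books (id, title) VALUES (?, ?)")
    .setParameterTypes(Types.INT, Types.STRING)
    .build();
jdbcSink.emitDataStream(rowStream); // rowStream: an assumed DataStream<Row>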
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> stream) {
    // add sink
    stream
        .addSink(new CollectSink<>(targetAddress, targetPort, serializer))
        .name("SQL Client Stream Collect Sink")
        .setParallelism(1);
}
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    final SinkFunction<Row> kafkaProducer = createKafkaProducer(
        topic,
        properties,
        serializationSchema,
        partitioner);
    dataStream
        .addSink(kafkaProducer)
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), getFieldNames()));
}
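// A minimal usage sketch, assuming the flink-connector-kafka FlinkKafkaProducer;
// the topic name, bootstrap address, and stream variable are placeholders.
Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
stream.addSink(new FlinkKafkaProducer<>(
    "my-topic",
    new SimpleStringSchema(),
    props)); // stream: an assumed DataStream<String>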
/**
 * Writes a DataStream to the standard error stream (stderr).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> printToErr() {
    PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(true);
    return addSink(printFunction).name("Print to Std. Err");
}
/**
 * Writes a DataStream to the standard output stream (stdout).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> print() {
    PrintSinkFunction<T> printFunction = new PrintSinkFunction<>();
    return addSink(printFunction).name("Print to Std. Out");
}
/**
 * Writes a DataStream to the standard output stream (stdout).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> print(String sinkIdentifier) {
    PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, false);
    return addSink(printFunction).name("Print to Std. Out");
}
/**
 * Writes a DataStream to the standard error stream (stderr).
 *
 * <p>For each element of the DataStream the result of {@link Object#toString()} is written.
 *
 * <p>NOTE: This will print to stderr on the machine where the code is executed, i.e. the Flink
 * worker.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The closed DataStream.
 */
@PublicEvolving
public DataStreamSink<T> printToErr(String sinkIdentifier) {
    PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, true);
    return addSink(printFunction).name("Print to Std. Err");
}
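// A self-contained sketch exercising the print variants documented above.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3).print();          // stdout, operator named "Print to Std. Out"
env.fromElements(4, 5, 6).printToErr();     // stderr, operator named "Print to Std. Err"
env.fromElements(7, 8, 9).print("debug");   // stdout, each record prefixed with "debug"
env.execute("print-sink example");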
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraTupleSink<IN> sink = new CassandraTupleSink<>(
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraScalaProductSink<IN> sink = new CassandraScalaProductSink<>(
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    final ElasticsearchUpsertSinkFunction upsertFunction =
        new ElasticsearchUpsertSinkFunction(
            index,
            docType,
            keyDelimiter,
            keyNullLiteral,
            serializationSchema,
            contentType,
            requestFactory,
            keyFieldIndices);
    final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
        hosts,
        failureHandler,
        sinkOptions,
        upsertFunction);
    dataStream.addSink(sinkFunction)
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), getFieldNames()));
}
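// A usage sketch for attaching an Elasticsearch 6 sink directly via the public
// ElasticsearchSink.Builder API; host, index, and type names are placeholders.
List<HttpHost> httpHosts = Collections.singletonList(new HttpHost("127.0.0.1", 9200, "http"));
ElasticsearchSink.Builder<String> esSinkBuilder = new ElasticsearchSink.Builder<>(
    httpHosts,
    new ElasticsearchSinkFunction<String>() {
        @Override
        public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
            Map<String, String> json = new HashMap<>();
            json.put("data", element);
            indexer.add(Requests.indexRequest()
                .index("my-index")
                .type("my-type")
                .source(json));
        }
    });
esSinkBuilder.setBulkFlushMaxActions(1); // flush after every element (testing only)
stream.addSink(esSinkBuilder.build());   // stream: an assumed DataStream<String>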
@Override
public CassandraSink<IN> createSink() throws Exception {
    final CassandraPojoSink<IN> sink = new CassandraPojoSink<>(
        typeInfo.getTypeClass(),
        builder,
        mapperOptions,
        keyspace,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
@Override
protected CassandraSink<Row> createSink() throws Exception {
    final CassandraRowSink sink = new CassandraRowSink(
        typeInfo.getArity(),
        query,
        builder,
        configBuilder.build(),
        failureHandler);
    return new CassandraSink<>(input.addSink(sink).name("Cassandra Sink"));
}
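// A usage sketch of the public CassandraSink builder that dispatches to the
// createSink() variants above; query, host, and tupleStream are placeholders.
CassandraSink.addSink(tupleStream) // tupleStream: an assumed DataStream<Tuple2<String, Long>>
    .setQuery("INSERT INTO example.wordcount (word, count) VALUES (?, ?);")
    .setHost("127.0.0.1")
    .build();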
public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    setupEnvironment(env, pt);

    final MonotonicTTLTimeProvider ttlTimeProvider = setBackendWithCustomTTLTimeProvider(env);

    TtlTestConfig config = TtlTestConfig.fromArgs(pt);
    StateTtlConfig ttlConfig = StateTtlConfig.newBuilder(config.ttl)
        .cleanupIncrementally(5, true)
        .cleanupFullSnapshot()
        .build();

    env
        .addSource(new TtlStateUpdateSource(config.keySpace, config.sleepAfterElements, config.sleepTime))
        .name("TtlStateUpdateSource")
        .keyBy(TtlStateUpdate::getKey)
        .flatMap(new TtlVerifyUpdateFunction(ttlConfig, ttlTimeProvider, config.reportStatAfterUpdatesNum))
        .name("TtlVerifyUpdateFunction")
        .addSink(new PrintSinkFunction<>())
        .name("PrintFailedVerifications");

    env.execute("State TTL test job");
}
// Reconstructed test fragment: the dangling "})" implies an anonymous SinkFunction
// passed to addSink; the enclosing stream variable and the reflective
// sinkMethod/sink/resource5 setup are assumed from surrounding context.
stream.addSink(new SinkFunction<Integer>() {
    @Override
    public void invoke(Integer value) throws Exception {
    }
}).disableChaining().name("test_sink");
sinkMethod.invoke(sink, resource5);
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    SinkFunction<Row> kafkaProducer = createKafkaProducer(
        topic,
        properties,
        serializationSchema.orElseThrow(
            () -> new IllegalStateException("No serialization schema defined.")),
        partitioner);
    dataStream
        .addSink(kafkaProducer)
        .name(TableConnectorUtil.generateRuntimeName(this.getClass(), fieldNames));
}
@Override
public DataStreamSink<?> writeData(DataStream<Row> dataSet) {
    StreamOutputFormatBuilder builder = new StreamOutputFormatBuilder();
    builder.setPrint(print);
    OutputFormatSinkFunction formatSinkFunction = new OutputFormatSinkFunction(builder.finish());
    DataStreamSink<?> dataStreamSink = dataSet.addSink(formatSinkFunction);
    dataStreamSink.name("streamwriter");
    return dataStreamSink;
}
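// An equivalent attachment sketch: DataStream#writeUsingOutputFormat wraps an
// OutputFormat in an OutputFormatSinkFunction internally; format and rowStream
// are assumed placeholders.
DataStreamSink<Row> sink = rowStream.writeUsingOutputFormat(format);
sink.name("streamwriter");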