@Override
public DataStreamSink<T> disableChaining() {
    this.transformation.setChainingStrategy(ChainingStrategy.NEVER);
    return this;
}
/**
 * Sets the name of this sink. This name is used by the
 * visualization and logging during runtime.
 *
 * @param name The name for this sink.
 * @return The named sink.
 */
public DataStreamSink<T> name(String name) {
    transformation.setName(name);
    return this;
}
@Override
public DataStreamSink<T> setParallelism(int parallelism) {
    transformation.setParallelism(parallelism);
    return this;
}
/**
 * Transforms a {@code SinkTransformation}.
 */
private <T> Collection<Integer> transformSink(SinkTransformation<T> sink) {

    Collection<Integer> inputIds = transform(sink.getInput());

    String slotSharingGroup = determineSlotSharingGroup(sink.getSlotSharingGroup(), inputIds);

    streamGraph.addSink(sink.getId(),
            slotSharingGroup,
            sink.getCoLocationGroupKey(),
            sink.getOperator(),
            sink.getInput().getOutputType(),
            null,
            "Sink: " + sink.getName());

    streamGraph.setParallelism(sink.getId(), sink.getParallelism());
    streamGraph.setMaxParallelism(sink.getId(), sink.getMaxParallelism());

    for (Integer inputId : inputIds) {
        streamGraph.addEdge(inputId, sink.getId(), 0);
    }

    if (sink.getStateKeySelector() != null) {
        TypeSerializer<?> keySerializer = sink.getStateKeyType().createSerializer(env.getConfig());
        streamGraph.setOneInputStateKey(sink.getId(), sink.getStateKeySelector(), keySerializer);
    }

    return Collections.emptyList();
}
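A minimal sketch of the effect of this translation, assuming a local StreamExecutionEnvironment and JUnit's assertEquals: the parallelism configured on the DataStreamSink ends up on the corresponding StreamGraph node once the graph is generated.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStreamSink<Long> sink = env.generateSequence(0, 9).print().setParallelism(2);
// After translation, the stream graph node created for the sink carries the configured parallelism.
StreamGraph streamGraph = env.getStreamGraph();
assertEquals(2, streamGraph.getStreamNode(sink.getTransformation().getId()).getParallelism());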
@Override
public DataStreamSink<T> setUidHash(String uidHash) {
    transformation.setUidHash(uidHash);
    return this;
}
@Override
public DataStreamSink<T> uid(String uid) {
    transformation.setUid(uid);
    return this;
}
@Override
public DataStreamSink<T> slotSharingGroup(String slotSharingGroup) {
    transformation.setSlotSharingGroup(slotSharingGroup);
    return this;
}
}
private static Integer createDownStreamId(DataStream<?> dataStream) {
    return dataStream.print().getTransformation().getId();
}
private FlinkKafkaProducer010Configuration(
        DataStreamSink<T> originalSink,
        DataStream<T> inputStream,
        FlinkKafkaProducer010<T> producer) {
    //noinspection unchecked
    super(inputStream, originalSink.getTransformation().getOperator());
    this.transformation = originalSink.getTransformation();
    this.producer = producer;
}
@SuppressWarnings("unchecked")
protected DataStreamSink(DataStream<T> inputStream, StreamSink<T> operator) {
    this.transformation = new SinkTransformation<T>(
            inputStream.getTransformation(),
            "Unnamed",
            operator,
            inputStream.getExecutionEnvironment().getParallelism());
}
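A small usage sketch, assuming a DiscardingSink and JUnit's assertEquals: DataStream.addSink(...) wraps the sink function in a StreamSink operator and invokes this constructor, so the new SinkTransformation initially picks up the environment's default parallelism.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(4);
// addSink(...) constructs the DataStreamSink shown above.
DataStreamSink<String> sink = env.fromElements("a", "b").addSink(new DiscardingSink<String>());
assertEquals(4, sink.getTransformation().getParallelism());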
/**
 * Sets a user-provided hash for this operator. This will be used AS IS to create the JobVertexID.
 *
 * <p>The user-provided hash is an alternative to the generated hashes; it is considered when identifying an
 * operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
 *
 * <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting. The provided hash
 * needs to be unique per transformation and job; otherwise, job submission will fail. Furthermore, you cannot
 * assign a user-specified hash to intermediate nodes in an operator chain; attempting to do so will cause your
 * job to fail.
 *
 * <p>A use case for this is migration between Flink versions or changing a job in a way that changes the
 * automatically generated hashes. In this case, providing the previous hashes directly through this method
 * (e.g. obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
 *
 * @param uidHash The user-provided hash for this operator. This will become the JobVertexID, which is shown in
 *                the logs and web UI.
 * @return The operator with the user-provided hash.
 */
@PublicEvolving
public DataStreamSink<T> setUidHash(String uidHash) {
    transformation.setUidHash(uidHash);
    return this;
}
/**
 * Sets an ID for this operator.
 *
 * <p>The specified ID is used to assign the same operator ID across job
 * submissions (for example when starting a job from a savepoint).
 *
 * <p><strong>Important</strong>: this ID needs to be unique per
 * transformation and job. Otherwise, job submission will fail.
 *
 * @param uid The unique user-specified ID of this transformation.
 * @return The operator with the specified ID.
 */
@PublicEvolving
public DataStreamSink<T> uid(String uid) {
    transformation.setUid(uid);
    return this;
}
/**
 * Sets the slot sharing group of this operation. Parallel instances of
 * operations that are in the same slot sharing group will be co-located in the same
 * TaskManager slot, if possible.
 *
 * <p>Operations inherit the slot sharing group of input operations if all input operations
 * are in the same slot sharing group and no slot sharing group was explicitly specified.
 *
 * <p>Initially an operation is in the default slot sharing group. An operation can be put into
 * the default group explicitly by setting the slot sharing group to {@code "default"}.
 *
 * @param slotSharingGroup The slot sharing group name.
 */
@PublicEvolving
public DataStreamSink<T> slotSharingGroup(String slotSharingGroup) {
    transformation.setSlotSharingGroup(slotSharingGroup);
    return this;
}
}
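A short usage sketch of these fluent settings, assuming a DiscardingSink; the sink name, uid, and group name are illustrative only.

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3)
        .addSink(new DiscardingSink<Integer>())
        .name("numbers-sink")                 // shown in the web UI and logs
        .uid("numbers-sink-uid")              // keeps operator state addressable across savepoint restores
        .slotSharingGroup("sinks");           // isolate sink instances from the default group
env.execute("sink-configuration-example");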
assertNull(env.getStreamGraph().getStreamNode(sink.getTransformation().getId()).getStatePartitioner1());
assertTrue(env.getStreamGraph().getStreamNode(sink.getTransformation().getId()).getInEdges().get(0).getPartitioner() instanceof ForwardPartitioner);

assertNotNull(env.getStreamGraph().getStreamNode(sink2.getTransformation().getId()).getStatePartitioner1());
assertNotNull(env.getStreamGraph().getStreamNode(sink2.getTransformation().getId()).getStateKeySerializer());
assertEquals(key1, env.getStreamGraph().getStreamNode(sink2.getTransformation().getId()).getStatePartitioner1());
assertTrue(env.getStreamGraph().getStreamNode(sink2.getTransformation().getId()).getInEdges().get(0).getPartitioner() instanceof KeyGroupStreamPartitioner);

assertNotNull(env.getStreamGraph().getStreamNode(sink3.getTransformation().getId()).getStatePartitioner1());
assertEquals(key2, env.getStreamGraph().getStreamNode(sink3.getTransformation().getId()).getStatePartitioner1());
assertTrue(env.getStreamGraph().getStreamNode(sink3.getTransformation().getId()).getInEdges().get(0).getPartitioner() instanceof KeyGroupStreamPartitioner);
@Override
public DataStreamSink<T> name(String name) {
    transformation.setName(name);
    return this;
}
/**
 * Sets a user-provided hash for this operator. This will be used AS IS to create the JobVertexID.
 *
 * <p>The user-provided hash is an alternative to the generated hashes; it is considered when identifying an
 * operator through the default hash mechanics fails (e.g. because of changes between Flink versions).
 *
 * <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting. The provided hash
 * needs to be unique per transformation and job; otherwise, job submission will fail. Furthermore, you cannot
 * assign a user-specified hash to intermediate nodes in an operator chain; attempting to do so will cause your
 * job to fail.
 *
 * <p>A use case for this is migration between Flink versions or changing a job in a way that changes the
 * automatically generated hashes. In this case, providing the previous hashes directly through this method
 * (e.g. obtained from old logs) can help to reestablish a lost mapping from states to their target operator.
 *
 * @param uidHash The user-provided hash for this operator. This will become the JobVertexID, which is shown in
 *                the logs and web UI.
 * @return The operator with the user-provided hash.
 */
@PublicEvolving
public CassandraSink<IN> setUidHash(String uidHash) {
    if (useDataStreamSink) {
        getSinkTransformation().setUidHash(uidHash);
    } else {
        getStreamTransformation().setUidHash(uidHash);
    }
    return this;
}
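A hedged usage sketch, assuming the flink-connector-cassandra builder API; the input stream, host, keyspace, query, and hash value are placeholders.

// wordCounts is assumed to be a DataStream<Tuple2<String, Long>>.
CassandraSink<Tuple2<String, Long>> sink = CassandraSink
        .addSink(wordCounts)
        .setQuery("INSERT INTO example.wordcount (word, count) VALUES (?, ?);")
        .setHost("127.0.0.1")
        .build();
// setUidHash expects a 32-character hex string and is forwarded to whichever
// transformation backs this sink (the DataStreamSink or the stream transformation).
sink.setUidHash("6c5b2a4f9e1d4b7a8c3e0f1a2b3c4d5e");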