/**
 * Creates a sink that buffers incoming records and writes them to {@code path}
 * using the supplied {@link WriteFormat}. The target file is cleared up front
 * via {@code cleanFile(path)} so the sink always starts from an empty file.
 *
 * @param path   path of the target output file
 * @param format format used to serialize buffered tuples to the file
 */
public WriteSinkFunction(String path, WriteFormat<IN> format) {
    this.format = format;
    this.path = path;
    cleanFile(path);
}
/**
 * Aborts a transaction that was rejected by a coordinator after a failure.
 * Delegates to {@link #abort}; subclasses may override when aborting a
 * recovered transaction requires different handling than aborting a live one.
 */
protected void recoverAndAbort(TXN transaction) {
    abort(transaction);
}
/**
 * Initializes the sink and opens the underlying writer with this subtask's
 * index and the total parallelism, so each parallel instance can address its
 * own output partition.
 */
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    final StreamingRuntimeContext ctx = (StreamingRuntimeContext) getRuntimeContext();
    final int subtaskIndex = ctx.getIndexOfThisSubtask();
    final int numSubtasks = ctx.getNumberOfParallelSubtasks();
    writer.open(subtaskIndex, numSubtasks);
}
/**
 * Forwards to the superclass; this sink requires no additional
 * initialization of its own.
 */
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
}
@Override
public void close() throws Exception {
    // No resources of our own to release; defer to the superclass.
    super.close();
}
// Closes the enclosing anonymous sink expression (its opening is outside this
// view); parallelism 1 keeps a single sink instance.
}).setParallelism(1);
/**
 * Implementation of the invoke method of the SinkFunction class. Collects the
 * incoming tuples in tupleList and appends the buffered list to the end of the
 * target file whenever updateCondition() reports that a flush is due.
 *
 * <p>NOTE(review): a previous version of this comment also claimed a flush on
 * an "endTuple", but the code below only checks {@code updateCondition()} —
 * confirm whether end-of-stream flushing is handled elsewhere.</p>
 */
@Override
public void invoke(IN tuple) {
    tupleList.add(tuple);
    if (updateCondition()) {
        // Persist the buffered tuples and reset the buffer for the next batch.
        format.write(path, tupleList);
        resetParameters();
    }
}
/**
 * The single choke point for starting a transaction: pairs the transaction
 * produced by {@link #beginTransaction()} with its creation timestamp inside a
 * {@link TransactionHolder}. No other code may call {@code beginTransaction()}
 * directly, so transaction and timestamp are always created together.
 */
private TransactionHolder<TXN> beginTransactionInternal() throws Exception {
    final TXN transaction = beginTransaction();
    return new TransactionHolder<>(transaction, clock.millis());
}
/**
 * Disables the propagation of exceptions thrown when committing presumably timed out Kafka
 * transactions during recovery of the job. If a Kafka transaction is timed out, a commit will
 * never be successful. Hence, use this feature to avoid recovery loops of the Job. Exceptions
 * will still be logged to inform the user that data loss might have occurred.
 *
 * <p>Note that we use {@link System#currentTimeMillis()} to track the age of a transaction.
 * Moreover, only exceptions thrown during the recovery are caught, i.e., the producer will
 * attempt at least one commit of the transaction before giving up.</p>
 *
 * @return this producer instance, typed covariantly to allow fluent chaining
 */
@Override
public FlinkKafkaProducer011<IN> ignoreFailuresAfterTransactionTimeout() {
    super.ignoreFailuresAfterTransactionTimeout();
    // Return the concrete self type rather than the superclass type.
    return this;
}
/**
 * Writes the given value to the sink. This function is called for every record.
 *
 * <p>You have to override this method when implementing a {@code SinkFunction}, this is a
 * {@code default} method for backward compatibility with the old-style method only: it
 * simply drops the context and forwards to the single-argument {@code invoke(IN)}.
 *
 * @param value The input record.
 * @param context Additional context about the input record.
 *
 * @throws Exception This method may throw exceptions. Throwing an exception will cause the operation
 * to fail and may trigger recovery.
 */
default void invoke(IN value, Context context) throws Exception {
    invoke(value);
}
@Override
public void run() {
    try {
        // Need two messages here: send a FIN to cancel the client state FIN_WAIT_2
        // while the server is in CLOSE_WAIT.
        simpleSink.invoke(TEST_MESSAGE + '\n', SinkContextUtil.forTimestamp(0));
    } catch (Throwable t) {
        // Record the failure so the main test thread can assert on it later.
        error.set(t);
    }
}
};
/**
 * Rebuilds the outer {@link StateSerializer} from its nested serializers,
 * which arrive in a fixed order: transaction serializer first, context
 * serializer second.
 */
@Override
protected StateSerializer<TXN, CONTEXT> createOuterSerializerWithNestedSerializers(TypeSerializer<?>[] nestedSerializers) {
    // The array layout is fixed by the snapshot: [0] = transaction, [1] = context.
    @SuppressWarnings("unchecked")
    final TypeSerializer<TXN> txnSerializer = (TypeSerializer<TXN>) nestedSerializers[0];
    @SuppressWarnings("unchecked")
    final TypeSerializer<CONTEXT> ctxSerializer = (TypeSerializer<CONTEXT>) nestedSerializers[1];
    return new StateSerializer<>(txnSerializer, ctxSerializer);
}
/**
 * Invoked on recovered transactions after a failure. User implementation must ensure that this call will eventually
 * succeed. If it fails, Flink application will be restarted and it will be invoked again. If it does not succeed
 * eventually, a data loss will occur. Transactions will be recovered in an order in which they were created.
 * Delegates to {@link #commit}; override when committing a recovered transaction needs special handling.
 */
protected void recoverAndCommit(TXN transaction) {
    commit(transaction);
}
/**
 * Routes every incoming record into the currently open transaction. Final so
 * subclasses implement the transactional {@code invoke(TXN, IN, Context)}
 * variant instead of bypassing the transaction bookkeeping.
 */
@Override
public final void invoke(IN value, Context context) throws Exception {
    invoke(currentTransactionHolder.handle, value, context);
}
/**
 * Deserializes a state object from the given source. The {@code reuse}
 * instance is ignored; a fresh object is produced by the non-reusing overload.
 */
@Override
public State<TXN, CONTEXT> deserialize(State<TXN, CONTEXT> reuse, DataInputView source) throws IOException {
    return deserialize(source);
}
/**
 * Copies {@code from} into a new state object. The {@code reuse} instance is
 * ignored; copying is delegated to the non-reusing overload.
 */
@Override
public State<TXN, CONTEXT> copy(State<TXN, CONTEXT> from, State<TXN, CONTEXT> reuse) {
    return copy(from);
}
/**
 * Creates a point-in-time snapshot of this serializer's configuration,
 * wrapping this serializer instance.
 */
@Override
public StateSerializerSnapshot<TXN, CONTEXT> snapshotConfiguration() {
    return new StateSerializerSnapshot<>(this);
}
// Closes the enclosing serializer class (its opening is outside this view).
}
/**
 * Nullary constructor; passes the corresponding serializer class to the
 * superclass. Kept non-private (hence the suppression) — presumably required
 * for reflective instantiation during restore; TODO confirm.
 */
@SuppressWarnings("WeakerAccess")
public StateSerializerSnapshot() {
    super(correspondingSerializerClass());
}
/**
 * Forwards to the superclass; no additional resources to release here.
 */
@Override
public void close() throws Exception {
    super.close();
}
/**
 * Builds a serializer for {@code State<Integer, String>} backed by the stock
 * int and string element serializers; used as a test fixture supplier.
 */
private static TypeSerializer<TwoPhaseCommitSinkFunction.State<Integer, String>> intStringStateSerializerSupplier() {
    final TwoPhaseCommitSinkFunction.StateSerializer<Integer, String> serializer =
            new TwoPhaseCommitSinkFunction.StateSerializer<>(IntSerializer.INSTANCE, StringSerializer.INSTANCE);
    return serializer;
}
// Closes the enclosing test class (its opening is outside this view).
}