private void mapSinkOption(
        DescriptorProperties descriptorProperties,
        Map<SinkOption, String> options,
        String fromKey,
        SinkOption toKey) {
    descriptorProperties.getOptionalString(fromKey).ifPresent(v -> options.put(toKey, v));
}
public boolean isChangelogMode() {
    return properties.getOptionalString(EXECUTION_RESULT_MODE)
        .map((v) -> v.equals(EXECUTION_RESULT_MODE_VALUE_CHANGELOG))
        .orElse(false);
}

public boolean isStreamingExecution() {
    return properties.getOptionalString(EXECUTION_TYPE)
        .map((v) -> v.equals(EXECUTION_TYPE_VALUE_STREAMING))
        .orElse(false);
}

public boolean isBatchExecution() {
    return properties.getOptionalString(EXECUTION_TYPE)
        .map((v) -> v.equals(EXECUTION_TYPE_VALUE_BATCH))
        .orElse(false);
}

public boolean isTableMode() {
    return properties.getOptionalString(EXECUTION_RESULT_MODE)
        .map((v) -> v.equals(EXECUTION_RESULT_MODE_VALUE_TABLE))
        .orElse(false);
}
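Taken together, these predicates describe the configured execution type and result mode, with a missing property defaulting to false. A minimal caller sketch follows; the Execution receiver type and both display helpers are hypothetical names, not part of the original source:

// Sketch only: Execution is assumed to expose the predicates above;
// both display helpers are hypothetical.
void displayResults(Execution execution) {
    if (execution.isStreamingExecution() && execution.isChangelogMode()) {
        displayChangelogStream();   // continuously emit add/retract rows
    } else if (execution.isTableMode()) {
        displayMaterializedTable(); // show a materialized, updating table
    }
}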
/**
 * Returns a string value under the given existing key.
 */
public String getString(String key) {
    return getOptionalString(key).orElseThrow(exceptionSupplier(key));
}
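The contrast with getOptionalString is the error behavior: getString fails fast on a missing key, while getOptionalString lets the caller choose a fallback. A small sketch, assuming a populated DescriptorProperties instance named props and illustrative key names:

// Required key: throws (via exceptionSupplier) if "format.type" was never set.
String formatType = props.getString("format.type");

// Optional key: an absent value falls back to a caller-chosen default.
String delimiter = props.getOptionalString("format.field-delimiter").orElse(",");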
public String getGatewayAddress() {
    return properties.getOptionalString(DEPLOYMENT_GATEWAY_ADDRESS)
        .orElseGet(() -> useDefaultValue(DEPLOYMENT_GATEWAY_ADDRESS, ""));
}
public TimeCharacteristic getTimeCharacteristic() {
    return properties.getOptionalString(EXECUTION_TIME_CHARACTERISTIC)
        .flatMap((v) -> {
            switch (v) {
                case EXECUTION_TIME_CHARACTERISTIC_VALUE_EVENT_TIME:
                    return Optional.of(TimeCharacteristic.EventTime);
                case EXECUTION_TIME_CHARACTERISTIC_VALUE_PROCESSING_TIME:
                    return Optional.of(TimeCharacteristic.ProcessingTime);
                default:
                    return Optional.empty();
            }
        })
        .orElseGet(() -> useDefaultValue(
            EXECUTION_TIME_CHARACTERISTIC,
            TimeCharacteristic.EventTime,
            EXECUTION_TIME_CHARACTERISTIC_VALUE_EVENT_TIME));
}
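The resolved characteristic is typically applied to the stream environment before any time-based operators are built. A minimal sketch of that wiring, assuming this method is reachable from the setup code:

// Sketch: apply the configured characteristic to the execution environment.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStreamTimeCharacteristic(getTimeCharacteristic());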
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
    return descriptorProperties
        .getOptionalString(CONNECTOR_SINK_PARTITIONER)
        .flatMap((String partitionerString) -> {
            switch (partitionerString) {
                case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
                    return Optional.of(new FlinkFixedPartitioner<>());
                case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
                    return Optional.empty();
                case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
                    final Class<? extends FlinkKafkaPartitioner> partitionerClass =
                        descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
                    return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
                default:
                    throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
            }
        });
}
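For reference, the switch is driven by two descriptor properties. A sketch with illustrative values, assuming the conventional key strings behind the named constants; the custom class name is hypothetical, and "round-robin" deliberately yields an empty Optional so that Kafka's own partitioning takes over:

// Sketch: descriptor properties driving each branch of the switch above.
Map<String, String> props = new HashMap<>();
props.put("connector.sink-partitioner", "custom");                           // or "fixed" / "round-robin"
props.put("connector.sink-partitioner-class", "com.example.MyPartitioner");  // hypothetical class, read only for "custom"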
final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
final StartupMode startupMode = descriptorProperties
    .getOptionalString(CONNECTOR_STARTUP_MODE)
    .map(modeString -> {
        switch (modeString) {
            case CONNECTOR_STARTUP_MODE_VALUE_EARLIEST:
                return StartupMode.EARLIEST;
            case CONNECTOR_STARTUP_MODE_VALUE_LATEST:
                return StartupMode.LATEST;
            case CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS:
                return StartupMode.GROUP_OFFSETS;
            case CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS:
                // this branch is also expected to fill the specificOffsets map above
                // from the indexed connector.specific-offsets.* properties
                return StartupMode.SPECIFIC_OFFSETS;
            default:
                throw new TableException("Unsupported startup mode. Validator should have checked that.");
        }
    })
    .orElse(StartupMode.GROUP_OFFSETS);
mapSinkOption(descriptorProperties, options, CONNECTOR_BULK_FLUSH_INTERVAL, SinkOption.BULK_FLUSH_INTERVAL);
descriptorProperties.getOptionalString(CONNECTOR_BULK_FLUSH_BACKOFF_TYPE)
    .ifPresent(v -> {
        // unlike the plain mapSinkOption calls, the backoff type needs a custom
        // mapping: derive the enabled flag first, then the type itself
        options.put(
            SinkOption.BULK_FLUSH_BACKOFF_ENABLED,
            String.valueOf(!v.equals(CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_DISABLED)));
        if (!v.equals(CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_DISABLED)) {
            options.put(SinkOption.BULK_FLUSH_BACKOFF_TYPE, v.toUpperCase());
        }
    });
private ActionRequestFailureHandler getFailureHandler(DescriptorProperties descriptorProperties) {
    final String failureHandler = descriptorProperties
        .getOptionalString(CONNECTOR_FAILURE_HANDLER)
        .orElse(DEFAULT_FAILURE_HANDLER);
    switch (failureHandler) {
        case CONNECTOR_FAILURE_HANDLER_VALUE_FAIL:
            return new NoOpFailureHandler();
        case CONNECTOR_FAILURE_HANDLER_VALUE_IGNORE:
            return new IgnoringFailureHandler();
        case CONNECTOR_FAILURE_HANDLER_VALUE_RETRY:
            return new RetryRejectedExecutionFailureHandler();
        case CONNECTOR_FAILURE_HANDLER_VALUE_CUSTOM:
            final Class<? extends ActionRequestFailureHandler> clazz = descriptorProperties
                .getClass(CONNECTOR_FAILURE_HANDLER_CLASS, ActionRequestFailureHandler.class);
            return InstantiationUtil.instantiate(clazz);
        default:
            throw new IllegalArgumentException("Unknown failure handler.");
    }
}
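The custom branch only needs a class with a public no-argument constructor, since InstantiationUtil.instantiate constructs it reflectively. A sketch of such a handler follows; the class name is hypothetical, while the onFailure signature follows Flink's ActionRequestFailureHandler interface:

import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;

// Hypothetical handler: drops failed requests instead of retrying or failing the job.
public class DropOnFailureHandler implements ActionRequestFailureHandler {
    @Override
    public void onFailure(
            ActionRequest action,
            Throwable failure,
            int restStatusCode,
            RequestIndexer indexer) {
        // intentionally empty: the failed request is neither re-added nor rethrown
    }
}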
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    return createElasticsearchUpsertTableSink(
        descriptorProperties.isValue(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()),
        descriptorProperties.getTableSchema(SCHEMA()),
        getHosts(descriptorProperties),
        descriptorProperties.getString(CONNECTOR_INDEX),
        descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
        getSerializationSchema(properties),
        SUPPORTED_CONTENT_TYPE,
        getFailureHandler(descriptorProperties),
        getSinkOptions(descriptorProperties));
}
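The factory reads its configuration from a flat descriptor map. A sketch with illustrative values, assuming the conventional key strings behind the named constants; the required/optional split mirrors the getString versus getOptionalString calls above:

// Sketch: illustrative descriptor map for this factory.
Map<String, String> props = new HashMap<>();
props.put("update-mode", "append");
props.put("connector.index", "my-index");         // required: getString fails if absent
props.put("connector.document-type", "my-type");  // required
props.put("connector.key-delimiter", "_");        // optional, falls back to DEFAULT_KEY_DELIMITER
props.put("connector.key-null-literal", "n/a");   // optional, falls back to DEFAULT_KEY_NULL_LITERAL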
// resolve the alias ("from") name of field i, falling back to the physical field
// name; the assignment target and receiver are inferred from the use of aliasName below
final String aliasName = properties
    .getOptionalString(SCHEMA + '.' + i + '.' + SCHEMA_FROM)
    .orElse(fieldName);
builder.field(aliasName, fieldType);
@Override
public AppendStreamTableSink<Row> getAppendStreamTableSink(ExternalCatalogTable table) throws IOException {
    DescriptorProperties params = new DescriptorProperties(true);
    table.addProperties(params);

    String topic = params.getString(TOPIC_NAME_KEY);
    Properties conf = new Properties();
    conf.putAll(params.getPrefix(KAFKA_CONFIG_PREFIX));

    String partitionerClass = params.getOptionalString(PARTITIONER_CLASS_NAME_KEY)
        .orElse(PARTITIONER_CLASS_NAME_DEFAULT);
    FlinkKafkaPartitioner<Row> partitioner;
    try {
        partitioner = KafkaUtils.instantiatePartitioner(partitionerClass);
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new IOException(e);
    }

    return new Kafka09JsonTableSink(topic, conf, partitioner);
}
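A sketch of the catalog-table properties this method consumes; the concrete key strings standing in for TOPIC_NAME_KEY, KAFKA_CONFIG_PREFIX, and PARTITIONER_CLASS_NAME_KEY are assumptions here, as are the example values:

// Sketch only: key strings are assumed stand-ins for the constants above.
Map<String, String> tableProps = new HashMap<>();
tableProps.put("topic", "orders");                                    // TOPIC_NAME_KEY
tableProps.put("kafka.bootstrap.servers", "localhost:9092");          // under KAFKA_CONFIG_PREFIX
tableProps.put("partitioner.class", "com.example.MyRowPartitioner");  // PARTITIONER_CLASS_NAME_KEY (hypothetical class)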
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
    final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());

    return createElasticsearchUpsertTableSink(
        descriptorProperties.isValue(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()),
        schema,
        getHosts(descriptorProperties),
        descriptorProperties.getString(CONNECTOR_INDEX),
        descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
        getSerializationSchema(properties),
        SUPPORTED_CONTENT_TYPE,
        getFailureHandler(descriptorProperties),
        getSinkOptions(descriptorProperties));
}