@Override
public final Map<String, String> toProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    properties.putString(FormatDescriptorValidator.FORMAT_TYPE, type);
    properties.putInt(FormatDescriptorValidator.FORMAT_PROPERTY_VERSION, version);
    properties.putProperties(toFormatProperties());
    return properties.asMap();
}
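To show where toFormatProperties() plugs into the map built above, here is a minimal hypothetical subclass; the format name "my-format", the property key and the (type, version) super constructor are assumptions for illustration only, not taken from the snippet.

// Hypothetical format descriptor, for illustration only.
public static class MyFormat extends FormatDescriptor {

    public MyFormat() {
        super("my-format", 1); // assumed (type, version) constructor on the base class
    }

    @Override
    protected Map<String, String> toFormatProperties() {
        final DescriptorProperties properties = new DescriptorProperties();
        properties.putString("format.my-option", "enabled"); // made-up key
        return properties.asMap();
    }
}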
private TypeInformation<Row> createTypeInformation(DescriptorProperties descriptorProperties) {
    if (descriptorProperties.containsKey(JsonValidator.FORMAT_SCHEMA)) {
        return (RowTypeInfo) descriptorProperties.getType(JsonValidator.FORMAT_SCHEMA);
    } else if (descriptorProperties.containsKey(JsonValidator.FORMAT_JSON_SCHEMA)) {
        return JsonRowSchemaConverter.convert(descriptorProperties.getString(JsonValidator.FORMAT_JSON_SCHEMA));
    } else {
        return deriveSchema(descriptorProperties.asMap()).toRowType();
    }
}
private void validateHosts(DescriptorProperties properties) {
    final Map<String, Consumer<String>> hostsValidators = new HashMap<>();
    hostsValidators.put(CONNECTOR_HOSTS_HOSTNAME, (key) -> properties.validateString(key, false, 1));
    hostsValidators.put(CONNECTOR_HOSTS_PORT, (key) -> properties.validateInt(key, false, 0, 65535));
    hostsValidators.put(CONNECTOR_HOSTS_PROTOCOL, (key) -> properties.validateString(key, false, 1));
    properties.validateFixedIndexedProperties(CONNECTOR_HOSTS, false, hostsValidators);
}
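For context, a sketch of the indexed host properties this validator is meant to accept; the literal key strings (connector.hosts.#.hostname and so on) are assumptions derived from the CONNECTOR_HOSTS_* constant names, not shown in the snippet above.

// Hypothetical input, assuming CONNECTOR_HOSTS resolves to "connector.hosts"
// and the sub-keys to "hostname", "port" and "protocol".
final Map<String, String> props = new HashMap<>();
props.put("connector.hosts.0.hostname", "localhost");
props.put("connector.hosts.0.port", "9200");
props.put("connector.hosts.0.protocol", "http");
props.put("connector.hosts.1.hostname", "node-2");
props.put("connector.hosts.1.port", "9200");
props.put("connector.hosts.1.protocol", "http");

final DescriptorProperties properties = new DescriptorProperties(true);
properties.putProperties(props);
validateHosts(properties); // each indexed entry is checked by the per-key validators above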
@Override
protected void validate(DescriptorProperties properties) {
    properties.validateString(TABLES_HISTORY_TABLE, false, 1);
    properties.validateArray(
        TABLES_PRIMARY_KEY,
        (key) -> properties.validateString(key, false, 1),
        1,
        1); // currently, composite primary keys are not supported
    properties.validateString(TABLES_TIME_ATTRIBUTE, false, 1);
}
}
@Override
protected void validate(DescriptorProperties properties) {
    properties.validateLong(DEPLOYMENT_RESPONSE_TIMEOUT, true, 0);
    properties.validateString(DEPLOYMENT_GATEWAY_ADDRESS, true, 0);
    properties.validateInt(DEPLOYMENT_GATEWAY_PORT, true, 0, 65535);
}
private void validateConnectionProperties(DescriptorProperties properties) {
    properties.validateInt(CONNECTOR_CONNECTION_MAX_RETRY_TIMEOUT, true, 1);
    properties.validateString(CONNECTOR_CONNECTION_PATH_PREFIX, true);
}
}
/**
 * Converts key-value properties from YAML into the normalized key-value format of the Table API.
 */
public static DescriptorProperties normalizeYaml(Map<String, Object> yamlMap) {
    final Map<String, String> normalized = new HashMap<>();
    yamlMap.forEach((k, v) -> normalizeYamlObject(normalized, k, v));
    final DescriptorProperties properties = new DescriptorProperties(true);
    properties.putProperties(normalized);
    return properties;
}
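A minimal usage sketch, assuming normalizeYamlObject (not shown above) flattens nested maps into dot-separated keys; the expected output keys are an assumption based on that reading.

// Hypothetical YAML content:
//   connector:
//     type: kafka
//     topic: test-topic
final Map<String, Object> connector = new LinkedHashMap<>();
connector.put("type", "kafka");
connector.put("topic", "test-topic");
final Map<String, Object> yamlMap = new LinkedHashMap<>();
yamlMap.put("connector", connector);

final DescriptorProperties normalized = normalizeYaml(yamlMap);
// Assumed result: "connector.type" -> "kafka", "connector.topic" -> "test-topic"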
final DescriptorProperties descriptorProperties = new DescriptorProperties();
descriptorProperties.putProperties(properties);
final TableSchema baseSchema = descriptorProperties.getTableSchema(SCHEMA);
final TableSchema.Builder builder = TableSchema.builder();
for (int i = 0; i < baseSchema.getFieldCount(); i++) {
    final String fieldName = baseSchema.getFieldNames()[i];
    final TypeInformation<?> fieldType = baseSchema.getFieldTypes()[i];
    final boolean isProctime = descriptorProperties
        .getOptionalBoolean(SCHEMA + '.' + i + '.' + SCHEMA_PROCTIME)
        .orElse(false);
    final String timestampKey = SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_TYPE;
    final boolean isRowtime = descriptorProperties.containsKey(timestampKey);
    if (!isProctime && !isRowtime) {
        // regular field: use the alias from 'schema.#.from' if present
        final String aliasName = descriptorProperties
            .getOptionalString(SCHEMA + '.' + i + '.' + SCHEMA_FROM)
            .orElse(fieldName);
        builder.field(aliasName, fieldType);
    } else if (isRowtime &&
            descriptorProperties.isValue(timestampKey, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD)) {
        // rowtime attribute that references an existing field
        final String aliasName = descriptorProperties
            .getString(SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_FROM);
        builder.field(aliasName, fieldType);
    }
}
@Override
public final Map<String, String> toProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    properties.putString(CONNECTOR_TYPE, type);
    properties.putLong(CONNECTOR_PROPERTY_VERSION, version);
    properties.putProperties(toConnectorProperties());
    return properties.asMap();
}
@Override
protected Map<String, String> toFormatProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    if (null != recordClass) {
        properties.putClass(AvroValidator.FORMAT_RECORD_CLASS, recordClass);
    }
    if (null != avroSchema) {
        properties.putString(AvroValidator.FORMAT_AVRO_SCHEMA, avroSchema);
    }
    return properties.asMap();
}
}
@Override
public StreamTableSource<Row> create(Map<String, String> properties) {
    DescriptorProperties params = new DescriptorProperties(true);
    params.putProperties(properties);
    TableSchema schema = params.getTableSchema(TOPIC_SCHEMA_KEY);
    String topic = params.getString(TOPIC_NAME_KEY);
    Properties conf = new Properties();
    conf.putAll(params.getPrefix(KAFKA_CONFIG_PREFIX));
    return new JsonTableSource(topic, conf, schema);
}
}
@Override
protected Map<String, String> toFormatProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    if (deriveSchema != null) {
        properties.putBoolean(FORMAT_DERIVE_SCHEMA, deriveSchema);
    }
    if (jsonSchema != null) {
        properties.putString(FORMAT_JSON_SCHEMA, jsonSchema);
    }
    if (schema != null) {
        properties.putString(FORMAT_SCHEMA, schema);
    }
    if (failOnMissingField != null) {
        properties.putBoolean(FORMAT_FAIL_ON_MISSING_FIELD, failOnMissingField);
    }
    return properties.asMap();
}
}
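A sketch of how such a JSON format descriptor is typically filled before its properties are collected; the fluent setters (jsonSchema, failOnMissingField) are assumed to simply assign the fields checked above, and the resulting key names are assumptions based on the FORMAT_* constant names.

final Json json = new Json()
    .jsonSchema("{ \"type\": \"object\", \"properties\": { \"id\": { \"type\": \"integer\" } } }")
    .failOnMissingField(true);

// Expected entries (key names assumed):
//   format.json-schema           -> the JSON schema string
//   format.fail-on-missing-field -> true
final Map<String, String> formatProps = json.toProperties();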
@Override
protected Map<String, String> toConnectorProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    properties.putProperties(internalProperties);

    final List<List<String>> hostValues = hosts.stream()
        .map(host -> Arrays.asList(host.hostname, String.valueOf(host.port), host.protocol))
        .collect(Collectors.toList());
    properties.putIndexedFixedProperties(
        CONNECTOR_HOSTS,
        Arrays.asList(CONNECTOR_HOSTS_HOSTNAME, CONNECTOR_HOSTS_PORT, CONNECTOR_HOSTS_PROTOCOL),
        hostValues);

    return properties.asMap();
}
}
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    return createElasticsearchUpsertTableSink(
        descriptorProperties.isValue(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()),
        descriptorProperties.getTableSchema(SCHEMA()),
        getHosts(descriptorProperties),
        descriptorProperties.getString(CONNECTOR_INDEX),
        descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
        descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
        getSerializationSchema(properties),
        SUPPORTED_CONTENT_TYPE,
        getFailureHandler(descriptorProperties),
        getSinkOptions(descriptorProperties));
}
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
    final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
    final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
    final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
    final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
        SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

    // see also FLINK-9870
    if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
            checkForCustomFieldMapping(descriptorProperties, schema)) {
        throw new TableException("Time attributes and custom field mappings are not supported yet.");
    }

    return createKafkaTableSink(
        schema,
        topic,
        getKafkaProperties(descriptorProperties),
        getFlinkKafkaPartitioner(descriptorProperties),
        getSerializationSchema(properties));
}
protected void addPropertyAndVerify(Descriptor descriptor, String property, String newValue) {
    final DescriptorProperties properties = new DescriptorProperties();
    properties.putProperties(descriptor.toProperties());
    final DescriptorProperties copy = properties.withoutKeys(Collections.singletonList(property));
    copy.putString(property, newValue);
    validator().validate(copy);
}
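A hypothetical invocation of this test helper, just to illustrate the call shape: it overwrites one key of a descriptor's property map and re-runs the validator on the modified copy. The descriptor factory method and the key/value below are made up.

// Hypothetical: replace a single key on an otherwise valid descriptor and revalidate.
addPropertyAndVerify(minimalDescriptor(), "connector.property-version", "1");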
private void validateStartupMode(DescriptorProperties properties) {
    final Map<String, Consumer<String>> specificOffsetValidators = new HashMap<>();
    specificOffsetValidators.put(
        CONNECTOR_SPECIFIC_OFFSETS_PARTITION,
        (key) -> properties.validateInt(key, false, 0, Integer.MAX_VALUE));
    specificOffsetValidators.put(
        CONNECTOR_SPECIFIC_OFFSETS_OFFSET,
        (key) -> properties.validateLong(key, false, 0, Long.MAX_VALUE));

    final Map<String, Consumer<String>> startupModeValidation = new HashMap<>();
    startupModeValidation.put(CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS, noValidation());
    startupModeValidation.put(CONNECTOR_STARTUP_MODE_VALUE_EARLIEST, noValidation());
    startupModeValidation.put(CONNECTOR_STARTUP_MODE_VALUE_LATEST, noValidation());
    startupModeValidation.put(
        CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS,
        key -> properties.validateFixedIndexedProperties(CONNECTOR_SPECIFIC_OFFSETS, false, specificOffsetValidators));
    properties.validateEnum(CONNECTOR_STARTUP_MODE, true, startupModeValidation);
}
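For illustration, properties that this validation is meant to accept when specific offsets are configured; the literal key and value strings are assumptions derived from the constant names above.

// Hypothetical property map for the "specific-offsets" startup mode (assumed literals).
final DescriptorProperties properties = new DescriptorProperties(true);
properties.putString("connector.startup-mode", "specific-offsets");
properties.putString("connector.specific-offsets.0.partition", "0");
properties.putString("connector.specific-offsets.0.offset", "42");
properties.putString("connector.specific-offsets.1.partition", "1");
properties.putString("connector.specific-offsets.1.offset", "300");
validateStartupMode(properties);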
private void validateBulkFlush(DescriptorProperties properties) {
    properties.validateBoolean(CONNECTOR_FLUSH_ON_CHECKPOINT, true);
    properties.validateInt(CONNECTOR_BULK_FLUSH_MAX_ACTIONS, true, 1);
    properties.validateMemorySize(CONNECTOR_BULK_FLUSH_MAX_SIZE, true, 1024 * 1024); // only allow MB precision
    properties.validateLong(CONNECTOR_BULK_FLUSH_INTERVAL, true, 0);
    properties.validateEnumValues(CONNECTOR_BULK_FLUSH_BACKOFF_TYPE, true, Arrays.asList(
        CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_DISABLED,
        CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_CONSTANT,
        CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_EXPONENTIAL));
    properties.validateInt(CONNECTOR_BULK_FLUSH_BACKOFF_MAX_RETRIES, true, 1);
    properties.validateLong(CONNECTOR_BULK_FLUSH_BACKOFF_DELAY, true, 0);
}
/**
 * Configures a failure handling strategy in case a request to Elasticsearch fails.
 *
 * <p>This strategy ignores failures and drops the request.
 */
public Elasticsearch failureHandlerIgnore() {
    internalProperties.putString(CONNECTOR_FAILURE_HANDLER, ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_IGNORE);
    return this;
}
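A sketch of the fluent style in which this setter is typically used; the other builder methods shown (version, host, index, documentType) are assumed to exist on the same descriptor class and are not taken from the snippet above.

// Hypothetical fluent usage of the Elasticsearch descriptor.
final Elasticsearch elasticsearch = new Elasticsearch()
    .version("6")
    .host("localhost", 9200, "http")
    .index("my-index")
    .documentType("my-type")
    .failureHandlerIgnore(); // drops failed requests instead of failing the job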
public static ViewEntry create(String name, String query) {
    final DescriptorProperties properties = new DescriptorProperties(true);
    properties.putString(TABLES_QUERY, query);
    return new ViewEntry(name, properties);
}
}