/**
 * Configures the failure handling strategy applied when an Elasticsearch request fails.
 *
 * <p>With this strategy, a failed request is silently dropped and processing continues.
 */
public Elasticsearch failureHandlerIgnore() {
    internalProperties.putString(
        CONNECTOR_FAILURE_HANDLER,
        ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_IGNORE);
    return this;
}
/**
 * Configures how elements are buffered before being sent to the cluster in bulk.
 *
 * <p>Selects a constant-delay backoff strategy for retrying flushed bulk requests.
 */
public Elasticsearch bulkFlushBackoffConstant() {
    internalProperties.putString(
        CONNECTOR_BULK_FLUSH_BACKOFF_TYPE,
        ElasticsearchValidator.CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_CONSTANT);
    return this;
}
/**
 * Sets connection properties used for REST communication with Elasticsearch.
 *
 * <p>The given prefix is prepended to the path of every REST request.
 *
 * @param pathPrefix prefix string added to every REST communication path
 */
public Elasticsearch connectionPathPrefix(String pathPrefix) {
    internalProperties.putString(CONNECTOR_CONNECTION_PATH_PREFIX, pathPrefix);
    return this;
}
/**
 * Sets the Elasticsearch version to connect to. Required.
 *
 * @param version Elasticsearch major version, e.g. {@code "6"}
 */
public Elasticsearch version(String version) {
    internalProperties.putString(CONNECTOR_VERSION, version);
    return this;
}
/**
 * Declares the Elasticsearch index that every record is written to. Required.
 *
 * @param index name of the target Elasticsearch index
 */
public Elasticsearch index(String index) {
    internalProperties.putString(CONNECTOR_INDEX, index);
    return this;
}
/**
 * Sets a custom literal that represents null fields in document keys. Optional.
 *
 * @param keyNullLiteral null replacement string; e.g. {@code "N/A"} yields IDs like
 *     {@code "KEY1_N/A_KEY3"}
 */
public Elasticsearch keyNullLiteral(String keyNullLiteral) {
    internalProperties.putString(CONNECTOR_KEY_NULL_LITERAL, keyNullLiteral);
    return this;
}
/**
 * Declares the Elasticsearch document type used for every record. Required.
 *
 * @param documentType Elasticsearch document type name
 */
public Elasticsearch documentType(String documentType) {
    internalProperties.putString(CONNECTOR_DOCUMENT_TYPE, documentType);
    return this;
}
/**
 * Sets a custom delimiter for composing an Elasticsearch document ID out of several key
 * fields. Optional.
 *
 * @param keyDelimiter delimiter string; e.g. {@code "$"} yields IDs like {@code "KEY1$KEY2$KEY3"}
 */
public Elasticsearch keyDelimiter(String keyDelimiter) {
    internalProperties.putString(CONNECTOR_KEY_DELIMITER, keyDelimiter);
    return this;
}
/**
 * Configures the failure handling strategy applied when an Elasticsearch request fails.
 *
 * <p>With this strategy, any failed request raises an exception and therefore fails the job.
 */
public Elasticsearch failureHandlerFail() {
    internalProperties.putString(
        CONNECTOR_FAILURE_HANDLER,
        ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_FAIL);
    return this;
}
/**
 * Configures the failure handling strategy applied when an Elasticsearch request fails.
 *
 * <p>With this strategy, requests rejected due to queue capacity saturation are re-added
 * and retried.
 */
public Elasticsearch failureHandlerRetryRejected() {
    internalProperties.putString(
        CONNECTOR_FAILURE_HANDLER,
        ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_RETRY);
    return this;
}
/**
 * Configures how elements are buffered before being sent to the cluster in bulk.
 *
 * <p>Selects an exponential backoff strategy for retrying flushed bulk requests.
 */
public Elasticsearch bulkFlushBackoffExponential() {
    internalProperties.putString(
        CONNECTOR_BULK_FLUSH_BACKOFF_TYPE,
        ElasticsearchValidator.CONNECTOR_BULK_FLUSH_BACKOFF_TYPE_VALUE_EXPONENTIAL);
    return this;
}
/**
 * Configures the failure handling strategy applied when an Elasticsearch request fails.
 *
 * <p>With this strategy, failures are delegated to a user-provided
 * {@link ActionRequestFailureHandler}.
 *
 * @param failureHandlerClass class implementing the custom failure handling logic
 */
public Elasticsearch failureHandlerCustom(
        Class<? extends ActionRequestFailureHandler> failureHandlerClass) {
    internalProperties.putString(
        CONNECTOR_FAILURE_HANDLER,
        ElasticsearchValidator.CONNECTOR_FAILURE_HANDLER_VALUE_CUSTOM);
    internalProperties.putClass(CONNECTOR_FAILURE_HANDLER_CLASS, failureHandlerClass);
    return this;
}
/**
 * Creates a view entry with the given name backed by the given query string.
 *
 * @param name name of the view
 * @param query SQL query defining the view
 */
public static ViewEntry create(String name, String query) {
    // Normalization enabled (constructor flag) — matches how other entries are created.
    final DescriptorProperties props = new DescriptorProperties(true);
    props.putString(TABLES_QUERY, query);
    return new ViewEntry(name, props);
}
}
properties.putString(CONNECTOR_VERSION, version); properties.putString(CONNECTOR_TOPIC, topic); properties.putString(CONNECTOR_STARTUP_MODE, KafkaValidator.normalizeStartupMode(startupMode)); properties.putString(CONNECTOR_SINK_PARTITIONER, sinkPartitionerType); if (sinkPartitionerClass != null) { properties.putClass(CONNECTOR_SINK_PARTITIONER_CLASS, sinkPartitionerClass);
/**
 * Converts this JSON format descriptor into its key-value property representation.
 *
 * <p>Only options that have been explicitly set (non-null) are emitted.
 */
@Override
protected Map<String, String> toFormatProperties() {
    final DescriptorProperties props = new DescriptorProperties();
    if (deriveSchema != null) {
        props.putBoolean(FORMAT_DERIVE_SCHEMA, deriveSchema);
    }
    if (jsonSchema != null) {
        props.putString(FORMAT_JSON_SCHEMA, jsonSchema);
    }
    if (schema != null) {
        props.putString(FORMAT_SCHEMA, schema);
    }
    if (failOnMissingField != null) {
        props.putBoolean(FORMAT_FAIL_ON_MISSING_FIELD, failOnMissingField);
    }
    return props.asMap();
}
}
/**
 * Converts this Avro format descriptor into its key-value property representation.
 *
 * <p>Only options that have been explicitly set (non-null) are emitted.
 */
@Override
protected Map<String, String> toFormatProperties() {
    final DescriptorProperties props = new DescriptorProperties();
    if (recordClass != null) {
        props.putClass(AvroValidator.FORMAT_RECORD_CLASS, recordClass);
    }
    if (avroSchema != null) {
        props.putString(AvroValidator.FORMAT_AVRO_SCHEMA, avroSchema);
    }
    return props.asMap();
}
}
/**
 * Converts this format descriptor into a set of string properties.
 *
 * <p>Emits the format type and property version, then appends the format-specific
 * properties produced by {@code toFormatProperties()}.
 */
@Override
public final Map<String, String> toProperties() {
    final DescriptorProperties props = new DescriptorProperties();
    props.putString(FormatDescriptorValidator.FORMAT_TYPE, type);
    props.putInt(FormatDescriptorValidator.FORMAT_PROPERTY_VERSION, version);
    props.putProperties(toFormatProperties());
    return props.asMap();
}
/**
 * Converts this connector descriptor into a set of string properties.
 *
 * <p>Emits the connector type and property version, then appends the connector-specific
 * properties produced by {@code toConnectorProperties()}.
 */
@Override
public final Map<String, String> toProperties() {
    final DescriptorProperties properties = new DescriptorProperties();
    properties.putString(CONNECTOR_TYPE, type);
    // Use putInt instead of putLong for consistency with the parallel format descriptor,
    // which stores FORMAT_PROPERTY_VERSION via putInt.
    // NOTE(review): assumes the `version` field is declared as int — confirm at the field
    // declaration before relying on this.
    properties.putInt(CONNECTOR_PROPERTY_VERSION, version);
    properties.putProperties(toConnectorProperties());
    return properties.asMap();
}
/**
 * Replaces a single property of the given descriptor with a new value and runs validation.
 *
 * @param descriptor descriptor whose properties form the baseline
 * @param property key of the property to override
 * @param newValue value to substitute for the existing property
 */
protected void addPropertyAndVerify(Descriptor descriptor, String property, String newValue) {
    final DescriptorProperties baseline = new DescriptorProperties();
    baseline.putProperties(descriptor.toProperties());
    // Remove the old value first so the override cannot collide with the existing entry.
    final DescriptorProperties modified =
        baseline.withoutKeys(Collections.singletonList(property));
    modified.putString(property, newValue);
    validator().validate(modified);
}
/**
 * Contributes this source's connector properties: the table schema plus the row data
 * serialized via {@code serializeRows()}.
 *
 * @param properties target property set to populate
 */
@Override
public void addConnectorProperties(DescriptorProperties properties) {
    properties.putTableSchema(TABLE_SCHEMA_CONNECTOR_PROPERTY, tableSchema);
    properties.putString(TABLE_DATA_CONNECTOR_PROPERTY, serializeRows());
}
};