private Supplier<TableException> exceptionSupplier(String key) {
    return () -> {
        throw new TableException(
            "Property with key '" + key + "' could not be found. " +
            "This is a bug because the validation logic should have checked that before.");
    };
}
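A hypothetical call-site sketch for the supplier above: it pairs naturally with Optional.orElseThrow, so a missing property surfaces lazily as the TableException. The property key and the getOptionalString lookup are assumptions, not taken from the source.

import java.util.Optional;

// Hypothetical usage: exceptionSupplier defers construction of the error
// until the Optional turns out to be empty.
Optional<String> value = descriptorProperties.getOptionalString("format.type");
String resolved = value.orElseThrow(exceptionSupplier("format.type"));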
public static byte[] md5(String string) {
    try {
        return MessageDigest.getInstance("MD5").digest(string.getBytes(UTF_8));
    } catch (NoSuchAlgorithmException e) {
        throw new TableException("Unsupported MD5 algorithm.", e);
    }
}
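A short usage sketch (the call site is hypothetical): hashing a string and printing the 16-byte digest as lowercase hex.

// Hypothetical usage of md5(...) above.
byte[] digest = md5("hello world");
StringBuilder hex = new StringBuilder(digest.length * 2);
for (byte b : digest) {
    hex.append(String.format("%02x", b));
}
System.out.println(hex); // prints 5eb63bbbe01eeed093cb22bb8f5acdc3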
throw new TableException( "Number of field names and field types must be equal.\n" + "Number of names is " + fieldNames.length + ", number of types is " + fieldTypes.length + ".\n" + throw new TableException( "Field names must be unique.\n" + "List of duplicate fields: " + duplicateNames.toString() + "\n" +
@SuppressWarnings("unchecked") private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) { return descriptorProperties .getOptionalString(CONNECTOR_SINK_PARTITIONER) .flatMap((String partitionerString) -> { switch (partitionerString) { case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED: return Optional.of(new FlinkFixedPartitioner<>()); case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN: return Optional.empty(); case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM: final Class<? extends FlinkKafkaPartitioner> partitionerClass = descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class); return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass)); default: throw new TableException("Unsupported sink partitioner. Validator should have checked that."); } }); }
        return StartupMode.SPECIFIC_OFFSETS;
    default:
        throw new TableException("Unsupported startup mode. Validator should have checked that.");
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
    final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
    final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
    final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
        SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

    // see also FLINK-9870
    if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
            checkForCustomFieldMapping(descriptorProperties, schema)) {
        throw new TableException("Time attributes and custom field mappings are not supported yet.");
    }

    return createKafkaTableSink(
        schema,
        topic,
        getKafkaProperties(descriptorProperties),
        getFlinkKafkaPartitioner(descriptorProperties),
        getSerializationSchema(properties));
}
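A hedged sketch of the kind of property map such a factory consumes; the exact key strings are assumptions inferred from the constants referenced above (CONNECTOR_TOPIC, SCHEMA()), not verified against the validator.

// Hypothetical input to createStreamTableSink(...); keys are assumed.
Map<String, String> properties = new HashMap<>();
properties.put("connector.type", "kafka");
properties.put("connector.topic", "orders");   // read via CONNECTOR_TOPIC
properties.put("schema.0.name", "id");         // read via SCHEMA()
properties.put("schema.0.type", "BIGINT");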
throw new TableException( "A string representation for custom POJO types is not supported yet.");
@Override
public void visit(BatchExecSink<?> sink) {
    throw new TableException("Sink nodes should not be reachable here.");
}
private static Column[] validate(String[] names, InternalType[] types, boolean[] nulls) {
    if (names.length != types.length) {
        throw new TableException(
            "Number of column names and column types must be equal.\n" +
            "Column names count is [" + names.length + "]\n" +
            "Column types count is [" + types.length + "]\n" +
            "Column names: " + Arrays.toString(names) + "\n" +
            "Column types: " + Arrays.toString(types));
    }
    if (names.length != nulls.length) {
        throw new TableException(
            "Number of column names and nullabilities must be equal.\n" +
            "Column names count is: " + names.length + "\n" +
            "Column nullabilities count is: " + nulls.length + "\n" +
            "List of all field names: " + Arrays.toString(names) + "\n" +
            "List of all field nullabilities: " + Arrays.toString(nulls));
    }
    List<Column> columns = new ArrayList<>();
    for (int i = 0; i < names.length; i++) {
        columns.add(new Column(names[i], types[i], nulls[i]));
    }
    return columns.toArray(new Column[columns.size()]);
}
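A hypothetical invocation of the validator above, showing how a length mismatch trips the first exception. The InternalTypes constant is an assumption for illustration.

// Hypothetical sketch: one type short on purpose, so validate(...) throws.
String[] names = {"id", "name"};
InternalType[] types = {InternalTypes.LONG};       // assumed constant; 1 type vs 2 names
boolean[] nulls = {false, true};
Column[] columns = validate(names, types, nulls);  // throws TableException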
throw new TableException( "Table column names must be unique.\n" + "The duplicate columns are: " + duplicateNames.toString() + "\n" + throw new TableException( "Primary key field: " + primaryKeys[i] + " not found in table schema." ); throw new TableException("Unique key should not be empty."); throw new TableException( "Unique key field: " + uniqueKey[j] + " not found in table schema." );
throw new TableException( "Number of field names and field types must be equal.\n" + "Number of names is " + fieldNames.length + ", number of types is " + fieldTypes.length + ".\n" + throw new TableException( "Field names must be unique.\n" + "List of duplicate fields: " + duplicateNames.toString() + "\n" +
private int calculate(ExecNode<?, ?> execNode) {
    if (calculatedResultMap.containsKey(execNode)) {
        return calculatedResultMap.get(execNode);
    }
    int result;
    if (execNode instanceof BatchExecScan) {
        result = calculateSource((BatchExecScan) execNode);
    } else if (execNode.getInputNodes().size() == 1) {
        result = calculateSingleNode(execNode);
    } else if (execNode.getInputNodes().size() == 2) {
        result = calculateBiNode(execNode);
    } else {
        throw new TableException("Should not reach here: unexpected ExecNode " + execNode.getClass());
    }
    calculatedResultMap.put(execNode, result);
    return result;
}
@SuppressWarnings("unchecked") private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) { return descriptorProperties .getOptionalString(CONNECTOR_SINK_PARTITIONER) .flatMap((String partitionerString) -> { switch (partitionerString) { case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED: return Optional.of(new FlinkFixedPartitioner<>()); case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN: return Optional.empty(); case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM: final Class<? extends FlinkKafkaPartitioner> partitionerClass = descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class); return Optional.of(InstantiationUtil.instantiate(partitionerClass)); default: throw new TableException("Unsupported sink partitioner. Validator should have checked that."); } }); }
@SuppressWarnings("unchecked") private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) { return descriptorProperties .getOptionalString(CONNECTOR_SINK_PARTITIONER) .flatMap((String partitionerString) -> { switch (partitionerString) { case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED: return Optional.of(new FlinkFixedPartitioner<>()); case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN: return Optional.empty(); case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM: final Class<? extends FlinkKafkaPartitioner> partitionerClass = descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class); return Optional.of(InstantiationUtil.instantiate(partitionerClass)); default: throw new TableException("Unsupported sink partitioner. Validator should have checked that."); } }); }