/**
 * Builds the druid query as sql and returns it as a string.
 *
 * @param druidQuery  The query to convert to sql.
 * @param apiToFieldMapper  The mapping between api and physical names for the query.
 *
 * @return the sql equivalent of the query.
 */
public String buildSqlQuery(DruidAggregationQuery<?> druidQuery, ApiToFieldMapper apiToFieldMapper) {
    // Resolve the sql-backed physical table behind the query's data source.
    // NOTE(review): assumes the source table is always a SqlPhysicalTable — the cast
    // will throw ClassCastException otherwise.
    SqlPhysicalTable sqlTable = (SqlPhysicalTable) druidQuery
            .getDataSource()
            .getPhysicalTable()
            .getSourceTable();

    LOG.debug(
            "Querying table {} with schema {} using timestampColumn {}",
            sqlTable.getName(),
            sqlTable.getSchemaName(),
            sqlTable.getTimestampColumn()
    );

    // Translate the druid query into a Calcite relational expression,
    // then render that expression as sql text.
    RelNode relNode = convertDruidQueryToRelNode(druidQuery, apiToFieldMapper, sqlTable);
    RelToSqlConverter converter = calciteHelper.getNewRelToSqlConverter();
    SqlPrettyWriter writer = calciteHelper.getNewSqlWriter();
    return writeSql(writer, converter, relNode);
}
/**
 * Builds a sql physical table backed by an eternal availability.
 *
 * @param dictionaries  The resource dictionaries used to resolve the table's columns.
 * @param metadataService  The service providing data source metadata for availability.
 *
 * @return the configured sql physical table.
 */
@Override
public ConfigPhysicalTable build(ResourceDictionaries dictionaries, DataSourceMetadataService metadataService) {
    // Sql tables are always available, so wire up an eternal availability for this data source.
    EternalAvailability availability = new EternalAvailability(
            DataSourceName.of(getName().asName()),
            metadataService
    );
    return new SqlPhysicalTable(
            getName(),
            getTimeGrain(),
            buildColumns(dictionaries.getDimensionDictionary()),
            getLogicalToPhysicalNames(),
            availability,
            schemaName,
            timestampColumn
    );
}
SqlPhysicalTable sqlTable ) { RelBuilder builder = calciteHelper.getNewRelBuilder(sqlTable.getSchemaName()); return builder.scan(sqlTable.getName()) .filter( getAllWhereFilters(builder, druidQuery, apiToFieldMapper, sqlTable.getTimestampColumn()) druidQuery, apiToFieldMapper, sqlTable.getTimestampColumn() )), getAllQueryAggregations(builder, druidQuery, apiToFieldMapper) NO_OFFSET, getLimit(druidQuery), getSort(builder, druidQuery, apiToFieldMapper, sqlTable.getTimestampColumn())