if (physicalTableDictionary.containsKey(currentTableName)) { return physicalTableDictionary.get(currentTableName); .forEach( tableName -> { if (!physicalTableDictionary.containsKey(tableName)) { physicalTableDictionary.put( tableName.asName(), buildPhysicalTableWithDependency( getDataSourceMetadataService() ); physicalTableDictionary.put(currentTableName, currentTableBuilt);
/**
 * Runs one metadata refresh pass.
 *
 * <p>Gathers the distinct data source names backing every registered physical table,
 * queries metadata for each, then records the completion time of this pass.
 */
@Override
public void run() {
    physicalTableDictionary.values().stream()
            .map(PhysicalTable::getDataSourceNames)
            .flatMap(Set::stream)
            .distinct()
            .forEach(dataSourceName -> {
                LOG.trace("Querying metadata for datasource: {}", dataSourceName);
                queryDataSourceMetadata(dataSourceName);
            });
    lastRunTimestamp.set(DateTime.now());
}
UriInfo uriInfo ) throws BadApiRequestException { if (tableDictionary.isEmpty()) { String msg = EMPTY_DICTIONARY.logFormat("Slices cannot be found. Physical Table"); throw new BadApiRequestException(msg); LinkedHashSet<Map<String, String>> generated = tableDictionary.entrySet().stream() .map( e -> {
/**
 * Resolves a collection of dependent table names into their physical tables.
 *
 * @param tableNames  The names of the tables to be resolved
 * @param physicalTableDictionary  The physical table dictionary used to resolve the names
 *
 * @return the set of ConfigPhysicalTables corresponding to the given names
 *
 * @throws IllegalArgumentException if any of the names is not present in the dictionary
 */
private static Set<ConfigPhysicalTable> mapNamestoTables(
        Collection<String> tableNames,
        PhysicalTableDictionary physicalTableDictionary
) {
    // Validate all names up front so the error message can report every missing table at once.
    Set<String> missingTableNames = tableNames.stream()
            .filter(it -> !physicalTableDictionary.containsKey(it))
            .collect(Collectors.toSet());

    if (!missingTableNames.isEmpty()) {
        String message = String.format(MISSING_DEPENDANT_TABLE_FORMAT, missingTableNames);
        throw new IllegalArgumentException(message);
    }

    return tableNames.stream()
            .map(physicalTableDictionary::get)
            .collect(Collectors.toSet());
}
/**
 * Builds the partition composite physical table described by this definition.
 *
 * <p>Each configured table part is resolved from the dictionaries and paired with a
 * dimension-id filter that routes queries to the appropriate partition.
 *
 * @param dictionaries  The resource dictionaries used to resolve table parts and dimensions
 * @param metadataService  The data source metadata service supplied by the loading framework
 *
 * @return the assembled composite physical table backed by partition availability
 */
@Override
public ConfigPhysicalTable build(ResourceDictionaries dictionaries, DataSourceMetadataService metadataService) {
    Map<ConfigPhysicalTable, DataSourceFilter> availabilityFilters = tablePartDefinitions.entrySet().stream()
            .collect(Collectors.toMap(
                    entry -> dictionaries.getPhysicalDictionary().get(entry.getKey().asName()),
                    entry -> new DimensionIdFilter(
                            toDimensionValuesMap(entry.getValue(), dictionaries.getDimensionDictionary())
                    )
            ));

    return new BaseCompositePhysicalTable(
            getName(),
            getTimeGrain(),
            buildColumns(dictionaries.getDimensionDictionary()),
            availabilityFilters.keySet(),
            getLogicalToPhysicalNames(),
            PartitionAvailability.build(availabilityFilters)
    );
}
/**
 * Constructor. Initializes every resource dictionary to a fresh, empty instance.
 */
public ResourceDictionaries() {
    dimension = new DimensionDictionary();
    metric = new MetricDictionary();
    logical = new LogicalTableDictionary();
    physical = new PhysicalTableDictionary();
}
/**
 * Builds a volatile intervals service mapping the hourly and monthly test tables
 * (when present in the dictionary) to fixed volatile intervals in August 2016.
 *
 * @return a defaulting service that reports no volatility for any other table
 */
@Override
protected VolatileIntervalsService getVolatileIntervalsService() {
    // Resolve the dictionary once and reuse it, rather than re-fetching it through
    // the configuration loader for every lookup.
    PhysicalTableDictionary physicalTableDictionary = getConfigurationLoader().getPhysicalTableDictionary();
    Map<PhysicalTable, VolatileIntervalsFunction> hourlyMonthlyVolatileIntervals = new LinkedHashMap<>();

    if (physicalTableDictionary.containsKey(HOURLY.asName())) {
        hourlyMonthlyVolatileIntervals.put(
                physicalTableDictionary.get(HOURLY.asName()),
                () -> new SimplifiedIntervalList(
                        Collections.singleton(
                                new Interval(new DateTime(2016, 8, 15, 0, 0), new DateTime(2016, 8, 16, 0, 0))
                        )
                )
        );
    }
    if (physicalTableDictionary.containsKey(MONTHLY.asName())) {
        hourlyMonthlyVolatileIntervals.put(
                physicalTableDictionary.get(MONTHLY.asName()),
                () -> new SimplifiedIntervalList(
                        Collections.singleton(
                                new Interval(new DateTime(2016, 8, 1, 0, 0), new DateTime(2016, 9, 1, 0, 0))
                        )
                )
        );
    }

    return new DefaultingVolatileIntervalsService(
            NoVolatileIntervalsFunction.INSTANCE,
            hourlyMonthlyVolatileIntervals
    );
}
@Override public boolean handleRequest( RequestContext context, DataApiRequest request, DruidAggregationQuery<?> druidQuery, ResponseProcessor response ) { if (!(response instanceof MappingResponseProcessor)) { throw new IllegalStateException("Volatile data request handler requires a mapping response."); } MappingResponseProcessor mappingResponse = (MappingResponseProcessor) response; // Gather the volatile intervals. A volatile interval in one data source make that interval volatile overall. SimplifiedIntervalList volatileIntervals = volatileIntervalsService.getVolatileIntervals( druidQuery.getGranularity(), druidQuery.getIntervals(), physicalTableDictionary.get( druidQuery .getInnermostQuery() .getDataSource() .getPhysicalTable() .getName() ) ); if (!volatileIntervals.isEmpty()) { ResponseContext responseContext = response.getResponseContext(); responseContext.put(VOLATILE_INTERVALS_CONTEXT_KEY.getName(), volatileIntervals); } return next.handleRequest(context, request, druidQuery, mappingResponse); }
/**
 * Builds a volatile intervals service mapping the hourly and monthly test tables
 * (when present in the dictionary) to fixed volatile intervals in August 2016.
 *
 * @return a defaulting service that reports no volatility for any other table
 */
@Override
protected VolatileIntervalsService getVolatileIntervalsService() {
    // Resolve the dictionary once and reuse it, rather than re-fetching it through
    // the configuration loader for every lookup.
    PhysicalTableDictionary physicalTableDictionary = getConfigurationLoader().getPhysicalTableDictionary();
    Map<PhysicalTable, VolatileIntervalsFunction> hourlyMonthlyVolatileIntervals = new LinkedHashMap<>();

    if (physicalTableDictionary.containsKey(HOURLY.asName())) {
        hourlyMonthlyVolatileIntervals.put(
                physicalTableDictionary.get(HOURLY.asName()),
                () -> new SimplifiedIntervalList(
                        Collections.singleton(
                                new Interval(new DateTime(2016, 8, 15, 0, 0), new DateTime(2016, 8, 16, 0, 0))
                        )
                )
        );
    }
    if (physicalTableDictionary.containsKey(MONTHLY.asName())) {
        hourlyMonthlyVolatileIntervals.put(
                physicalTableDictionary.get(MONTHLY.asName()),
                () -> new SimplifiedIntervalList(
                        Collections.singleton(
                                new Interval(new DateTime(2016, 8, 1, 0, 0), new DateTime(2016, 9, 1, 0, 0))
                        )
                )
        );
    }

    return new DefaultingVolatileIntervalsService(
            NoVolatileIntervalsFunction.INSTANCE,
            hourlyMonthlyVolatileIntervals
    );
}
/**
 * DruidDimensionValueLoader fetches data from Druid and adds it to the dimension cache.
 * The dimensions to be loaded can be passed in as a parameter.
 *
 * @param physicalTableDictionary  The physical tables to extract data sources from
 * @param dimensionDictionary  The dimension dictionary to load dimensions from
 * @param dimensionsToLoad  The dimensions to be loaded
 * @param druidWebService  The druid webservice to query
 */
public DruidDimensionValueLoader(
        PhysicalTableDictionary physicalTableDictionary,
        DimensionDictionary dimensionDictionary,
        List<String> dimensionsToLoad,
        DruidWebService druidWebService
) {
    this.dimensions = dimensionsToLoad.stream()
            .map(dimensionDictionary::findByApiName)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    // Query each table unconstrained so every backing data source is covered.
    this.dataSources = physicalTableDictionary.values().stream()
            .map(table -> table.withConstraint(DataSourceConstraint.unconstrained(table)))
            .map(TableDataSource::new)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    this.druidWebService = druidWebService;
}
/**
 * SqlDimensionValueLoader fetches data from Sql and adds it to the dimension cache.
 * The dimensions to be loaded can be passed in as a parameter.
 *
 * @param physicalTableDictionary  The physical tables to extract data sources from
 * @param dimensionDictionary  The dimension dictionary to load dimensions from
 * @param dimensionsToLoad  The dimensions to be loaded
 * @param sqlBackedClient  The sql backed client used to run the queries
 */
public SqlDimensionValueLoader(
        PhysicalTableDictionary physicalTableDictionary,
        DimensionDictionary dimensionDictionary,
        List<String> dimensionsToLoad,
        SqlBackedClient sqlBackedClient
) {
    this.sqlBackedClient = sqlBackedClient;
    this.dimensions = dimensionsToLoad.stream()
            .map(dimensionDictionary::findByApiName)
            .collect(Collectors.toCollection(LinkedHashSet::new));
    // Wrap each table, unconstrained, as a data source so every backing table is queried.
    this.dataSources = physicalTableDictionary.values().stream()
            .map(table -> new TableDataSource(table.withConstraint(DataSourceConstraint.unconstrained(table))))
            .collect(Collectors.toCollection(LinkedHashSet::new));
}