@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceScanRuntime(
        MetadataProvider metadataProvider, IDataSource<DataSourceId> dataSource,
        List<LogicalVariable> scanVariables, List<LogicalVariable> projectVariables, boolean projectPushed,
        List<LogicalVariable> minFilterVars, List<LogicalVariable> maxFilterVars,
        ITupleFilterFactory tupleFilterFactory, long outputLimit, IOperatorSchema opSchema,
        IVariableTypeEnvironment typeEnv, JobGenContext context, JobSpecification jobSpec, Object implConfig)
        throws AlgebricksException {
    // Neither tuple filtering nor an output limit is implemented for loadable data sources;
    // reject the plan rather than silently ignoring either option.
    if (tupleFilterFactory != null || outputLimit >= 0) {
        throw CompilationException.create(ErrorCode.COMPILATION_ILLEGAL_STATE,
                "tuple filter and limit are not supported by LoadableDataSource");
    }
    final LoadableDataSource loadableSource = (LoadableDataSource) dataSource;
    final ARecordType loadedRecordType = (ARecordType) loadableSource.getLoadedType();
    // Resolve the external adapter that will feed records into the load pipeline.
    final IAdapterFactory adapterFactory = metadataProvider.getConfiguredAdapterFactory(
            loadableSource.getTargetDataset(), loadableSource.getAdapter(),
            loadableSource.getAdapterProperties(), loadedRecordType, null);
    final RecordDescriptor scanRecordDescriptor = JobGenHelper.mkRecordDescriptor(typeEnv, opSchema, context);
    return metadataProvider.buildLoadableDatasetScan(jobSpec, adapterFactory, scanRecordDescriptor);
}
/**
 * Creates a data source describing the input of a bulk load into {@code targetDataset}.
 * When the target dataset's primary key is auto-generated, the scan schema is the item
 * type with the (possibly nested) PK field removed, since input records do not carry it.
 */
public LoadableDataSource(Dataset targetDataset, IAType itemType, IAType metaItemType, String adapter,
        Map<String, String> properties) throws AlgebricksException, IOException {
    super(new DataSourceId("loadable_dv", "loadable_ds"), itemType, metaItemType, Type.LOADABLE, null);
    this.targetDataset = targetDataset;
    this.adapter = adapter;
    this.adapterProperties = properties;
    partitioningKeys = targetDataset.getPrimaryKeys();
    ARecordType scanRecordType = (ARecordType) itemType;
    isPKAutoGenerated = ((InternalDatasetDetails) targetDataset.getDatasetDetails()).isAutogenerated();
    if (isPKAutoGenerated) {
        // The auto-generated key is absent from the input, so strip it (wherever it is
        // nested) from the record type used for the scan schema.
        scanRecordType = getStrippedPKType(new LinkedList<>(partitioningKeys.get(0)), scanRecordType);
    }
    schemaTypes = new IAType[] { scanRecordType };
}
lds = new LoadableDataSource(dataset, itemType, metaItemType, clffs.getAdapter(), clffs.getProperties()); } catch (IOException e) { throw new CompilationException(ErrorCode.COMPILATION_ERROR, sourceLoc, e.toString(), e);
/**
 * Returns a copy of {@code recType} with the auto-generated primary-key field removed.
 * <p>
 * {@code partitioningKeys} holds the PK field path (outermost name first) and is consumed
 * destructively while descending into nested record types; callers must pass a mutable copy.
 *
 * @param partitioningKeys remaining path components of the auto-generated PK
 * @param recType record type to strip the PK field from
 * @return a new {@link ARecordType} identical to {@code recType} minus the PK field
 * @throws AsterixException if a nested PK component is not the first field of its record type
 */
private ARecordType getStrippedPKType(List<String> partitioningKeys, ARecordType recType)
        throws AlgebricksException, HyracksDataException {
    String[] srcNames = recType.getFieldNames();
    IAType[] srcTypes = recType.getFieldTypes();
    List<String> fieldNames = new LinkedList<>();
    List<IAType> fieldTypes = new LinkedList<>();
    // The PK path is matched at most once per level. Without this guard, the leaf key
    // (which is never popped from the list) could spuriously match — and strip — a later
    // sibling field that happens to share its name.
    boolean pkHandled = false;
    for (int j = 0; j < srcNames.length; j++) {
        IAType fieldType;
        if (!pkHandled && !partitioningKeys.isEmpty() && partitioningKeys.get(0).equals(srcNames[j])) {
            pkHandled = true;
            if (srcTypes[j].getTypeTag() == ATypeTag.OBJECT) {
                if (j != 0) {
                    throw new AsterixException("Autogenerated key " + StringUtils.join(partitioningKeys, '.')
                            + " should be a first field of the type " + recType.getTypeName());
                }
                // Descend: pop this path component and strip the rest inside the nested record.
                partitioningKeys.remove(0);
                fieldType = getStrippedPKType(partitioningKeys, (ARecordType) srcTypes[j]);
            } else {
                // Leaf PK field: omit it from the stripped type entirely.
                continue;
            }
        } else {
            fieldType = srcTypes[j];
        }
        fieldTypes.add(fieldType);
        fieldNames.add(srcNames[j]);
    }
    return new ARecordType(recType.getTypeName(), fieldNames.toArray(new String[0]),
            fieldTypes.toArray(new IAType[0]), recType.isOpen());
}