/**
 * Creates an OLAP context with the given sequence id and a matching storage context,
 * then applies any per-thread query parameters (partial-result acceptance, user auth info).
 */
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext(seq);
    this.sortColumns = Lists.newArrayList();
    this.sortOrders = Lists.newArrayList();

    // thread-local query parameters may be absent (e.g. non-query code paths)
    Map<String, String> params = _localPrarameters.get();
    if (params == null) {
        return;
    }

    String partialResult = params.get(PRM_ACCEPT_PARTIAL_RESULT);
    if (partialResult != null) {
        this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(partialResult));
    }

    String userInfo = params.get(PRM_USER_AUTHEN_INFO);
    if (userInfo != null) {
        this.olapAuthen.parseUserInfo(userInfo);
    }
}
// NOTE(review): fragment of a larger method — guards against a context whose cuboid
// has not been resolved yet; the opened block's body is outside this view.
Cuboid cuboid = ctx.storageContext.getCuboid(); if (cuboid != null) {
/**
 * Enables storage-side limit push down for the given realization, if supported and valid.
 *
 * <p>Does nothing when the requested level is {@code NO_LIMIT}, when the realization's
 * storage type does not support push down, or when offset + limit is not a valid
 * push-down limit. On success records the combined limit and level on this context.
 *
 * @param realization       the realization (cube) the query targets
 * @param storageLimitLevel the requested push-down level
 */
public void applyLimitPushDown(IRealization realization, StorageLimitLevel storageLimitLevel) {
    if (storageLimitLevel == StorageLimitLevel.NO_LIMIT) {
        return;
    }

    if (!realization.supportsLimitPushDown()) {
        logger.warn("Not enabling limit push down because cube storage type not supported");
        return;
    }

    // combine offset and limit in a long so the sum is validated before narrowing to int
    long pushDownLimit = this.getOffset() + this.getLimit();

    if (!isValidPushDownLimit(pushDownLimit)) {
        // parameterized logging, consistent with the info message below
        logger.warn("Not enabling limit push down because current limit is invalid: {}", this.getLimit());
        return;
    }

    this.finalPushDownLimit = (int) pushDownLimit;
    this.storageLimitLevel = storageLimitLevel;
    logger.info("Enabling limit push down: {} at level: {}", pushDownLimit, storageLimitLevel);
}
// NOTE(review): fragment — when the query has an ORDER BY, the limit level is forced to
// NO_LIMIT before applying push down, presumably because a storage-side limit could drop
// rows the sort needs — confirm against the full method.
if (context.hasSort()) { storageLimitLevel = StorageLimitLevel.NO_LIMIT; logger.debug("storageLimitLevel set to NO_LIMIT because the query has order by"); context.applyLimitPushDown(cubeInstance, storageLimitLevel);
// NOTE(review): garbled fragment of getStorageQueryRequest — the trailing argument list
// (cubeInstance.getName(), cuboid.getId(), ...) looks like the tail of a constructor or
// log call whose head was lost in extraction. The visible part resolves the cuboid from
// the dimension set and copies cuboid/mapping/aggregation/filter-mask state into the
// StorageContext. Do not edit logic here without the full method.
public GTCubeStorageQueryRequest getStorageQueryRequest(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) { context.setStorageQuery(this); dimensionsD.addAll(otherDimsD); Cuboid cuboid = findCuboid(cubeInstance, dimensionsD, metrics); context.setCuboid(cuboid); context.setMapping(mapping); context.setNeedStorageAggregation(isNeedStorageAggregation(cuboid, groupsD, singleValuesD)); context.setExactAggregation(exactAggregation); groupsD.addAll(loosenedColumnD); TupleFilter.collectColumns(filterD, filterColumnD); context.setFilterMask(getQueryFilterMask(filterColumnD)); cubeInstance.getName(), cuboid.getId(), groupsD, filterColumnD, context.getFinalPushDownLimit(), context.getStorageLimitLevel(), context.isNeedStorageAggregation());
/**
 * Enables the storage-side row limit on the context when it is safe to do so:
 * aggregation is exact, the filter (if any) is fully evaluable by the coprocessor,
 * and the query has no sort (a limit could otherwise drop rows the sort needs).
 *
 * @param filter  the query filter, possibly null
 * @param context the storage context to enable the limit on
 */
private void setLimit(TupleFilter filter, StorageContext context) {
    boolean goodAggr = context.isExactAggregation();
    boolean goodFilter = filter == null
            || (TupleFilter.isEvaluableRecursively(filter) && context.isCoprocessorEnabled());
    boolean goodSort = !context.hasSort(); // idiomatic negation instead of "== false"
    if (goodAggr && goodFilter && goodSort) {
        // parameterized logging instead of string concatenation
        logger.info("Enable limit {}", context.getLimit());
        context.enableLimit();
    }
}
// Reports whether another tuple is available, enforcing three guards in order:
// (1) the enabled limit stops iteration once scanCount reaches it;
// (2) when partial results are accepted, exceeding partialResultLimit stops iteration
//     and marks the context so the caller knows the result is incomplete;
// (3) reaching the hard scan threshold aborts the query with ScanOutOfLimitException.
// Only then does it consult the per-segment iterators. The check order matters:
// limit and partial-result short-circuits must fire before the threshold throws.
@Override public boolean hasNext() { // 1. check limit if (context.isLimitEnabled() && scanCount >= context.getLimit()) { return false; } // 2. check partial result if (context.isAcceptPartialResult() && scanCount > partialResultLimit) { context.setPartialResultReturned(true); return false; } // 3. check threshold if (scanCount >= context.getThreshold()) { throw new ScanOutOfLimitException("Scan row count exceeded threshold: " + context.getThreshold() + ", please add filter condition to narrow down backend scan range, like where clause."); } // 4. check cube segments return segmentIteratorIterator.hasNext() || segmentIterator.hasNext(); }
/**
 * Reads the scan-threshold connection property from the Optiq connection and applies it
 * to the OLAP context's storage context.
 *
 * <p>NOTE(review): assumes {@code PROP_SCAN_THRESHOLD} is always present on the
 * connection; a missing property would make parsing throw — confirm against callers.
 */
private void setConnectionProperties() {
    OptiqConnection conn = (OptiqConnection) optiqContext.getQueryProvider();
    Properties connProps = conn.getProperties();
    String propThreshold = connProps.getProperty(OLAPQuery.PROP_SCAN_THRESHOLD);
    // parseInt yields the primitive directly; Integer.valueOf boxed only to unbox again
    int threshold = Integer.parseInt(propThreshold);
    olapContext.storageContext.setThreshold(threshold);
}
}
// NOTE(review): fragment — resolves the cuboid for the full dimension set, records it
// (with the exact-aggregation flag) on the context, then opens an HBase connection for
// the context's URL and returns the serialized tuple iterator over the prepared scans.
dimensionsD.addAll(othersD); Cuboid cuboid = identifyCuboid(dimensionsD); context.setCuboid(cuboid); context.setExactAggregation(isExactAggregation); HConnection conn = HBaseConnection.get(context.getConnUrl()); return new SerializedHBaseTupleIterator(conn, scans, cubeInstance, dimensionsD, filterD, groupsCopD, valueDecoders, context);
// NOTE(review): fragment — wraps the HBase cell iterator in a read-only GT store
// (column mapping from the first raw scan, row-key preamble from the segment, and the
// context's exact-aggregation flag) and opens a scanner over it.
IGTStore store = new HBaseReadonlyStore(cellListIterator, scanRequest, rawScans.get(0).hbaseColumns, hbaseColumnsToGT, cubeSeg.getRowKeyPreambleSize(), false, storageContext.isExactAggregation()); IGTScanner rawScanner = store.scan(scanRequest);
/**
 * Decides whether routing the query through the HBase coprocessor is beneficial.
 *
 * <p>Precedence: a JVM system property force-flag wins, then a per-cube override,
 * then heuristics — coprocessor is skipped for memory-hungry count-distinct measures,
 * for exact aggregation (nothing left to aggregate server-side), and when the cuboid
 * has no aggregation columns beyond the group-by set.
 *
 * @return true when the coprocessor should be used for this query
 */
private static boolean isCoprocessorBeneficial(CubeInstance cube, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {

    // explicit overrides take precedence over all heuristics
    String forceFlag = System.getProperty(FORCE_COPROCESSOR);
    if (forceFlag != null) {
        return Boolean.parseBoolean(forceFlag);
    }

    Boolean cubeOverride = CUBE_OVERRIDES.get(cube.getName());
    if (cubeOverride != null) {
        return cubeOverride.booleanValue();
    }

    if (RowValueDecoder.hasMemHungryCountDistinct(rowValueDecoders)) {
        logger.info("Coprocessor is disabled because there is memory hungry count distinct");
        return false;
    }

    if (context.isExactAggregation()) {
        logger.info("Coprocessor is disabled because exactAggregation is true");
        return false;
    }

    // columns the coprocessor would actually have to aggregate away
    Cuboid cuboid = context.getCuboid();
    Set<TblColRef> toAggr = Sets.newHashSet(cuboid.getAggregationColumns());
    toAggr.removeAll(groupBy);
    if (toAggr.isEmpty()) {
        logger.info("Coprocessor is disabled because no additional columns to aggregate");
        return false;
    }

    // parameterized logging: avoids building the message string unconditionally
    logger.info("Coprocessor is enabled to aggregate {}, returning {}", toAggr, groupBy);
    return true;
}
/**
 * Prepares the test fixture: loads test metadata, resolves the empty test cube,
 * builds its storage engine, and creates a storage context pointing at the
 * configured storage URL.
 */
@Before
public void setUp() throws Exception {
    this.createTestMetadata();

    // resolve the test cube and fail fast if the metadata is missing
    cube = CubeManager.getInstance(getTestConfig()).getCube("TEST_KYLIN_CUBE_WITHOUT_SLR_EMPTY");
    Assert.assertNotNull(cube);

    storageEngine = StorageEngineFactory.getStorageEngine(cube);

    // storage context wired to the environment's storage backend
    context = new StorageContext();
    context.setConnUrl(KylinConfig.getInstanceFromEnv().getStorageUrl());
}
@Override public void implementOLAP(OLAPImplementor implementor) { Preconditions.checkState(columnRowType == null, "OLAPTableScan MUST NOT be shared by more than one prent"); // create context in case of non-join if (implementor.getContext() == null || !(implementor.getParentNode() instanceof OLAPJoinRel) || implementor.isNewOLAPContextRequired()) { implementor.allocateContext(); } context = implementor.getContext(); context.allTableScans.add(this); columnRowType = buildColumnRowType(); if (context.olapSchema == null) { OLAPSchema schema = olapTable.getSchema(); context.olapSchema = schema; context.storageContext.setConnUrl(schema.getStorageUrl()); } if (context.firstTableScan == null) { context.firstTableScan = this; } if (needCollectionColumns(implementor)) { // OLAPToEnumerableConverter on top of table scan, should be a select * from table for (TblColRef tblColRef : columnRowType.getAllColumns()) { if (!tblColRef.getName().startsWith("_KY_")) { context.allColumns.add(tblColRef); } } } }
/**
 * Builds a tuple iterator spanning all cube segments: groups the key ranges by
 * segment, creates one segment iterator per group, and primes the first one
 * (or the shared empty iterator when there are no segments).
 */
public SerializedHBaseTupleIterator(HConnection conn, List<HBaseKeyRange> segmentKeyRanges, CubeInstance cube, Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {
    this.context = context;
    // partial results are never cut below the default floor
    this.partialResultLimit = Math.max(context.getLimit(), PARTIAL_DEFAULT_LIMIT);

    // one tuple iterator per cube segment, each covering that segment's key ranges
    this.segmentIteratorList = new ArrayList<CubeSegmentTupleIterator>(segmentKeyRanges.size());
    for (Map.Entry<CubeSegment, List<HBaseKeyRange>> entry : makeRangesMap(segmentKeyRanges).entrySet()) {
        this.segmentIteratorList.add(new CubeSegmentTupleIterator(entry.getKey(), entry.getValue(), conn,
                dimensions, filter, groupBy, rowValueDecoders, context));
    }

    this.segmentIteratorIterator = this.segmentIteratorList.iterator();
    // prime the first segment iterator, or fall back to the shared empty iterator
    this.segmentIterator = this.segmentIteratorIterator.hasNext() ? this.segmentIteratorIterator.next()
            : ITupleIterator.EMPTY_TUPLE_ITERATOR;
}
/** Asserts that limit push down ended up enabled on the first OLAP context's storage context. */
private void assertLimitWasEnabled() {
    StorageContext storageContext = getFirstOLAPContext().storageContext;
    assertTrue(storageContext.isLimitEnabled());
}
// NOTE(review): garbled fragment of getStorageQueryRequest (duplicate of an earlier
// occurrence in this chunk) — the trailing argument list has lost its call head.
// The visible part resolves the cuboid and copies mapping/aggregation/filter-mask
// state into the StorageContext. Do not edit logic here without the full method.
public GTCubeStorageQueryRequest getStorageQueryRequest(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) { context.setStorageQuery(this); dimensionsD.addAll(otherDimsD); Cuboid cuboid = findCuboid(cubeInstance, dimensionsD, metrics); context.setCuboid(cuboid); context.setMapping(mapping); context.setNeedStorageAggregation(isNeedStorageAggregation(cuboid, groupsD, singleValuesD)); context.setExactAggregation(exactAggregation); groupsD.addAll(loosenedColumnD); TupleFilter.collectColumns(filterD, filterColumnD); context.setFilterMask(getQueryFilterMask(filterColumnD)); cubeInstance.getName(), cuboid.getId(), groupsD, filterColumnD, context.getFinalPushDownLimit(), context.getStorageLimitLevel(), context.isNeedStorageAggregation());
/**
 * Caps the scan threshold when a memory-hungry count-distinct measure is involved,
 * by estimating a row's in-memory size and dividing the per-query memory budget by it.
 *
 * <p>Row size estimate: ~3 bytes per dimension plus each projected measure's
 * return-type space estimate.
 *
 * @param dimensions    dimensions included in the scan
 * @param valueDecoders decoders describing the projected measures
 * @param context       storage context that receives the computed threshold
 */
private void setThreshold(Collection<TblColRef> dimensions, List<RowValueDecoder> valueDecoders, StorageContext context) {
    // only memory-hungry count-distinct queries need a tightened threshold
    if (!RowValueDecoder.hasMemHungryCountDistinct(valueDecoders)) {
        return;
    }

    int rowSizeEst = dimensions.size() * 3;
    for (RowValueDecoder decoder : valueDecoders) {
        MeasureDesc[] measures = decoder.getMeasures();
        BitSet projectionIndex = decoder.getProjectionIndex();
        // walk only the projected measure slots
        for (int i = projectionIndex.nextSetBit(0); i >= 0; i = projectionIndex.nextSetBit(i + 1)) {
            FunctionDesc func = measures[i].getFunction();
            rowSizeEst += func.getReturnDataType().getSpaceEstimate();
        }
    }

    // rows that fit in the per-query memory budget
    long rowEst = MEM_BUDGET_PER_QUERY / rowSizeEst;
    context.setThreshold((int) rowEst);
}
// Propagate the context's exact-aggregation flag into the request builder
// (builder type not visible here — confirm against the enclosing method).
builder.setIsExactAggregate(storageContext.isExactAggregation());
// NOTE(review): fragment (duplicate of an earlier occurrence in this chunk) — an
// ORDER BY forces the limit level to NO_LIMIT before applying push down, presumably
// because a storage-side limit could drop rows the sort needs — confirm in full method.
if (context.hasSort()) { storageLimitLevel = StorageLimitLevel.NO_LIMIT; logger.debug("storageLimitLevel set to NO_LIMIT because the query has order by"); context.applyLimitPushDown(cubeInstance, storageLimitLevel);
@Override public void implementOLAP(OLAPImplementor implementor) { Preconditions.checkState(columnRowType == null, "OLAPTableScan MUST NOT be shared by more than one prent"); // create context in case of non-join if (implementor.getContext() == null || !(implementor.getParentNode() instanceof OLAPJoinRel) || implementor.isNewOLAPContextRequired()) { implementor.allocateContext(); } context = implementor.getContext(); context.allTableScans.add(this); columnRowType = buildColumnRowType(); if (context.olapSchema == null) { OLAPSchema schema = olapTable.getSchema(); context.olapSchema = schema; context.storageContext.setConnUrl(schema.getStorageUrl()); } if (context.firstTableScan == null) { context.firstTableScan = this; } if (needCollectionColumns(implementor)) { // OLAPToEnumerableConverter on top of table scan, should be a select * from table for (TblColRef tblColRef : columnRowType.getAllColumns()) { if (!tblColRef.getName().startsWith("_KY_")) { context.allColumns.add(tblColRef); } } } }