@Override
public long getOption(LongValidator validator) {
  return getOptionSafe(validator).getNumVal();
}
/**
 * Get the average number of cores in executor nodes.
 * This is used to derive the default value of MAX_WIDTH_PER_NODE.
 *
 * @return the configured max width per node, or 70% of the average executor core count if unset
 */
public long getAverageExecutorCores(final OptionManager optionManager) {
  long configuredMaxWidthPerNode =
      optionManager.getOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY).getNumVal();
  if (configuredMaxWidthPerNode == 0) {
    Preconditions.checkState(averageExecutorCores > 0, "No executors are available");
    /* The user has not overridden the default, so use the default MAX_WIDTH_PER_NODE,
     * which is 70% of the average number of cores as computed by ClusterResourceInformation. */
    return Math.round(averageExecutorCores * 0.7);
  } else {
    return configuredMaxWidthPerNode;
  }
}
}
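A quick worked illustration of the fallback above (the input value is hypothetical):

// With averageExecutorCores = 16 and MAX_WIDTH_PER_NODE left unset (0),
// the method returns Math.round(16 * 0.7) = 11, i.e. the default per-node
// width is 70% of the average executor core count.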
public long getSliceTarget() {
  long sliceTarget = options.getOption(ExecConstants.SLICE_TARGET).getNumVal();
  if (isLeafLimitsEnabled() && minimumSampleSize > 0) {
    return Math.min(sliceTarget, minimumSampleSize);
  }
  return sliceTarget;
}
@Override
public long getOption(LongValidator validator) {
  return getOption(validator.getOptionName()).getNumVal();
}
public long getIdentifierMaxLength() {
  return options.getOption(IDENTIFIER_MAX_LENGTH.getOptionName()).getNumVal();
}
public long getPlanningMemoryLimit() {
  return options.getOption(PLANNER_MEMORY_LIMIT.getOptionName()).getNumVal();
}
private void scheduleNextCacheRefresh(CacheRefresher refresher) {
  long cacheUpdateDelay;
  try {
    cacheUpdateDelay = getOptionManager().getOption(MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS);
  } catch (Exception e) {
    logger.warn("Failed to retrieve materialization cache refresh delay", e);
    cacheUpdateDelay = MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS.getDefault().getNumVal();
  }
  schedulerService.get()
      .schedule(scheduleForRunningOnceAt(ofEpochMilli(System.currentTimeMillis() + cacheUpdateDelay)), refresher);
}
static int getNumberPartitions(OperatorContext context, HashPartitionSender config) {
  final OptionManager optMgr = context.getOptions();
  long sliceTarget = optMgr.getOption(ExecConstants.SLICE_TARGET).getNumVal();
  int threadFactor = optMgr.getOption(PlannerSettings.PARTITION_SENDER_THREADS_FACTOR.getOptionName())
      .getNumVal().intValue();
  int tmpParts = 1;
  int outGoingBatchCount = config.getDestinations().size();
  double cost = config.getCost();

  // estimate the thread count from the operator cost relative to the slice target,
  // spread across destinations and scaled down by the thread factor
  if (sliceTarget != 0 && outGoingBatchCount != 0) {
    tmpParts = (int) Math.round(((cost / (sliceTarget * 1.0)) / (outGoingBatchCount * 1.0)) / (threadFactor * 1.0));
    if (tmpParts < 1) {
      tmpParts = 1;
    }
  }

  // an explicitly imposed thread count overrides the estimate; otherwise cap the estimate
  final int imposedThreads = optMgr.getOption(PlannerSettings.PARTITION_SENDER_SET_THREADS.getOptionName())
      .getNumVal().intValue();
  if (imposedThreads > 0) {
    return imposedThreads;
  } else {
    return Math.min(tmpParts, optMgr.getOption(PlannerSettings.PARTITION_SENDER_MAX_THREADS.getOptionName())
        .getNumVal().intValue());
  }
}
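For intuition, a worked pass through the sizing formula above (all values are hypothetical):

// cost = 4_000_000, sliceTarget = 100_000, outGoingBatchCount = 10, threadFactor = 2
// tmpParts = round(4_000_000 / 100_000 / 10 / 2) = round(2.0) = 2
// With PARTITION_SENDER_SET_THREADS <= 0 and PARTITION_SENDER_MAX_THREADS >= 2,
// the sender runs with min(2, max) = 2 threads.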
public AbstractRecordReader(final OperatorContext context, final List<SchemaPath> columns) {
  this.context = context;
  if (context == null) {
    this.numRowsPerBatch = ExecConstants.TARGET_BATCH_RECORDS_MAX.getDefault().getNumVal();
  } else {
    this.numRowsPerBatch = context.getTargetBatchSize();
  }

  if (context == null
      || context.getOptions() == null
      || context.getOptions().getOption(ExecConstants.OPERATOR_TARGET_BATCH_BYTES) == null) {
    this.numBytesPerBatch = ExecConstants.OPERATOR_TARGET_BATCH_BYTES_VALIDATOR.getDefault().getNumVal();
  } else {
    this.numBytesPerBatch = context.getOptions().getOption(ExecConstants.OPERATOR_TARGET_BATCH_BYTES).getNumVal();
  }

  if (columns != null) {
    setColumns(columns);
  }
}
@Override
public void validate(OptionValue v) {
  super.validate(v);
  if (!isPowerOfTwo(v.getNumVal())) {
    throw UserException.validationError()
        .message(String.format("Option %s must be a power of two.", getOptionName()))
        .build(logger);
  }
}
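The isPowerOfTwo helper is referenced but not shown; a minimal sketch of it, assuming the standard bit-twiddling check (only the name comes from the call site, the body is an assumption):

private static boolean isPowerOfTwo(long v) {
  // a positive value is a power of two exactly when it has a single set bit,
  // i.e. clearing the lowest set bit leaves zero
  return v > 0 && (v & (v - 1)) == 0;
}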
long janinoThreshold = (value != null) ? value.getNumVal() : defaultJaninoThreshold;
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery relMetadataQuery) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    // We use multiplier 0.05 for TopN operator, and 0.1 for Sort, to make TopN a preferred choice.
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  RelNode child = this.getInput();
  double inputRows = relMetadataQuery.getRowCount(child);
  // int rowWidth = child.getRowType().getPrecision();
  int numSortFields = this.collation.getFieldCollations().size();
  double cpuCost = DremioCost.COMPARE_CPU_COST * numSortFields * inputRows * (Math.log(inputRows) / Math.log(2));
  double diskIOCost = 0; // assume in-memory for now until we enforce operator-level memory constraints

  // TODO: use rowWidth instead of avgFieldWidth * numFields
  // avgFieldWidth * numFields * inputRows
  double numFields = this.getRowType().getFieldCount();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();
  double memCost = fieldWidth * numFields * inputRows;

  Factory costFactory = (Factory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0, memCost);
}
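The CPU term above is the usual n * log2(n) comparison-sort estimate scaled by the number of sort keys; a worked example with hypothetical values:

// inputRows = 1024, numSortFields = 2
// cpuCost = COMPARE_CPU_COST * 2 * 1024 * log2(1024)
//         = COMPARE_CPU_COST * 2 * 1024 * 10
//         = COMPARE_CPU_COST * 20_480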
@Override
public OptionValueWrapper next() {
  final OptionValue value = mergedOptions.next();
  final Status status;
  if (value.getType() == OptionType.BOOT) {
    status = Status.BOOT;
  } else {
    final OptionValue def = fragmentOptions.getValidator(value.getName()).getDefault();
    if (value.equalsIgnoreType(def)) {
      status = Status.DEFAULT;
    } else {
      status = Status.CHANGED;
    }
  }
  return new OptionValueWrapper(value.getName(), value.getKind(), value.getType(), value.getNumVal(),
      value.getStringVal(), value.getBoolVal(), value.getFloatVal(), status);
}
@SuppressWarnings("rawtypes") private Setting toSetting(OptionValue option){ // display the value if it is the whitelist or has been set. final boolean showOutsideWhitelist = options.isSet(option.getName()); switch(option.getKind()){ case BOOLEAN: return new Setting.BooleanSetting(option.getName(), option.getBoolVal(), showOutsideWhitelist); case DOUBLE: return new Setting.FloatSetting(option.getName(), option.getFloatVal(), showOutsideWhitelist); case LONG: return new Setting.IntegerSetting(option.getName(), option.getNumVal(), showOutsideWhitelist); case STRING: return new Setting.TextSetting(option.getName(), option.getStringVal(), showOutsideWhitelist); default: throw new IllegalStateException("Unable to handle kind " + option.getKind()); } } }
@Test
public void testGroupScanWithPartitionIdentificationOff() throws Exception {
  long defaultValue = ExecConstants.PARQUET_MAX_PARTITION_COLUMNS_VALIDATOR.getDefault().getNumVal();
  try {
    QueryTestUtil.test(getRpcClient(), "alter system set \"store.parquet.partition_column_limit\" = 0");
    List<String> partitionColumnList = getPartitionColumnsForDataSet("datasets/parquet_no_partition_identification");
    Assert.assertEquals(1, partitionColumnList.size());
    Assert.assertEquals("$_dremio_$_update_$", partitionColumnList.get(0));
  } finally {
    QueryTestUtil.test(getRpcClient(),
        "alter system set \"store.parquet.partition_column_limit\" = " + defaultValue);
  }
}
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  final RelNode child = this.getInput();
  double inputRows = mq.getRowCount(child);
  int numGroupByFields = this.getGroupCount();
  int numAggrFields = this.aggCalls.size();
  // cpu cost of hashing each grouping key
  double cpuCost = DremioCost.HASH_CPU_COST * numGroupByFields * inputRows;
  // add cpu cost for computing the aggregate functions
  cpuCost += DremioCost.FUNC_CPU_COST * numAggrFields * inputRows;
  double diskIOCost = 0; // assume in-memory for now until we enforce operator-level memory constraints

  // TODO: use distinct row count
  // + hash table template stuff
  double factor = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.HASH_AGG_TABLE_FACTOR_KEY).getFloatVal();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();

  // table + hashValues + links
  double memCost = ((fieldWidth * numGroupByFields) + IntHolder.WIDTH + IntHolder.WIDTH) * inputRows * factor;

  Factory costFactory = (Factory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0 /* network cost */, memCost);
}
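The memory term above charges one hash-table entry per input row (pending the distinct-row-count TODO); a worked example with hypothetical values:

// fieldWidth = 8, numGroupByFields = 2, IntHolder.WIDTH = 4 (assumed int width in bytes)
// per-row bytes = (8 * 2) + 4 + 4 = 24  (keys + hash value + link)
// with inputRows = 1_000_000 and factor = 1.1:
// memCost = 24 * 1_000_000 * 1.1 = 26_400_000 bytes, roughly 26.4 MB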
@Test
@Ignore // TODO file JIRA to fix this
public void testFix2967() throws Exception {
  setSessionOption(PlannerSettings.BROADCAST.getOptionName(), "false");
  setSessionOption(PlannerSettings.HASHJOIN.getOptionName(), "false");
  setSessionOption(ExecConstants.SLICE_TARGET, "1");
  setSessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, "23");

  final String TEST_RES_PATH = TestTools.getWorkingPath() + "/src/test/resources";

  try {
    test("select * from dfs.\"%s/join/j1\" j1 left outer join dfs.\"%s/join/j2\" j2 on (j1.c_varchar = j2.c_varchar)",
        TEST_RES_PATH, TEST_RES_PATH);
  } finally {
    setSessionOption(PlannerSettings.BROADCAST.getOptionName(),
        String.valueOf(PlannerSettings.BROADCAST.getDefault().getBoolVal()));
    setSessionOption(PlannerSettings.HASHJOIN.getOptionName(),
        String.valueOf(PlannerSettings.HASHJOIN.getDefault().getBoolVal()));
    setSessionOption(ExecConstants.SLICE_TARGET, String.valueOf(ExecConstants.SLICE_TARGET_DEFAULT));
    setSessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY,
        String.valueOf(ExecConstants.MAX_WIDTH_PER_NODE.getDefault().getNumVal()));
  }
}
@Test // see DRILL-2408
public void testWriteEmptyFileAfterFlush() throws Exception {
  final String outputFile = "testparquetwriteremptyfiles_test_write_empty_file_after_flush";
  deleteTableIfExists(outputFile);

  try {
    // this specific value will force a flush just after the final row is written;
    // this may cause the creation of a new "empty" parquet file
    test("ALTER SESSION SET \"store.parquet.block-size\" = 19926");

    final String query = "SELECT * FROM cp.\"employee.json\" LIMIT 100";
    test("CREATE TABLE dfs_test.%s AS %s", outputFile, query);

    // this query will fail if an "empty" file was created
    testBuilder()
        .unOrdered()
        .sqlQuery("SELECT * FROM dfs_test.%s", outputFile)
        .sqlBaselineQuery(query)
        .go();
  } finally {
    // restore the session option
    test("ALTER SESSION SET \"store.parquet.block-size\" = %d",
        ExecConstants.PARQUET_BLOCK_SIZE_VALIDATOR.getDefault().getNumVal());
    deleteTableIfExists(outputFile);
  }
}