@Test public void testNegPushLimitPastUnionExchange() throws Exception { // Negative case: should not push limit past through UnionExchange. try { test("alter session set \"planner.slice_target\" = 1"); final String[] expectedPlan ={}; // case 1. Only "offset", but no "limit" : should not push "limit" down. final String sql = String.format("select * from dfs_root.\"%s/tpchmulti/region\" offset 2", TEST_RES_PATH); final String[] excludedPlan = {"(?s)Limit\\(offset=\\[2\\].*UnionExchange.*Limit.*Scan"}; // case 2. "limit" is higher than # of rowcount in table : should not push "limit" down. final String sql2 = String.format("select * from dfs_root.\"%s/tpchmulti/region\" limit 100", TEST_RES_PATH); final String[] excludedPlan2 = {"(?s)Limit\\(fetch=\\[100\\].*UnionExchange.*Limit.*Scan"}; testLimitHelper(sql2, expectedPlan, excludedPlan2, 5); } finally { test("alter session set \"planner.slice_target\" = " + ExecConstants.SLICE_TARGET_OPTION.getDefault().getValue()); } }
/**
 * REST endpoint (PUT /acceleration/settings) that persists system-wide acceleration
 * settings as SYSTEM-scope options.
 *
 * Required descriptor fields: limit (positive), accelerateAggregation, accelerateRaw.
 * Optional: layoutRefreshMaxAttempts (only written when present).
 *
 * @param descriptor incoming JSON payload with the settings to apply
 * @throws IllegalArgumentException if a required field is missing or limit is not positive
 */
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("/acceleration/settings")
public void saveSystemSettings(final SystemSettingsApiDescriptor descriptor) {
  // Validate the payload up front so no option is written if any field is invalid.
  Preconditions.checkArgument(descriptor.getLimit() != null, "limit is required");
  Preconditions.checkArgument(descriptor.getLimit() > 0, "limit must be positive");
  Preconditions.checkArgument(descriptor.getAccelerateAggregation() != null, "accelerateAggregation is required");
  Preconditions.checkArgument(descriptor.getAccelerateRaw() != null, "accelerateRaw is required");
  SystemOptionManager optionManager = context.getOptionManager();
  // Each descriptor field maps to one SYSTEM-level option.
  optionManager.setOption(OptionValue.createLong(OptionValue.OptionType.SYSTEM, ReflectionOptions.MAX_AUTOMATIC_REFLECTIONS.getOptionName(), descriptor.getLimit()));
  optionManager.setOption(OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, ReflectionOptions.ENABLE_AUTOMATIC_AGG_REFLECTIONS.getOptionName(), descriptor.getAccelerateAggregation()));
  optionManager.setOption(OptionValue.createBoolean(OptionValue.OptionType.SYSTEM, ReflectionOptions.ENABLE_AUTOMATIC_RAW_REFLECTIONS.getOptionName(), descriptor.getAccelerateRaw()));
  // layoutRefreshMaxAttempts is optional: leave the current option untouched when absent.
  if (descriptor.getLayoutRefreshMaxAttempts() != null) {
    optionManager.setOption(OptionValue.createLong(OptionValue.OptionType.SYSTEM, ExecConstants.LAYOUT_REFRESH_MAX_ATTEMPTS.getOptionName(), descriptor.getLayoutRefreshMaxAttempts()));
  }
}
}
/**
 * Validates the option value: in addition to the superclass checks, the numeric value
 * must be an exact power of two.
 *
 * @param v the option value to check
 * @throws UserException (validation error) when the value is not a power of two
 */
@Override
public void validate(OptionValue v) {
  super.validate(v);
  if (isPowerOfTwo(v.getNumVal())) {
    return; // value is acceptable
  }
  throw UserException.validationError()
      .message(String.format("Option %s must be a power of two.", getOptionName()))
      .build(logger);
}
} finally { test("alter session set \"planner.enable_trivial_singular\" = true"); test("alter session set \"planner.slice_target\" = " + ExecConstants.SLICE_TARGET_OPTION.getDefault().getValue());
/**
 * Builds a spill manager for the given operator id.
 *
 * Fails fast when no spill directories are configured. Disk-space thresholds come from
 * the option manager when one is supplied, otherwise from the validators' defaults.
 * Finally, the per-operator spill subdirectories are created; any failure there is
 * re-thrown as a data-write error tagged with the caller's name.
 *
 * @param sabotConfig   source of the configured spill directory list
 * @param optionManager option source for disk-space limits; may be null (defaults used)
 * @param id            unique id used to name the spill subdirectories
 * @param hadoopConf    filesystem configuration for the spill locations
 * @param spillService  service that owns the physical spill directories
 * @param caller        operator name, used only for error context
 */
public SpillManager(SabotConfig sabotConfig, OptionManager optionManager, String id, Configuration hadoopConf,
                    SpillService spillService, String caller) {
  final List<String> spillLocations = new ArrayList<>(sabotConfig.getStringList(ExecConstants.SPILL_DIRS));
  if (spillLocations.isEmpty()) {
    throw UserException.dataWriteError().message("No spill locations specified.").build(logger);
  }
  this.id = id;
  this.caller = caller;
  this.hadoopConf = hadoopConf;
  this.spillService = spillService;
  // Thresholds: prefer live option values, fall back to validator defaults.
  if (optionManager == null) {
    this.minDiskSpacePercentage = ExecConstants.SPILL_DISK_SPACE_LIMIT_PERCENTAGE.getDefault().getFloatVal();
    this.minDiskSpace = ExecConstants.SPILL_DISK_SPACE_LIMIT_BYTES.getDefault().getNumVal();
    this.healthCheckInterval = ExecConstants.SPILL_DISK_SPACE_CHECK_INTERVAL.getDefault().getNumVal();
  } else {
    this.minDiskSpacePercentage = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_LIMIT_PERCENTAGE);
    this.minDiskSpace = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_LIMIT_BYTES);
    this.healthCheckInterval = optionManager.getOption(ExecConstants.SPILL_DISK_SPACE_CHECK_INTERVAL);
  }
  try {
    spillService.makeSpillSubdirs(id);
  } catch (UserException e) {
    // Preserve the original failure and record which operator was creating the dirs.
    throw UserException.dataWriteError(e)
        .addContext("Caller", caller)
        .build(logger);
  }
}
@Test @Ignore // TODO file JIRA to fix this public void testFix2967() throws Exception { setSessionOption(PlannerSettings.BROADCAST.getOptionName(), "false"); setSessionOption(PlannerSettings.HASHJOIN.getOptionName(), "false"); setSessionOption(ExecConstants.SLICE_TARGET, "1"); setSessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, "23"); final String TEST_RES_PATH = TestTools.getWorkingPath() + "/src/test/resources"; try { test("select * from dfs.\"%s/join/j1\" j1 left outer join dfs.\"%s/join/j2\" j2 on (j1.c_varchar = j2.c_varchar)", TEST_RES_PATH, TEST_RES_PATH); } finally { setSessionOption(PlannerSettings.BROADCAST.getOptionName(), String.valueOf(PlannerSettings.BROADCAST.getDefault ().getBoolVal())); setSessionOption(PlannerSettings.HASHJOIN.getOptionName(), String.valueOf(PlannerSettings.HASHJOIN.getDefault() .getBoolVal())); setSessionOption(ExecConstants.SLICE_TARGET, String.valueOf(ExecConstants.SLICE_TARGET_DEFAULT)); setSessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, String.valueOf(ExecConstants.MAX_WIDTH_PER_NODE .getDefault().getNumVal())); } }
/**
 * Base reader constructor: derives the per-batch row and byte budgets.
 *
 * Row budget: from the operator context's target batch size when a context is present,
 * otherwise the TARGET_BATCH_RECORDS_MAX default. Byte budget: from the
 * OPERATOR_TARGET_BATCH_BYTES option when it is reachable and set, otherwise the
 * validator default.
 *
 * @param context operator context; may be null (defaults are used)
 * @param columns projected columns; forwarded to setColumns when non-null
 */
public AbstractRecordReader(final OperatorContext context, final List<SchemaPath> columns) {
  this.context = context;
  if (context != null) {
    this.numRowsPerBatch = context.getTargetBatchSize();
  } else {
    this.numRowsPerBatch = ExecConstants.TARGET_BATCH_RECORDS_MAX.getDefault().getNumVal();
  }
  // Use the option value only when the whole lookup chain is non-null.
  if (context != null
      && context.getOptions() != null
      && context.getOptions().getOption(ExecConstants.OPERATOR_TARGET_BATCH_BYTES) != null) {
    this.numBytesPerBatch = context.getOptions().getOption(ExecConstants.OPERATOR_TARGET_BATCH_BYTES).getNumVal();
  } else {
    this.numBytesPerBatch = ExecConstants.OPERATOR_TARGET_BATCH_BYTES_VALIDATOR.getDefault().getNumVal();
  }
  if (columns != null) {
    setColumns(columns);
  }
}
/**
 * Test fixture: builds PlannerSettings over a mocked OptionManager that answers the
 * three options the planner reads with their defaults, plus a single-executor cluster,
 * and creates a VolcanoPlanner-backed RelOptCluster from them.
 */
@Before
public void setup() {
  optionManager = mock(OptionManager.class);
  // Stub every option the planner consults with its declared default.
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_LEAF_LIMITS.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getDefault());
  when(optionManager.getOption(eq(ExecConstants.SLICE_TARGET)))
      .thenReturn(ExecConstants.SLICE_TARGET_OPTION.getDefault());

  final ClusterResourceInformation clusterInfo = mock(ClusterResourceInformation.class);
  when(clusterInfo.getExecutorNodeCount()).thenReturn(1); // single-node cluster

  plannerSettings = new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, clusterInfo);
  cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), rexBuilder);
}
/**
 * Sets the reflection deletion grace period as a SYSTEM-level option.
 *
 * @param periodInSeconds grace period before deleted reflections are cleaned up
 */
protected static void setDeletionGracePeriod(long periodInSeconds) {
  final OptionValue gracePeriod =
      OptionValue.createLong(SYSTEM, REFLECTION_DELETION_GRACE_PERIOD.getOptionName(), periodInSeconds);
  l(ContextService.class).get().getOptionManager().setOption(gracePeriod);
}
/**
 * Sets the reflection manager's refresh delay as a SYSTEM-level option.
 *
 * @param delayInMillis delay between reflection manager refresh passes, in milliseconds
 */
protected static void setManagerRefreshDelayMs(long delayInMillis) {
  final OptionValue refreshDelay =
      OptionValue.createLong(SYSTEM, REFLECTION_MANAGER_REFRESH_DELAY_MILLIS.getOptionName(), delayInMillis);
  l(ContextService.class).get().getOptionManager().setOption(refreshDelay);
}
/**
 * Creates the validator under test: a positive-long validator for a dummy option.
 *
 * @param max upper bound accepted by the validator
 * @param def default value for the option
 * @return a PositiveLongValidator named "test-option"
 */
protected TypeValidator newValidator(long max, long def) {
  final String optionName = "test-option";
  return new TypeValidators.PositiveLongValidator(optionName, max, def);
}
/**
 * Schedules the next materialization-cache refresh pass.
 *
 * The delay comes from the MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS option; if the
 * lookup fails for any reason the validator default is used instead (deliberate
 * best-effort — scheduling must never be skipped because the option store is down).
 *
 * @param refresher the task to run at the next refresh time
 */
private void scheduleNextCacheRefresh(CacheRefresher refresher) {
  long delayMillis;
  try {
    delayMillis = getOptionManager().getOption(MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS);
  } catch (Exception e) {
    logger.warn("Failed to retrieve materialization cache refresh delay", e);
    delayMillis = MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS.getDefault().getNumVal();
  }
  final long runAtMillis = System.currentTimeMillis() + delayMillis;
  schedulerService.get().schedule(scheduleForRunningOnceAt(ofEpochMilli(runAtMillis)), refresher);
}
/**
 * Configures the materialization cache via SYSTEM-level options: toggles the cache and
 * sets its refresh delay (converted from seconds to milliseconds).
 *
 * @param enabled               whether the materialization cache is on
 * @param refreshDelayInSeconds refresh cadence, in seconds
 */
protected static void setMaterializationCacheSettings(boolean enabled, long refreshDelayInSeconds) {
  final OptionValue cacheEnabled =
      OptionValue.createBoolean(SYSTEM, MATERIALIZATION_CACHE_ENABLED.getOptionName(), enabled);
  final OptionValue refreshDelay = OptionValue.createLong(SYSTEM,
      ReflectionOptions.MATERIALIZATION_CACHE_REFRESH_DELAY_MILLIS.getOptionName(), refreshDelayInSeconds * 1000);
  l(ContextService.class).get().getOptionManager().setOption(cacheEnabled);
  l(ContextService.class).get().getOptionManager().setOption(refreshDelay);
}
/**
 * Validates the option value: in addition to the superclass checks, the numeric value
 * must lie in the inclusive range [1, max].
 *
 * @param v the option value to check
 * @throws UserException (validation error) when the value is outside [1, max]
 */
@Override
public void validate(OptionValue v) {
  super.validate(v);
  // Reject anything above the configured maximum or below 1.
  if (v.getNumVal() > max || v.getNumVal() < 1) {
    throw UserException.validationError()
        .message(String.format("Option %s must be between %d and %d.", getOptionName(), 1, max))
        .build(logger);
  }
}
}