static int getNumberPartitions(OperatorContext context, HashPartitionSender config) {
  final OptionManager optMgr = context.getOptions();
  long sliceTarget = optMgr.getOption(ExecConstants.SLICE_TARGET).getNumVal();
  int threadFactor = optMgr.getOption(PlannerSettings.PARTITION_SENDER_THREADS_FACTOR.getOptionName())
      .getNumVal().intValue();
  int tmpParts = 1;
  int outGoingBatchCount = config.getDestinations().size();
  double cost = config.getCost();
  if (sliceTarget != 0 && outGoingBatchCount != 0) {
    tmpParts = (int) Math.round(((cost / (sliceTarget * 1.0)) / (outGoingBatchCount * 1.0)) / (threadFactor * 1.0));
    if (tmpParts < 1) {
      tmpParts = 1;
    }
  }
  final int imposedThreads = optMgr.getOption(PlannerSettings.PARTITION_SENDER_SET_THREADS.getOptionName())
      .getNumVal().intValue();
  if (imposedThreads > 0) {
    return imposedThreads;
  } else {
    return Math.min(tmpParts,
        optMgr.getOption(PlannerSettings.PARTITION_SENDER_MAX_THREADS.getOptionName()).getNumVal().intValue());
  }
}
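// Worked example (not from the original source): a minimal sketch plugging
// illustrative numbers into the same formula as getNumberPartitions above.
// All values and the method name below are invented for the example.
static int exampleThreadCount() {
  double cost = 6_000_000d;    // hypothetical plan cost
  long sliceTarget = 100_000L; // hypothetical slice target
  int outGoingBatchCount = 10; // hypothetical receiver count
  int threadFactor = 2;        // hypothetical threads factor
  // cost / sliceTarget / batches / factor, rounded and clamped to at least 1:
  // 6,000,000 / 100,000 = 60; 60 / 10 = 6; 6 / 2 = 3
  int tmpParts = (int) Math.round(cost / sliceTarget / outGoingBatchCount / threadFactor);
  return Math.max(tmpParts, 1); // returns 3
}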
@Test
public void testGroupScanWithPartitionIdentificationOff() throws Exception {
  long defaultValue = ExecConstants.PARQUET_MAX_PARTITION_COLUMNS_VALIDATOR.getDefault().getNumVal();
  try {
    QueryTestUtil.test(getRpcClient(), "alter system set \"store.parquet.partition_column_limit\" = 0");
    List<String> partitionColumnList = getPartitionColumnsForDataSet("datasets/parquet_no_partition_identification");
    Assert.assertEquals(1, partitionColumnList.size());
    Assert.assertEquals("$_dremio_$_update_$", partitionColumnList.get(0));
  } finally {
    QueryTestUtil.test(getRpcClient(),
        "alter system set \"store.parquet.partition_column_limit\" = " + defaultValue);
  }
}
@Override
public void validate(OptionValue v) {
  super.validate(v);
  if (v.getNumVal() > max || v.getNumVal() < 1) {
    throw UserException.validationError()
        .message(String.format("Option %s must be between %d and %d.", getOptionName(), 1, max))
        .build(logger);
  }
}
@Test
public void testJobCleanup() throws Exception {
  jobsService = (LocalJobsService) l(JobsService.class);
  SqlQuery ctas = getQueryFromSQL("SHOW SCHEMAS");
  Job job = jobsService.submitJob(JobRequest.newBuilder()
      .setSqlQuery(ctas)
      .build(), NoOpJobStatusListener.INSTANCE);
  job.getData().loadIfNecessary();

  // expire results immediately: max age of 0 days, 10 ms in debug terms
  SabotContext context = l(SabotContext.class);
  OptionValue days = OptionValue.createLong(OptionType.SYSTEM,
      ExecConstants.RESULTS_MAX_AGE_IN_DAYS.getOptionName(), 0);
  context.getOptionManager().setOption(days);
  OptionValue millis = OptionValue.createLong(OptionType.SYSTEM,
      ExecConstants.DEBUG_RESULTS_MAX_AGE_IN_MILLISECONDS.getOptionName(), 10);
  context.getOptionManager().setOption(millis);

  Thread.sleep(20);

  LocalJobsService.CleanupTask cleanupTask = jobsService.new CleanupTask();
  cleanupTask.cleanup();

  // make sure that the job output directory is gone
  assertFalse(jobsService.getJobResultsStore().jobOutputDirectoryExists(job.getJobId()));
  job = jobsService.getJob(job.getJobId());
  assertFalse(JobDetailsUI.of(job).getResultsAvailable());

  // restore the retention options
  context.getOptionManager().setOption(OptionValue.createLong(OptionType.SYSTEM,
      ExecConstants.RESULTS_MAX_AGE_IN_DAYS.getOptionName(), 30));
  context.getOptionManager().setOption(OptionValue.createLong(OptionType.SYSTEM,
      ExecConstants.DEBUG_RESULTS_MAX_AGE_IN_MILLISECONDS.getOptionName(), 0));
}
@Test // see DRILL-2408
public void testWriteEmptyFileAfterFlush() throws Exception {
  final String outputFile = "testparquetwriteremptyfiles_test_write_empty_file_after_flush";
  deleteTableIfExists(outputFile);

  try {
    // this specific value will force a flush just after the final row is written;
    // this may cause the creation of a new "empty" parquet file
    test("ALTER SESSION SET \"store.parquet.block-size\" = 19926");

    final String query = "SELECT * FROM cp.\"employee.json\" LIMIT 100";
    test("CREATE TABLE dfs_test.%s AS %s", outputFile, query);

    // this query will fail if an "empty" file was created
    testBuilder()
        .unOrdered()
        .sqlQuery("SELECT * FROM dfs_test.%s", outputFile)
        .sqlBaselineQuery(query)
        .go();
  } finally {
    // restore the session option
    test("ALTER SESSION SET \"store.parquet.block-size\" = %d",
        ExecConstants.PARQUET_BLOCK_SIZE_VALIDATOR.getDefault().getNumVal());
    deleteTableIfExists(outputFile);
  }
}
public long getPlanningMemoryLimit() {
  return options.getOption(PLANNER_MEMORY_LIMIT.getOptionName()).getNumVal();
}
@Override
public long getOption(LongValidator validator) {
  return getOption(validator.getOptionName()).getNumVal();
}
@Test
public void testSortSpill() throws Exception {
  final String query = "CREATE TABLE dfs_test.test PARTITION BY (ss_store_sk) "
      + "LOCALSORT BY (ss_customer_sk) AS SELECT * FROM dfs_test.tpcds.store_sales";
  setSessionOption(ExecConstants.TEST_MEMORY_LIMIT.getOptionName(), "1000000000");
  testRunAndPrint(UserBitShared.QueryType.SQL, query);
}
@Override
public void validate(OptionValue v) {
  super.validate(v);
  if (v.getNumVal() > max || v.getNumVal() < min) {
    throw UserException.validationError()
        .message(String.format("Option %s must be between %d and %d.", getOptionName(), min, max))
        .build(logger);
  }
}
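// Minimal standalone sketch of the bounds check above, with the option
// framework stubbed out. validateLong and the option name are illustrative
// stand-ins, not the real validator API.
static void validateLong(String optionName, long numVal, long min, long max) {
  if (numVal > max || numVal < min) {
    throw new IllegalArgumentException(
        String.format("Option %s must be between %d and %d.", optionName, min, max));
  }
}
// e.g. validateLong("example.option", 8, 1, 64) passes;
//      validateLong("example.option", 128, 1, 64) throws.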
public AutoCloseable with(final LongValidator validator, final long value) {
  final long oldValue = testContext.getOptions().getOption(validator);
  testContext.getOptions().setOption(OptionValue.createLong(OptionType.SYSTEM, validator.getOptionName(), value));
  return new AutoCloseable() {
    @Override
    public void close() throws Exception {
      // restore the previous value when the caller's try-with-resources block ends
      testContext.getOptions().setOption(OptionValue.createLong(OptionType.SYSTEM, validator.getOptionName(), oldValue));
    }
  };
}
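// Usage sketch for the helper above; SOME_LONG_VALIDATOR and runTestQuery()
// are illustrative placeholders, not identifiers from the original code.
void exampleWithOverriddenOption() throws Exception {
  try (AutoCloseable ignored = with(SOME_LONG_VALIDATOR, 10L)) {
    runTestQuery(); // code here observes the overridden system option
  } // close() restores the previous value, even if runTestQuery() throws
}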