} else if (this.granularitySpec.isRollup()) {
  log.warn("No metricsSpec has been specified. Are you sure this is what you want?");
protected File mergeQueryableIndex(
    final List<QueryableIndex> indexes,
    final AggregatorFactory[] aggs,
    final File file,
    ProgressIndicator progressIndicator
) throws IOException
{
  boolean rollup = config.getSchema().getDataSchema().getGranularitySpec().isRollup();
  return HadoopDruidIndexerConfig.INDEX_MERGER_V9
      .mergeQueryableIndex(indexes, rollup, aggs, file, config.getIndexSpec(), progressIndicator, null);
}
segmentGranularity,
config.getGranularitySpec().getQueryGranularity(),
config.getGranularitySpec().isRollup(),
Lists.newArrayList(bucketsToRun)
indexMergerV9.mergeQueryableIndex(
    indexes,
    schema.getGranularitySpec().isRollup(),
    schema.getAggregators(),
    fileToUpload,
config.getGranularitySpec().getSegmentGranularity(),
config.getGranularitySpec().getQueryGranularity(),
config.getGranularitySpec().isRollup(),
intervals
@Test
public void testRollupSetting()
{
  List<Interval> intervals = Lists.newArrayList(
      Intervals.of("2012-01-08T00Z/2012-01-11T00Z"),
      Intervals.of("2012-01-07T00Z/2012-01-08T00Z"),
      Intervals.of("2012-01-03T00Z/2012-01-04T00Z"),
      Intervals.of("2012-01-01T00Z/2012-01-03T00Z")
  );
  final GranularitySpec spec = new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, false, intervals);
  Assert.assertFalse(spec.isRollup());
}
@Test
public void testRollupSetting()
{
  List<Interval> intervals = Lists.newArrayList(
      Intervals.of("2012-01-08T00Z/2012-01-11T00Z"),
      Intervals.of("2012-02-01T00Z/2012-03-01T00Z"),
      Intervals.of("2012-01-07T00Z/2012-01-08T00Z"),
      Intervals.of("2012-01-03T00Z/2012-01-04T00Z"),
      Intervals.of("2012-01-01T00Z/2012-01-03T00Z")
  );
  final GranularitySpec spec = new ArbitraryGranularitySpec(Granularities.NONE, false, intervals);
  Assert.assertFalse(spec.isRollup());
}
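// Sketch only, not from the source: a hypothetical companion test (testRollupSettingTrue is an
// invented name) exercising the opposite case through the same constructors used in the two tests
// above, with the rollup flag set to true. It assumes the same test-class context (JUnit's Assert,
// Granularities, Intervals) and is consistent with the assertTrue(spec.isRollup()) fragments below.
@Test
public void testRollupSettingTrue()
{
  List<Interval> intervals = Lists.newArrayList(
      Intervals.of("2012-01-01T00Z/2012-01-03T00Z")
  );

  final GranularitySpec uniformSpec = new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, true, intervals);
  Assert.assertTrue(uniformSpec.isRollup());

  final GranularitySpec arbitrarySpec = new ArbitraryGranularitySpec(Granularities.NONE, true, intervals);
  Assert.assertTrue(arbitrarySpec.isRollup());
}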
schema.getGranularitySpec().isRollup(),
schema.getAggregators(),
mergedTarget,
.withDimensionsSpec(schema.getParser())
.withMetrics(schema.getAggregators())
.withRollup(schema.getGranularitySpec().isRollup())
.build();
final IncrementalIndex newIndex = new IncrementalIndex.Builder()
Assert.assertTrue(spec.isRollup());
));
Assert.assertTrue(spec.isRollup());
private static IncrementalIndex makeIncrementalIndex(
    Bucket theBucket,
    AggregatorFactory[] aggs,
    HadoopDruidIndexerConfig config,
    Iterable<String> oldDimOrder,
    Map<String, ColumnCapabilitiesImpl> oldCapabilities
)
{
  final HadoopTuningConfig tuningConfig = config.getSchema().getTuningConfig();
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(theBucket.time.getMillis())
      .withTimestampSpec(config.getSchema().getDataSchema().getParser().getParseSpec().getTimestampSpec())
      .withDimensionsSpec(config.getSchema().getDataSchema().getParser())
      .withQueryGranularity(config.getSchema().getDataSchema().getGranularitySpec().getQueryGranularity())
      .withMetrics(aggs)
      .withRollup(config.getSchema().getDataSchema().getGranularitySpec().isRollup())
      .build();

  IncrementalIndex newIndex = new IncrementalIndex.Builder()
      .setIndexSchema(indexSchema)
      .setReportParseExceptions(!tuningConfig.isIgnoreInvalidRows()) // only used by OffHeapIncrementalIndex
      .setMaxRowCount(tuningConfig.getRowFlushBoundary())
      .setMaxBytesInMemory(TuningConfigs.getMaxBytesInMemoryOrDefault(tuningConfig.getMaxBytesInMemory()))
      .buildOnheap();

  if (oldDimOrder != null && !indexSchema.getDimensionsSpec().hasCustomDimensions()) {
    newIndex.loadDimensionIterable(oldDimOrder, oldCapabilities);
  }

  return newIndex;
}