// Mapper setup: cache the query (rollup) granularity from the indexer config.
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  rollupGranularity = getConfig().getGranularitySpec().getQueryGranularity();
}
// Mapper setup for cardinality estimation: read the indexer config from the Hadoop
// configuration. If the granularity spec already provides segment intervals, pre-create
// one HyperLogLog collector per interval; otherwise the intervals must be determined
// from the data and collectors are created lazily.
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  rollupGranularity = getConfig().getGranularitySpec().getQueryGranularity();
  config = HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
  Optional<Set<Interval>> intervals = config.getSegmentGranularIntervals();
  if (intervals.isPresent()) {
    determineIntervals = false;
    final ImmutableMap.Builder<Interval, HyperLogLogCollector> builder = ImmutableMap.builder();
    for (final Interval bucketInterval : intervals.get()) {
      builder.put(bucketInterval, HyperLogLogCollector.makeLatestCollector());
    }
    hyperLogLogs = builder.build();
  } else {
    determineIntervals = true;
    hyperLogLogs = Maps.newHashMap();
  }
}
this.rollupGran = spec.getDataSchema().getGranularitySpec().getQueryGranularity();
// Truncate the row timestamp to its query-granularity bucket, then hash the serialized
// group key so distinct rows can be counted with HyperLogLog.
final long truncatedTimestamp = granularitySpec.getQueryGranularity()
                                               .bucketStart(inputRow.getTimestamp())
                                               .getMillis();
final byte[] hashedDimensions = hashFunction.hashBytes(
    HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsBytes(
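The snippet above is truncated mid-call. A minimal sketch of the assumed continuation, reusing the hyperLogLogs map and config fields from the setup snippet above; the bucketInterval lookup and variable names are assumptions for illustration, not the verbatim source:

// Assumed continuation: look up the segment interval for the truncated timestamp and
// feed the hashed group key into that interval's HyperLogLog collector.
final Optional<Interval> maybeInterval =
    config.getGranularitySpec().bucketInterval(new DateTime(truncatedTimestamp));
if (maybeInterval.isPresent()) {
  hyperLogLogs.get(maybeInterval.get()).add(hashedDimensions);
}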
new UniformGranularitySpec(
    segmentGranularity,
    config.getGranularitySpec().getQueryGranularity(),
    config.getGranularitySpec().isRollup(),
    Lists.newArrayList(bucketsToRun)
// Counters for rows dropped outside the configured intervals and rows that failed to parse.
int thrownAway = 0;
int unparseable = 0;
final Granularity queryGranularity = granularitySpec.getQueryGranularity();
updatedIngestionSpec = updatedIngestionSpec.withQueryGranularity(config.getGranularitySpec().getQueryGranularity());
// Allocate a segment for the given row by submitting a SegmentAllocateAction,
// using the query and segment granularities from the data schema.
@Override
public SegmentIdentifier allocate(
    final InputRow row,
    final String sequenceName,
    final String previousSegmentId,
    final boolean skipSegmentLineageCheck
) throws IOException
{
  return taskActionClient.submit(
      new SegmentAllocateAction(
          dataSchema.getDataSource(),
          row.getTimestamp(),
          dataSchema.getGranularitySpec().getQueryGranularity(),
          dataSchema.getGranularitySpec().getSegmentGranularity(),
          sequenceName,
          previousSegmentId,
          skipSegmentLineageCheck
      )
  );
}
}
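A hedged wiring sketch for the allocator above: it is typically constructed from a task action client plus the data schema and then asked to place rows into segments. The toolbox variable, constructor arguments, and the null/false arguments below are illustrative assumptions, not the verbatim source:

// Hypothetical wiring; the constructor arguments mirror the fields used in allocate() above.
final SegmentAllocator allocator = new ActionBasedSegmentAllocator(
    toolbox.getTaskActionClient(),  // assumed source of the TaskActionClient
    dataSchema
);
final SegmentIdentifier identifier =
    allocator.allocate(inputRow, sequenceName, null, false);  // no previous segment; lineage check not skipped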
new UniformGranularitySpec(
    config.getGranularitySpec().getSegmentGranularity(),
    config.getGranularitySpec().getQueryGranularity(),
    config.getGranularitySpec().isRollup(),
    intervals
// IncrementalIndexSchema.Builder chain: configure the index schema from the data schema.
.withMinTimestamp(minTimestamp)
.withTimestampSpec(schema.getParser())
.withQueryGranularity(schema.getGranularitySpec().getQueryGranularity())
.withDimensionsSpec(schema.getParser())
.withMetrics(schema.getAggregators())
// Build an on-heap IncrementalIndex for a time bucket, configured from the data schema
// (timestamp spec, dimensions, query granularity, metrics, rollup) and the tuning config
// (row flush boundary, parse-exception handling). If a previous dimension ordering is
// known and the schema has no explicit dimensions, reuse it so column order stays stable.
private static IncrementalIndex makeIncrementalIndex(
    Bucket theBucket,
    AggregatorFactory[] aggs,
    HadoopDruidIndexerConfig config,
    Iterable<String> oldDimOrder,
    Map<String, ColumnCapabilitiesImpl> oldCapabilities
)
{
  final HadoopTuningConfig tuningConfig = config.getSchema().getTuningConfig();
  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withMinTimestamp(theBucket.time.getMillis())
      .withTimestampSpec(config.getSchema().getDataSchema().getParser().getParseSpec().getTimestampSpec())
      .withDimensionsSpec(config.getSchema().getDataSchema().getParser())
      .withQueryGranularity(config.getSchema().getDataSchema().getGranularitySpec().getQueryGranularity())
      .withMetrics(aggs)
      .withRollup(config.getSchema().getDataSchema().getGranularitySpec().isRollup())
      .build();

  IncrementalIndex newIndex = new IncrementalIndex.Builder()
      .setIndexSchema(indexSchema)
      .setReportParseExceptions(!tuningConfig.isIgnoreInvalidRows())
      .setMaxRowCount(tuningConfig.getRowFlushBoundary())
      .buildOnheap();

  if (oldDimOrder != null && !indexSchema.getDimensionsSpec().hasCustomDimensions()) {
    newIndex.loadDimensionIterable(oldDimOrder, oldCapabilities);
  }

  return newIndex;
}
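A hedged sketch of a call site, assuming this helper sits in the Hadoop index-generator reducer; combiningAggs, allDimensionNames, and persistIndex are illustrative names, not the verbatim source:

// First index for the bucket: no prior dimension order to carry over.
IncrementalIndex index = makeIncrementalIndex(bucket, combiningAggs, config, null, null);

// After persisting a full index, start a fresh one that reuses the previous dimension
// order and column capabilities so later spills keep a consistent column order.
index = makeIncrementalIndex(
    bucket,
    combiningAggs,
    config,
    allDimensionNames,                    // assumed: dimension order from the persisted index
    persistIndex.getColumnCapabilities()  // assumed: capabilities captured before persisting
);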