/**
 * Reads the indexer configuration out of the Hadoop job context and builds the
 * dimension-selection helper used by this mapper.
 *
 * Fixes vs. original: calls {@code super.setup(context)} (the overridden
 * Mapper/sibling setup performs its own initialization — skipping it diverges
 * from the identical setup() elsewhere in this file) and declares the checked
 * exceptions that the superclass contract exposes.
 *
 * @param context Hadoop mapper context carrying the serialized indexer config
 * @throws IOException          propagated from the superclass setup
 * @throws InterruptedException propagated from the superclass setup
 */
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  final HadoopDruidIndexerConfig config =
      HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
  // Safe cast: the job entry point rejects any PartitionsSpec that is not
  // a SingleDimensionPartitionsSpec before these mappers run (see the
  // instanceof guard elsewhere in this file).
  final SingleDimensionPartitionsSpec spec =
      (SingleDimensionPartitionsSpec) config.getPartitionsSpec();
  helper = new DeterminePartitionsDimSelectionMapperHelper(config, spec.getPartitionDimension());
}
/**
 * Initializes this mapper: runs the superclass setup, then materializes the
 * indexer configuration from the job's Hadoop {@code Configuration} and
 * constructs the dimension-selection helper for the configured partition
 * dimension.
 *
 * @param context Hadoop mapper context carrying the serialized indexer config
 * @throws IOException          propagated from the superclass setup
 * @throws InterruptedException propagated from the superclass setup
 */
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  final HadoopDruidIndexerConfig indexerConfig =
      HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
  // The partitions spec is guaranteed to be single-dimension by the time the
  // mappers run, so this cast mirrors the guard at job-submission time.
  final SingleDimensionPartitionsSpec partitionsSpec =
      (SingleDimensionPartitionsSpec) indexerConfig.getPartitionsSpec();
  helper = new DeterminePartitionsDimSelectionMapperHelper(
      indexerConfig,
      partitionsSpec.getPartitionDimension()
  );
}
// NOTE(review): this line is a spliced diff fragment, not a complete block — the
// braces do not balance and code from two branches is concatenated. Left
// byte-identical. Reading it: the first branch delegates partition determination
// to the spec's own job (recording the Hadoop job-id file name before running),
// while the else-branch appears to fan a fixed getNumShards() count out per
// interval into a TreeMap of HadoopyShardSpec lists — presumably keyed by
// interval start millis, TODO confirm against the full file.
job = config.getPartitionsSpec().getPartitionJob(config); config.setHadoopJobIdFileName(hadoopJobIdFile); return JobHelper.runSingleJob(job, config); } else { int shardsPerInterval = config.getPartitionsSpec().getNumShards(); Map<Long, List<HadoopyShardSpec>> shardSpecs = new TreeMap<>(); int shardCount = 0; i, shardsPerInterval, config.getPartitionsSpec().getPartitionDimensions(), HadoopDruidIndexerConfig.JSON_MAPPER ),
// NOTE(review): spliced diff fragment — unbalanced braces, left byte-identical.
// First piece is the job-submission guard that fails fast (ISE) unless the
// partitions spec is a SingleDimensionPartitionsSpec; this is what makes the
// unchecked casts in the mapper setup() methods safe. The trailing pieces show
// the group-by pre-pass being created only when isAssumeGrouped() is false —
// i.e. when input rows are not already grouped — TODO confirm in the full file.
if (!(config.getPartitionsSpec() instanceof SingleDimensionPartitionsSpec)) { throw new ISE( "DeterminePartitionsJob can only be run for SingleDimensionPartitionsSpec, partitionSpec found [%s]", config.getPartitionsSpec() ); if (!config.getPartitionsSpec().isAssumeGrouped()) { groupByJob = Job.getInstance( new Configuration(), config.addJobProperties(dimSelectionJob); if (!config.getPartitionsSpec().isAssumeGrouped()) {
/**
 * Reads the indexer configuration out of the Hadoop job context and builds the
 * dimension-selection helper used by this mapper.
 *
 * Fixes vs. original: calls {@code super.setup(context)} so the superclass
 * initialization is not silently skipped (the equivalent setup() elsewhere in
 * this file does call it), and declares the checked exceptions that the
 * superclass contract exposes.
 *
 * @param context Hadoop mapper context carrying the serialized indexer config
 * @throws IOException          propagated from the superclass setup
 * @throws InterruptedException propagated from the superclass setup
 */
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  final HadoopDruidIndexerConfig config =
      HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
  // Safe cast: job submission rejects any PartitionsSpec that is not a
  // SingleDimensionPartitionsSpec before the mappers run.
  final SingleDimensionPartitionsSpec spec =
      (SingleDimensionPartitionsSpec) config.getPartitionsSpec();
  helper = new DeterminePartitionsDimSelectionMapperHelper(config, spec.getPartitionDimension());
}
/**
 * Mapper initialization hook. Delegates to the superclass first, then rebuilds
 * the {@link HadoopDruidIndexerConfig} from the job configuration and wires up
 * the helper that maps rows to their partition dimension.
 *
 * @param context Hadoop mapper context carrying the serialized indexer config
 * @throws IOException          propagated from the superclass setup
 * @throws InterruptedException propagated from the superclass setup
 */
@Override
protected void setup(Context context) throws IOException, InterruptedException
{
  super.setup(context);
  final HadoopDruidIndexerConfig hadoopConfig =
      HadoopDruidIndexerConfig.fromConfiguration(context.getConfiguration());
  // Cast mirrors the instanceof guard enforced before this job is launched.
  final SingleDimensionPartitionsSpec singleDimSpec =
      (SingleDimensionPartitionsSpec) hadoopConfig.getPartitionsSpec();
  final String partitionDimension = singleDimSpec.getPartitionDimension();
  helper = new DeterminePartitionsDimSelectionMapperHelper(hadoopConfig, partitionDimension);
}
// NOTE(review): spliced diff fragment (unbalanced braces) — left byte-identical.
// Two review observations for when the full file is edited: (1) unlike the
// parallel fragment above, this version never calls
// config.setHadoopJobIdFileName(hadoopJobIdFile), so the Hadoop job id would
// not be recorded on this path — verify whether that is intentional;
// (2) Guava's Maps.newTreeMap() is legacy — plain `new TreeMap<>()` is the
// modern equivalent already used in the sibling fragment.
job = config.getPartitionsSpec().getPartitionJob(config); return JobHelper.runSingleJob(job, config); } else { int shardsPerInterval = config.getPartitionsSpec().getNumShards(); Map<Long, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(); int shardCount = 0; i, shardsPerInterval, config.getPartitionsSpec().getPartitionDimensions(), HadoopDruidIndexerConfig.JSON_MAPPER ),
// NOTE(review): spliced diff fragment — unbalanced braces, left byte-identical.
// The instanceof guard throws ISE at submission time unless the partitions spec
// is a SingleDimensionPartitionsSpec, which is what justifies the unchecked
// casts in the mappers' setup() methods. The remaining pieces gate the group-by
// pre-pass on !isAssumeGrouped() — presumably skipped when the input is already
// grouped; confirm against the full file before restructuring.
if (!(config.getPartitionsSpec() instanceof SingleDimensionPartitionsSpec)) { throw new ISE( "DeterminePartitionsJob can only be run for SingleDimensionPartitionsSpec, partitionSpec found [%s]", config.getPartitionsSpec() ); if (!config.getPartitionsSpec().isAssumeGrouped()) { groupByJob = Job.getInstance( new Configuration(), config.addJobProperties(dimSelectionJob); if (!config.getPartitionsSpec().isAssumeGrouped()) {