/** Delegates to the wrapped schema for the data source name. */
@Override
public String getDataSource()
{
  final String dataSource = schema.getDataSource();
  return dataSource;
}
/** The data source name doubles as this object's identifier. */
@Override
public String getId()
{
  return dataSchema.getDataSource();
}
/**
 * Computes the directory where corrupted persist files are dumped for later inspection.
 * The returned path mirrors {@code persistDir} with the data-source path component replaced
 * by {@code "corrupted" + File.separator + dataSource}, e.g. {@code .../corrupted/<dataSource>/...}.
 *
 * @param persistDir the original persist directory
 * @param schema     schema whose data source name appears in {@code persistDir}'s path
 * @return the corrupted-file dump directory
 */
protected File computeCorruptedFileDumpDir(File persistDir, DataSchema schema)
{
  return new File(
      StringUtils.replace(
          persistDir.getAbsolutePath(),
          schema.getDataSource(),
          // File.separator ("/" or "\") is the path-component delimiter. The previous code used
          // File.pathSeparator (":" or ";"), which separates entries in PATH-style lists, is
          // illegal in Windows file names, and produced malformed dump directories.
          "corrupted" + File.separator + schema.getDataSource()
      )
  );
}
/** Builds the group id for this ingestion spec from its append flag and data source name. */
private static String makeGroupId(IndexIngestionSpec ingestionSchema)
{
  final boolean isAppendToExisting = ingestionSchema.ioConfig.appendToExisting;
  final String dataSource = ingestionSchema.dataSchema.getDataSource();
  return makeGroupId(isAppendToExisting, dataSource);
}
/** This task touches exactly one data source: the one named by its schema. */
@Override
public List<String> getDataSources()
{
  final String dataSource = getDataSchema().getDataSource();
  return ImmutableList.of(dataSource);
}
/** Returns the data source name carried by the nested data schema. */
public String getDataSource()
{
  return schema.getDataSchema().getDataSource();
}
/** Convenience accessor for the data source named inside a Hadoop ingestion spec. */
private static String getTheDataSource(HadoopIngestionSpec spec)
{
  return spec.getDataSchema().getDataSource();
}
/** Base persist directory for a data source: {@code <basePersistDirectory>/<dataSource>}. */
protected File computeBaseDir(DataSchema schema)
{
  final File basePersistDir = config.getBasePersistDirectory();
  return new File(basePersistDir, schema.getDataSource());
}
/** Extracts the data source name from a fire department's schema. */
private static String makeDatasource(FireDepartment fireDepartment)
{
  return fireDepartment.getDataSchema().getDataSource();
}
/**
 * Builds a shell {@link DataSegment} describing this sink's output: empty load spec,
 * no dimensions, and metric names taken from the schema's aggregator factories.
 */
public DataSegment getSegment()
{
  final List<String> metricNames = Lists.transform(
      Arrays.asList(schema.getAggregators()),
      AggregatorFactory::getName
  );
  return new DataSegment(
      schema.getDataSource(),
      interval,
      version,
      ImmutableMap.of(),       // load spec not yet known
      Collections.emptyList(), // no dimensions recorded here
      metricNames,
      shardSpec,
      null,                    // binary version unset
      0                        // size unknown at this point
  );
}
/**
 * Best-effort recursive delete of {@code target}. Failure never propagates to the caller;
 * instead an alert is emitted so operators can clean the directory up manually.
 */
private void removeDirectory(final File target)
{
  if (!target.exists()) {
    return;
  }
  try {
    log.info("Deleting Index File[%s]", target);
    FileUtils.deleteDirectory(target);
  }
  catch (Exception e) {
    // Deliberate swallow-and-alert: a leftover directory is preferable to failing the caller.
    log.makeAlert(e, "Failed to remove directory[%s]", schema.getDataSource())
       .addData("file", target)
       .emit();
  }
}
}
/**
 * Lifecycle start: announces this server, then spins up one FireChief per fire department
 * on a dedicated executor, registering each chief under (dataSource, partitionNum).
 */
@LifecycleStart
public void start()
{
  serverAnnouncer.announce();
  fireChiefExecutor = Execs.multiThreaded(fireDepartments.size(), "chief-%d");
  for (final FireDepartment department : fireDepartments) {
    final DataSchema departmentSchema = department.getDataSchema();
    final FireChief chief = new FireChief(department, conglomerate);
    final int partitionNum = department.getTuningConfig().getShardSpec().getPartitionNum();
    chiefs.computeIfAbsent(departmentSchema.getDataSource(), k -> new HashMap<>())
          .put(partitionNum, chief);
    fireChiefExecutor.submit(chief);
  }
}
/**
 * Verifies all EasyMock collaborators, then removes on-disk state created by the test:
 * the per-data-source persist directory and the temp dir.
 */
@After
public void tearDown() throws Exception
{
  EasyMock.verify(
      announcer,
      segmentPublisher,
      dataSegmentPusher,
      handoffNotifierFactory,
      handoffNotifier,
      emitter
  );
  final File persistDir = new File(tuningConfig.getBasePersistDirectory(), schema.getDataSource());
  FileUtils.deleteDirectory(persistDir);
  FileUtils.deleteDirectory(tmpDir);
}
/**
 * Fail-fast validation of the ingestion spec: every required field must be non-null.
 * {@code Preconditions.checkNotNull} throws an NPE whose message names the missing field,
 * so the check order below determines which missing prerequisite is reported first —
 * do not reorder or hoist the repeated getter chains into locals.
 */
public void verify()
{
  Preconditions.checkNotNull(schema.getDataSchema().getDataSource(), "dataSource");
  Preconditions.checkNotNull(schema.getDataSchema().getParser().getParseSpec(), "parseSpec");
  Preconditions.checkNotNull(schema.getDataSchema().getParser().getParseSpec().getTimestampSpec(), "timestampSpec");
  Preconditions.checkNotNull(schema.getDataSchema().getGranularitySpec(), "granularitySpec");
  Preconditions.checkNotNull(pathSpec, "inputSpec");
  Preconditions.checkNotNull(schema.getTuningConfig().getWorkingPath(), "workingPath");
  Preconditions.checkNotNull(schema.getIOConfig().getSegmentOutputPath(), "segmentOutputPath");
  Preconditions.checkNotNull(schema.getTuningConfig().getVersion(), "version");
}
private static IndexIOConfig createIoConfig(TaskToolbox toolbox, DataSchema dataSchema, Interval interval) { return new IndexIOConfig( new IngestSegmentFirehoseFactory( dataSchema.getDataSource(), interval, null, // no filter // set dimensions and metrics names to make sure that the generated dataSchema is used for the firehose dataSchema.getParser().getParseSpec().getDimensionsSpec().getDimensionNames(), Arrays.stream(dataSchema.getAggregators()).map(AggregatorFactory::getName).collect(Collectors.toList()), toolbox.getIndexIO() ), false ); }
// Timeout watchdog: emits an alert when the realtime task exceeds its configured alertTimeout.
@Override
public void run()
{
  log.makeAlert(
      "RealtimeIndexTask for dataSource [%s] hasn't finished in configured time [%d] ms.",
      spec.getDataSchema().getDataSource(),
      spec.getTuningConfig().getAlertTimeout()
  ).emit();
}
// NOTE(review): trailing braces close an anonymous class whose declaration lies outside this span.
},
// Timeout watchdog: emits an alert when the realtime task exceeds its configured alertTimeout.
@Override
public void run()
{
  log.makeAlert(
      "RealtimeIndexTask for dataSource [%s] hasn't finished in configured time [%d] ms.",
      spec.getDataSchema().getDataSource(),
      spec.getTuningConfig().getAlertTimeout()
  ).emit();
}
// NOTE(review): trailing braces close an anonymous class whose declaration lies outside this span.
},
/**
 * Starts the job: ensures the base persist directory exists, initializes executors,
 * lazily creates the flush scheduler, restores sinks persisted on disk, and kicks off
 * the periodic flush thread.
 *
 * @return the value recovered from disk by {@code bootstrapSinksFromDisk()}
 */
@Override
public Object startJob()
{
  log.info("Starting job for %s", getSchema().getDataSource());
  final File baseDir = computeBaseDir(getSchema());
  // mkdirs() returns false both on failure and when the directory already exists; the old
  // code ignored the result entirely, hiding creation failures until bootstrap broke later.
  // Warn only in the genuine-failure case.
  if (!baseDir.mkdirs() && !baseDir.isDirectory()) {
    log.warn("Could not create base persist directory[%s]", baseDir);
  }
  initializeExecutors();
  if (flushScheduledExec == null) {
    flushScheduledExec = Execs.scheduledSingleThreaded("flushing_scheduled_%d");
  }
  Object retVal = bootstrapSinksFromDisk();
  startFlushThread();
  return retVal;
}
/** Task id from the department's data source, partition number, current UTC time, and a random suffix. */
private static String makeTaskId(FireDepartment fireDepartment)
{
  final String dataSource = fireDepartment.getDataSchema().getDataSource();
  final int partitionNum = fireDepartment.getTuningConfig().getShardSpec().getPartitionNum();
  return makeTaskId(dataSource, partitionNum, DateTimes.nowUtc(), makeRandomId());
}
/** Task id of the form {@code index_realtime_<dataSource>_<partitionNum>_<utcNow>_<random>}. */
private static String makeTaskId(RealtimeAppenderatorIngestionSpec spec)
{
  final String dataSource = spec.getDataSchema().getDataSource();
  final int partitionNum = spec.getTuningConfig().getShardSpec().getPartitionNum();
  return StringUtils.format(
      "index_realtime_%s_%d_%s_%s",
      dataSource,
      partitionNum,
      DateTimes.nowUtc(),
      RealtimeIndexTask.makeRandomId()
  );
}