/**
 * Submits the given Kafka supervisor spec to the Druid overlord's supervisor endpoint.
 */
static void updateKafkaIngestionSpec(String overlordAddress, KafkaSupervisorSpec spec) {
  try {
    String task = JSON_MAPPER.writeValueAsString(spec);
    CONSOLE.printInfo("submitting kafka Spec {}", task);
    LOG.info("submitting kafka Supervisor Spec {}", task);
    FullResponseHolder response = DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        DruidStorageHandler.getHttpClient(),
        new Request(
            HttpMethod.POST,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor", overlordAddress))
        ).setContent("application/json", JSON_MAPPER.writeValueAsBytes(spec)),
        new FullResponseHandler(Charset.forName("UTF-8"))
    );
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      String msg = String.format(
          "Kafka Supervisor for [%s] Submitted Successfully to druid.",
          spec.getDataSchema().getDataSource()
      );
      LOG.info(msg);
      CONSOLE.printInfo(msg);
    } else {
      throw new IOException(String.format(
          "Unable to update Kafka Ingestion for Druid status [%d] full response [%s]",
          response.getStatus().getCode(),
          response.getContent()
      ));
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
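// Illustrative sketch only, not part of the original source: a companion call that shuts the
// supervisor down again. It reuses only the helper classes already visible above; the method
// name stopKafkaIngestion and the use of Druid's POST .../supervisor/{id}/shutdown endpoint
// are assumptions for this example.
static void stopKafkaIngestion(String overlordAddress, String dataSourceName) {
  try {
    FullResponseHolder response = DruidStorageHandlerUtils.getResponseFromCurrentLeader(
        DruidStorageHandler.getHttpClient(),
        new Request(
            HttpMethod.POST,
            new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/shutdown",
                overlordAddress, dataSourceName))
        ),
        new FullResponseHandler(Charset.forName("UTF-8"))
    );
    if (!response.getStatus().equals(HttpResponseStatus.OK)) {
      throw new IOException(String.format(
          "Unable to stop Kafka Supervisor, status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()
      ));
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}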
if (currentOpenSegment == null) {
  currentOpenSegment = new SegmentIdentifier(
      dataSchema.getDataSource(),
      interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      // the shard-spec argument and closing parenthesis were truncated in the original;
      // a LinearShardSpec is assumed here
      new LinearShardSpec(0)
  );
} else {
  retVal = new SegmentIdentifier(
      dataSchema.getDataSource(),
      interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      // shard-spec argument truncated in the original; LinearShardSpec assumed
      new LinearShardSpec(0)
  );
}
// Push the segment that is currently open, then open a new one for the new interval.
pushSegments(ImmutableList.of(currentOpenSegment));
currentOpenSegment = new SegmentIdentifier(
    dataSchema.getDataSource(),
    interval,
    tuningConfig.getVersioningPolicy().getVersion(interval),
    // shard-spec argument truncated in the original; LinearShardSpec assumed
    new LinearShardSpec(0)
);
@Override
public String getId() {
  return dataSchema.getDataSource();
}
@Override
public String getDataSource() {
  return schema.getDataSource();
}
protected File computeCorruptedFileDumpDir(File persistDir, DataSchema schema) {
  // Use File.separator (not File.pathSeparator) so the corrupted dump lands in a
  // "corrupted/<dataSource>" subdirectory rather than a single oddly named directory.
  return new File(
      persistDir.getAbsolutePath()
                .replace(schema.getDataSource(), "corrupted" + File.separator + schema.getDataSource())
  );
}
@Override
public List<String> getDataSources() {
  return ImmutableList.of(getDataSchema().getDataSource());
}
private static String getTheDataSource(HadoopIngestionSpec spec) {
  return spec.getDataSchema().getDataSource();
}
private static String makeDatasource(FireDepartment fireDepartment) {
  return fireDepartment.getDataSchema().getDataSource();
}
private static String makeGroupId(IndexIngestionSpec ingestionSchema) {
  return makeGroupId(
      ingestionSchema.ioConfig.appendToExisting,
      ingestionSchema.dataSchema.getDataSource()
  );
}
protected File computeBaseDir(DataSchema schema) {
  return new File(config.getBasePersistDirectory(), schema.getDataSource());
}
public String getDataSource() {
  return schema.getDataSchema().getDataSource();
}
@JsonCreator
public IndexTask(
    @JsonProperty("id") final String id,
    @JsonProperty("resource") final TaskResource taskResource,
    @JsonProperty("spec") final IndexIngestionSpec ingestionSchema,
    @JsonProperty("context") final Map<String, Object> context
) {
  this(
      id,
      makeGroupId(ingestionSchema),
      taskResource,
      ingestionSchema.dataSchema.getDataSource(),
      ingestionSchema,
      context
  );
}
private void removeDirectory(final File target) {
  if (target.exists()) {
    try {
      log.info("Deleting Index File[%s]", target);
      FileUtils.deleteDirectory(target);
    } catch (Exception e) {
      log.makeAlert(e, "Failed to remove directory[%s]", schema.getDataSource())
         .addData("file", target)
         .emit();
    }
  }
}
@LifecycleStart
public void start() throws IOException {
  serverAnnouncer.announce();
  fireChiefExecutor = Execs.multiThreaded(fireDepartments.size(), "chief-%d");
  for (final FireDepartment fireDepartment : fireDepartments) {
    final DataSchema schema = fireDepartment.getDataSchema();
    final FireChief chief = new FireChief(fireDepartment, conglomerate);
    chiefs.computeIfAbsent(schema.getDataSource(), k -> new HashMap<>())
          .put(fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(), chief);
    fireChiefExecutor.submit(chief);
  }
}
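// Illustrative sketch only, not from the original source: a counterpart @LifecycleStop method.
// It assumes the announcer used in start() above exposes a matching unannounce() and that
// shutting down fireChiefExecutor is the intended way to stop the FireChief threads.
@LifecycleStop
public void stop() {
  if (fireChiefExecutor != null) {
    fireChiefExecutor.shutdownNow();
  }
  serverAnnouncer.unannounce();
}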
private static String makeTaskId(FireDepartment fireDepartment) {
  return makeTaskId(
      fireDepartment.getDataSchema().getDataSource(),
      fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(),
      DateTimes.nowUtc(),
      random.nextInt()
  );
}
@Override
public void run() {
  log.makeAlert(
      "RealtimeIndexTask for dataSource [%s] hasn't finished in configured time [%d] ms.",
      spec.getDataSchema().getDataSource(),
      spec.getTuningConfig().getAlertTimeout()
  ).emit();
}
@Override
public Object startJob() {
  log.info("Starting job for %s", getSchema().getDataSource());
  computeBaseDir(getSchema()).mkdirs();
  initializeExecutors();
  if (flushScheduledExec == null) {
    flushScheduledExec = Execs.scheduledSingleThreaded("flushing_scheduled_%d");
  }
  Object retVal = bootstrapSinksFromDisk();
  startFlushThread();
  return retVal;
}
/**
 * Authorizes action to be performed on this task's datasource
 *
 * @return authorization result
 */
private Access authorizationCheck(final HttpServletRequest req, Action action) {
  ResourceAction resourceAction = new ResourceAction(
      new Resource(dataSchema.getDataSource(), ResourceType.DATASOURCE),
      action
  );
  Access access = AuthorizationUtils.authorizeResourceAction(req, resourceAction, authorizerMapper);
  if (!access.isAllowed()) {
    throw new ForbiddenException(access.toString());
  }
  return access;
}
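// Illustrative sketch only, not from the original source: how an HTTP resource method might
// gate access with authorizationCheck(...) above. The endpoint path, the READ action, and the
// response payload are assumptions for this example.
@GET
@Path("/status")
@Produces(MediaType.APPLICATION_JSON)
public Response getStatusHTTP(@Context final HttpServletRequest req) {
  authorizationCheck(req, Action.READ);  // throws ForbiddenException when not allowed
  return Response.ok(ImmutableMap.of("dataSource", dataSchema.getDataSource())).build();
}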
private void addSink(final Sink sink) {
  sinks.put(sink.getInterval().getStartMillis(), sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(
      sink.getInterval(),
      sink.getVersion(),
      new SingleElementPartitionChunk<Sink>(sink)
  );
  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  } catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }
}