/**
 * Emits a failure SLA event named {@code eventName} for the given dataset.
 * Submission is best-effort: any error during event construction or emission
 * is logged at WARN and swallowed so the caller's flow is never interrupted.
 */
private void submitFailureSlaEvent(Dataset dataset, String eventName) {
  try {
    CompactionSlaEventHelper
        .getEventSubmitterBuilder(dataset, Optional.<Job> absent(), this.fs)
        .eventName(eventName)
        .eventSubmitter(this.eventSubmitter)
        .build()
        .submit();
  } catch (Throwable throwable) {
    LOG.warn("Failed to submit failure sla event:" + throwable, throwable);
  }
}
}
/**
 * Emits an SLA event reporting that completeness verification succeeded for
 * {@code result}'s dataset. The verification context is attached as string
 * metadata. Best-effort: failures are logged at WARN and swallowed.
 */
private void submitVerificationSuccessSlaEvent(Results.Result result) {
  try {
    CompactionSlaEventHelper
        .getEventSubmitterBuilder(result.dataset(), Optional.<Job> absent(), this.fs)
        .eventName(CompactionSlaEventHelper.COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME)
        .eventSubmitter(this.eventSubmitter)
        // Stringify every context value so it can travel as event metadata.
        .additionalMetadata(Maps.transformValues(result.verificationContext(), Functions.toStringFunction()))
        .build()
        .submit();
  } catch (Throwable throwable) {
    LOG.warn("Failed to submit verification success event:" + throwable, throwable);
  }
}
/**
 * Emits an SLA event carrying the late and non-late output record counts for
 * this dataset, plus its output path and recompaction flag. Best-effort:
 * failures are logged at WARN and swallowed.
 */
private void submitRecordsCountsEvent() {
  long lateCount = this.datasetHelper.getLateOutputRecordCount();
  long regularCount = this.datasetHelper.getOutputRecordCount();
  try {
    CompactionSlaEventHelper
        .getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
        .eventSubmitter(this.eventSubmitter)
        .eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
        .additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
        .additionalMetadata(CompactionSlaEventHelper.LATE_RECORD_COUNT, Long.toString(lateCount))
        .additionalMetadata(CompactionSlaEventHelper.REGULAR_RECORD_COUNT, Long.toString(regularCount))
        .additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT, Boolean.toString(this.dataset.needToRecompact()))
        .build()
        .submit();
  } catch (Throwable throwable) {
    LOG.warn("Failed to submit late event count:" + throwable, throwable);
  }
}
}
/**
 * Emits an SLA event when the compaction MR {@code job} completes, reporting
 * the late and regular output record counts (computed from the applicable
 * files under the dataset's late-output and output paths) and whether the
 * dataset needs recompaction. Best-effort: failures are logged at WARN and
 * swallowed so job completion handling is never interrupted.
 */
private void submitSlaEvent(Job job) {
  try {
    // Hoisted for readability; both are derived from the current file listings.
    long lateRecordCount =
        this.lateOutputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset.outputLatePath()));
    long regularRecordCount =
        this.outputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset.outputPath()));
    CompactionSlaEventHelper
        .getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
        .eventSubmitter(this.eventSubmitter)
        .eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
        .additionalMetadata(CompactionSlaEventHelper.LATE_RECORD_COUNT, Long.toString(lateRecordCount))
        .additionalMetadata(CompactionSlaEventHelper.REGULAR_RECORD_COUNT, Long.toString(regularRecordCount))
        // Constant name has a historical typo (RECOMPATED); declared elsewhere, so left as-is.
        .additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
            Boolean.toString(this.dataset.needToRecompact()))
        .build().submit();
  } catch (Throwable e) {
    // Fixed typo in the log message ("compcation" -> "compaction").
    LOG.warn("Failed to submit compaction completed event:" + e, e);
  }
}
/**
 * Produces the next {@link HiveDataset} from the remaining (db, table) pairs,
 * skipping blacklisted tables and emitting DATASET_FOUND / DATASET_ERROR SLA
 * events when an event submitter is configured. A failure on one table is
 * logged and reported but does not abort iteration over the rest.
 */
@Override
protected HiveDataset computeNext() {
  while (this.tables.hasNext()) {
    DbAndTable dbAndTable = this.tables.next();
    // Borrow a metastore client from the pool; try-with-resources returns it on exit.
    try (AutoReturnableObject<IMetaStoreClient> client = HiveDatasetFinder.this.clientPool.getClient()) {
      Table table = client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable());
      Config datasetConfig = getDatasetConfig(table);
      // Skip tables whose dataset config marks them as blacklisted.
      if (ConfigUtils.getBoolean(datasetConfig, HIVE_DATASET_IS_BLACKLISTED_KEY, DEFAULT_HIVE_DATASET_IS_BLACKLISTED_KEY)) {
        continue;
      }
      if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
        SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
            .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_FOUND).build().submit();
      }
      return createHiveDataset(table, datasetConfig);
    } catch (Throwable t) {
      // One bad table must not stop discovery: log, emit an error event, continue.
      log.error(String.format("Failed to create HiveDataset for table %s.%s", dbAndTable.getDb(), dbAndTable.getTable()), t);
      if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
        SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
            .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_ERROR)
            .additionalMetadata(FAILURE_CONTEXT, t.toString()).build().submit();
      }
    }
  }
  // All tables exhausted — signal end of iteration to the enclosing AbstractIterator.
  return endOfData();
}
};
// NOTE(review): this fragment appears garbled — the braces are unbalanced
// (five opens vs. two closes) and the inner catch re-declares 'e', which
// would not compile; it reads as two distinct try/catch snippets (one
// submitting CONVERSION_FAILED_EVENT, one CONVERSION_SUCCESSFUL_SLA_EVENT)
// spliced together. Code is preserved verbatim below; reconcile against the
// original file before relying on it. The "comamnds" typo is inside a
// runtime log string and is therefore left untouched here.
if (!wus.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
  try {
    new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_FAILED_EVENT, wus.getProperties()).submit();
  } catch (Exception e) {
    // Best-effort event emission: failure is logged and ignored.
    log.error("Failed while emitting SLA event, but ignoring and moving forward to curate " + "all clean up comamnds", e);
    try {
      new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT, wus.getProperties())
          .submit();
    } catch (Exception e) {
      log.error("Failed while emitting SLA event, but ignoring and moving forward to curate " + "all clean up commands", e);
/**
 * Emits an SLA event when a {@link gobblin.data.management.copy.CopyableFile}
 * is published. The supplied {@code workUnitState} must carry the required
 * {@link SlaEventKeys} properties; timestamps come from the file itself, and
 * the file's source/target paths and size are attached as extra metadata.
 *
 * @see SlaEventSubmitter#submit()
 *
 * @param eventSubmitter
 * @param workUnitState
 */
static void submitSuccessfulFilePublish(EventSubmitter eventSubmitter, CopyableFile cf, WorkUnitState workUnitState) {
  // Pull the SLA attributes recorded on the work unit.
  String urn = workUnitState.getProp(SlaEventKeys.DATASET_URN_KEY);
  String partitionValue = workUnitState.getProp(SlaEventKeys.PARTITION_KEY);
  String dedupe = workUnitState.getProp(SlaEventKeys.DEDUPE_STATUS_KEY);
  String records = workUnitState.getProp(SlaEventKeys.RECORD_COUNT_KEY);
  String completeness = workUnitState.getProp(SlaEventKeys.COMPLETENESS_PERCENTAGE_KEY);
  String lastPublishTs = workUnitState.getProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY);

  SlaEventSubmitter.builder()
      .eventSubmitter(eventSubmitter)
      .eventName(FILE_PUBLISHED_EVENT_NAME)
      .datasetUrn(urn)
      .partition(partitionValue)
      .originTimestamp(Long.toString(cf.getOriginTimestamp()))
      .upstreamTimestamp(Long.toString(cf.getUpstreamTimestamp()))
      .completenessPercentage(completeness)
      .recordCount(records)
      .previousPublishTimestamp(lastPublishTs)
      .dedupeStatus(dedupe)
      .additionalMetadata(TARGET_PATH, cf.getDestination().toString())
      .additionalMetadata(SOURCE_PATH, cf.getOrigin().getPath().toString())
      .additionalMetadata(SIZE_IN_BYTES, Long.toString(cf.getOrigin().getLen()))
      .build()
      .submit();
}
}
/**
 * Emits an SLA event reporting that an entire dataset (and partition) was
 * published, carrying the given origin/upstream timestamps and any extra
 * caller-supplied metadata.
 */
static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp, String upstreamTimestamp,
    Map<String, String> additionalMetadata) {
  SlaEventSubmitter.builder()
      .eventName(DATASET_PUBLISHED_EVENT_NAME)
      .eventSubmitter(eventSubmitter)
      .datasetUrn(datasetAndPartition.getDataset().getDatasetURN())
      .partition(datasetAndPartition.getPartition())
      .originTimestamp(originTimestamp)
      .upstreamTimestamp(upstreamTimestamp)
      .additionalMetadata(additionalMetadata)
      .build()
      .submit();
}