/**
 * Submit a failure SLA event
 */
private void submitFailureSlaEvent(Dataset dataset, String eventName) {
  try {
    CompactionSlaEventHelper.getEventSubmitterBuilder(dataset, Optional.<Job> absent(), this.fs)
        .eventSubmitter(this.eventSubmitter).eventName(eventName).build().submit();
  } catch (Throwable t) {
    LOG.warn("Failed to submit failure sla event:" + t, t);
  }
}
}
/**
 * Submit the SLA event by calling {@link EventSubmitter#submit()}. If {@link SlaEventSubmitter#eventName},
 * {@link SlaEventSubmitter#eventSubmitter}, or {@link SlaEventSubmitter#datasetUrn} are not available,
 * the method is a no-op.
 */
public void submit() {
  try {
    Preconditions.checkArgument(Predicates.notNull().apply(eventSubmitter), "EventSubmitter needs to be set");
    Preconditions.checkArgument(NOT_NULL_OR_EMPTY_PREDICATE.apply(eventName), "Event name is required");
    Preconditions.checkArgument(NOT_NULL_OR_EMPTY_PREDICATE.apply(datasetUrn), "DatasetUrn is required");
    eventSubmitter.submit(eventName, buildEventMap());
  } catch (IllegalArgumentException e) {
    log.info("Required arguments to submit an SLA event are not available. No SLA event will be submitted. "
        + e.toString());
  }
}
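/*
 * Usage sketch (not from the source): a hypothetical caller populating the builder before
 * calling submit(). The event name, dataset URN, and partition below are made-up values for
 * illustration. If eventSubmitter, eventName, or datasetUrn were left unset, submit() above
 * would log the missing argument and become a no-op instead of throwing.
 */
SlaEventSubmitter.builder()
    .eventSubmitter(eventSubmitter)         // an existing EventSubmitter instance
    .eventName("DatasetPublished")          // hypothetical event name
    .datasetUrn("/data/tracking/MyDataset") // hypothetical dataset URN
    .partition("2016-01-01")                // optional fields may be omitted
    .build()
    .submit();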
/**
 * Builds an event metadata {@link Map} from this {@link SlaEventSubmitter}. The method filters out metadata by
 * applying {@link #NOT_NULL_OR_EMPTY_PREDICATE}.
 */
private Map<String, String> buildEventMap() {
  Map<String, String> eventMetadataMap = Maps.newHashMap();
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.DATASET_URN_KEY), datasetUrn);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.PARTITION_KEY), partition);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY), originTimestamp);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY), upstreamTimestamp);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.COMPLETENESS_PERCENTAGE_KEY), completenessPercentage);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.RECORD_COUNT_KEY), recordCount);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY),
      previousPublishTimestamp);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.DEDUPE_STATUS_KEY), dedupeStatus);
  eventMetadataMap.put(withoutPropertiesPrefix(SlaEventKeys.IS_FIRST_PUBLISH), isFirstPublish);
  if (additionalMetadata != null) {
    eventMetadataMap.putAll(additionalMetadata);
  }
  // Drop entries whose values are null or empty before returning.
  return Maps.newHashMap(Maps.filterValues(eventMetadataMap, NOT_NULL_OR_EMPTY_PREDICATE));
}
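/*
 * A minimal sketch of what NOT_NULL_OR_EMPTY_PREDICATE could look like (its actual definition
 * lives elsewhere in SlaEventSubmitter and may differ): a Guava Predicate that rejects null and
 * empty strings, so unset optional fields are filtered out of the event metadata map above.
 */
private static final Predicate<String> NOT_NULL_OR_EMPTY_PREDICATE = new Predicate<String>() {
  @Override
  public boolean apply(String input) {
    return !Strings.isNullOrEmpty(input);
  }
};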
@Override
protected HiveDataset computeNext() {
  while (this.tables.hasNext()) {
    DbAndTable dbAndTable = this.tables.next();

    try (AutoReturnableObject<IMetaStoreClient> client = HiveDatasetFinder.this.clientPool.getClient()) {
      Table table = client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable());
      Config datasetConfig = getDatasetConfig(table);
      if (ConfigUtils.getBoolean(datasetConfig, HIVE_DATASET_IS_BLACKLISTED_KEY,
          DEFAULT_HIVE_DATASET_IS_BLACKLISTED_KEY)) {
        continue;
      }

      if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
        SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
            .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_FOUND).build().submit();
      }

      return createHiveDataset(table, datasetConfig);
    } catch (Throwable t) {
      log.error(String.format("Failed to create HiveDataset for table %s.%s", dbAndTable.getDb(),
          dbAndTable.getTable()), t);

      if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
        SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
            .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_ERROR)
            .additionalMetadata(FAILURE_CONTEXT, t.toString()).build().submit();
      }
    }
  }
  return endOfData();
}
};
// Failure path: emit a CONVERSION_FAILED_EVENT for non-watermark work units.
if (!wus.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
  try {
    new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_FAILED_EVENT, wus.getProperties()).submit();
  } catch (Exception e) {
    log.error("Failed while emitting SLA event, but ignoring and moving forward to curate "
        + "all clean up commands", e);
  }
}

// Success path: record first-publish metadata, then emit a CONVERSION_SUCCESSFUL_SLA_EVENT.
EventWorkunitUtils.setIsFirstPublishMetadata(wus);
try {
  new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT, wus.getProperties())
      .submit();
} catch (Exception e) {
  log.error("Failed while emitting SLA event, but ignoring and moving forward to curate "
      + "all clean up commands", e);
}
/**
 * Get an {@link SlaEventSubmitterBuilder} that has dataset URN, partition, record count, previous publish
 * timestamp, and dedupe status set.
 * The caller MUST set eventSubmitter and eventName before submitting.
 */
public static SlaEventSubmitterBuilder getEventSubmitterBuilder(Dataset dataset, Optional<Job> job, FileSystem fs) {
  SlaEventSubmitterBuilder builder = SlaEventSubmitter.builder().datasetUrn(dataset.getUrn())
      .partition(dataset.jobProps().getProp(MRCompactor.COMPACTION_JOB_DEST_PARTITION, ""))
      .dedupeStatus(getOutputDedupeStatus(dataset.jobProps()));

  long previousPublishTime = getPreviousPublishTime(dataset, fs);
  long upstreamTime = dataset.jobProps().getPropAsLong(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, -1L);
  long recordCount = getRecordCount(job);

  // Previous publish time only exists when this is a recompact job.
  if (previousPublishTime != -1L) {
    builder.previousPublishTimestamp(Long.toString(previousPublishTime));
  }
  // Upstream time is the logical time represented by the compaction input directory.
  if (upstreamTime != -1L) {
    builder.upstreamTimestamp(Long.toString(upstreamTime));
  }
  if (recordCount != -1L) {
    builder.recordCount(Long.toString(recordCount));
  }
  return builder;
}
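/*
 * Usage sketch mirroring the call sites in this section: the returned builder only becomes
 * submittable once the caller supplies the two mandatory fields the Javadoc calls out.
 * "CompactionCompleted" is a hypothetical event name used purely for illustration.
 */
CompactionSlaEventHelper.getEventSubmitterBuilder(dataset, Optional.<Job> absent(), fs)
    .eventSubmitter(eventSubmitter)   // mandatory
    .eventName("CompactionCompleted") // mandatory
    .build().submit();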
static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp, String upstreamTimestamp,
    Map<String, String> additionalMetadata) {
  SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(DATASET_PUBLISHED_EVENT_NAME)
      .datasetUrn(datasetAndPartition.getDataset().getDatasetURN()).partition(datasetAndPartition.getPartition())
      .originTimestamp(originTimestamp).upstreamTimestamp(upstreamTimestamp).additionalMetadata(additionalMetadata)
      .build().submit();
}
/**
 * Submit an SLA event when a {@link gobblin.data.management.copy.CopyableFile} is published. The
 * <code>workUnitState</code> passed should have the required {@link SlaEventKeys} set.
 *
 * @see SlaEventSubmitter#submit()
 *
 * @param eventSubmitter the {@link EventSubmitter} used to submit the event
 * @param cf the published {@link CopyableFile}
 * @param workUnitState the {@link WorkUnitState} carrying the {@link SlaEventKeys} values
 */
static void submitSuccessfulFilePublish(EventSubmitter eventSubmitter, CopyableFile cf, WorkUnitState workUnitState) {
  String datasetUrn = workUnitState.getProp(SlaEventKeys.DATASET_URN_KEY);
  String partition = workUnitState.getProp(SlaEventKeys.PARTITION_KEY);
  String completenessPercentage = workUnitState.getProp(SlaEventKeys.COMPLETENESS_PERCENTAGE_KEY);
  String recordCount = workUnitState.getProp(SlaEventKeys.RECORD_COUNT_KEY);
  String previousPublishTimestamp = workUnitState.getProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY);
  String dedupeStatus = workUnitState.getProp(SlaEventKeys.DEDUPE_STATUS_KEY);
  SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(FILE_PUBLISHED_EVENT_NAME)
      .datasetUrn(datasetUrn).partition(partition).originTimestamp(Long.toString(cf.getOriginTimestamp()))
      .upstreamTimestamp(Long.toString(cf.getUpstreamTimestamp())).completenessPercentage(completenessPercentage)
      .recordCount(recordCount).previousPublishTimestamp(previousPublishTimestamp).dedupeStatus(dedupeStatus)
      .additionalMetadata(TARGET_PATH, cf.getDestination().toString())
      .additionalMetadata(SOURCE_PATH, cf.getOrigin().getPath().toString())
      .additionalMetadata(SIZE_IN_BYTES, Long.toString(cf.getOrigin().getLen())).build().submit();
}
}
/**
 * Submit an event when completeness verification is successful
 */
private void submitVerificationSuccessSlaEvent(Results.Result result) {
  try {
    CompactionSlaEventHelper.getEventSubmitterBuilder(result.dataset(), Optional.<Job> absent(), this.fs)
        .eventSubmitter(this.eventSubmitter)
        .eventName(CompactionSlaEventHelper.COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME)
        .additionalMetadata(Maps.transformValues(result.verificationContext(), Functions.toStringFunction()))
        .build().submit();
  } catch (Throwable t) {
    LOG.warn("Failed to submit verification success event:" + t, t);
  }
}
/**
 * Submit an event reporting late record counts and non-late record counts.
 */
private void submitRecordsCountsEvent() {
  long lateOutputRecordCount = this.datasetHelper.getLateOutputRecordCount();
  long outputRecordCount = this.datasetHelper.getOutputRecordCount();

  try {
    CompactionSlaEventHelper.getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
        .eventSubmitter(this.eventSubmitter)
        .eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
        .additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
        .additionalMetadata(CompactionSlaEventHelper.LATE_RECORD_COUNT, Long.toString(lateOutputRecordCount))
        .additionalMetadata(CompactionSlaEventHelper.REGULAR_RECORD_COUNT, Long.toString(outputRecordCount))
        .additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT,
            Boolean.toString(this.dataset.needToRecompact()))
        .build().submit();
  } catch (Throwable e) {
    LOG.warn("Failed to submit record counts event:" + e, e);
  }
}
}
/**
 * Submit an event when the compaction MR job completes
 */
private void submitSlaEvent(Job job) {
  try {
    CompactionSlaEventHelper.getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
        .eventSubmitter(this.eventSubmitter)
        .eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
        .additionalMetadata(CompactionSlaEventHelper.LATE_RECORD_COUNT,
            Long.toString(this.lateOutputRecordCountProvider.getRecordCount(
                this.getApplicableFilePaths(this.dataset.outputLatePath()))))
        .additionalMetadata(CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
            Long.toString(this.outputRecordCountProvider.getRecordCount(
                this.getApplicableFilePaths(this.dataset.outputPath()))))
        .additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
            Boolean.toString(this.dataset.needToRecompact()))
        .build().submit();
  } catch (Throwable e) {
    LOG.warn("Failed to submit compaction completed event:" + e, e);
  }
}