SlaEventSubmitter.submit

How to use the submit method in gobblin.metrics.event.sla.SlaEventSubmitter

Best Java code snippets using gobblin.metrics.event.sla.SlaEventSubmitter.submit (Showing top 8 results out of 315)

origin: com.linkedin.gobblin/gobblin-compaction

 /**
  * Submit a failure SLA event
  */
 private void submitFailureSlaEvent(Dataset dataset, String eventName) {
  try {
   CompactionSlaEventHelper.getEventSubmitterBuilder(dataset, Optional.<Job> absent(), this.fs)
   .eventSubmitter(this.eventSubmitter).eventName(eventName).build().submit();
  } catch (Throwable t) {
   LOG.warn("Failed to submit failure sla event:" + t, t);
  }
 }
}
origin: com.linkedin.gobblin/gobblin-compaction

/**
 * Submit an event when completeness verification is successful
 */
private void submitVerificationSuccessSlaEvent(Results.Result result) {
 try {
  CompactionSlaEventHelper.getEventSubmitterBuilder(result.dataset(), Optional.<Job> absent(), this.fs)
  .eventSubmitter(this.eventSubmitter).eventName(CompactionSlaEventHelper.COMPLETION_VERIFICATION_SUCCESS_EVENT_NAME)
  .additionalMetadata(Maps.transformValues(result.verificationContext(), Functions.toStringFunction())).build()
  .submit();
 } catch (Throwable t) {
  LOG.warn("Failed to submit verification success event:" + t, t);
 }
}
origin: com.linkedin.gobblin/gobblin-compaction

 /**
  * Submit an event reporting late record counts and non-late record counts.
  */
 private void submitRecordsCountsEvent() {
  long lateOutputRecordCount = this.datasetHelper.getLateOutputRecordCount();
  long outputRecordCount = this.datasetHelper.getOutputRecordCount();

  try {
   CompactionSlaEventHelper
     .getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
     .eventSubmitter(this.eventSubmitter)
     .eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
     .additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
     .additionalMetadata(
       CompactionSlaEventHelper.LATE_RECORD_COUNT,
       Long.toString(lateOutputRecordCount))
     .additionalMetadata(
       CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
       Long.toString(outputRecordCount))
     .additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT, Boolean.toString(this.dataset.needToRecompact()))
     .build().submit();
  } catch (Throwable e) {
   LOG.warn("Failed to submit late event count:" + e, e);
  }
 }
}
origin: com.linkedin.gobblin/gobblin-compaction

/**
 * Submit an event when compaction MR job completes
 */
private void submitSlaEvent(Job job) {
 try {
  CompactionSlaEventHelper
    .getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
    .eventSubmitter(this.eventSubmitter)
    .eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
    .additionalMetadata(
      CompactionSlaEventHelper.LATE_RECORD_COUNT,
      Long.toString(this.lateOutputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
        .outputLatePath()))))
    .additionalMetadata(
      CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
      Long.toString(this.outputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
        .outputPath()))))
    .additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
      Boolean.toString(this.dataset.needToRecompact())).build().submit();
 } catch (Throwable e) {
  LOG.warn("Failed to submit compcation completed event:" + e, e);
 }
}
origin: com.linkedin.gobblin/gobblin-data-management

 @Override
 protected HiveDataset computeNext() {
  while (this.tables.hasNext()) {
   DbAndTable dbAndTable = this.tables.next();
   try (AutoReturnableObject<IMetaStoreClient> client = HiveDatasetFinder.this.clientPool.getClient()) {
    Table table = client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable());
    Config datasetConfig = getDatasetConfig(table);
    if (ConfigUtils.getBoolean(datasetConfig, HIVE_DATASET_IS_BLACKLISTED_KEY, DEFAULT_HIVE_DATASET_IS_BLACKLISTED_KEY)) {
     continue;
    }
    if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
     SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
     .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_FOUND).build().submit();
    }
    return createHiveDataset(table, datasetConfig);
   } catch (Throwable t) {
    log.error(String.format("Failed to create HiveDataset for table %s.%s", dbAndTable.getDb(), dbAndTable.getTable()), t);
    if (HiveDatasetFinder.this.eventSubmitter.isPresent()) {
     SlaEventSubmitter.builder().datasetUrn(dbAndTable.toString())
       .eventSubmitter(HiveDatasetFinder.this.eventSubmitter.get()).eventName(DATASET_ERROR)
       .additionalMetadata(FAILURE_CONTEXT, t.toString()).build().submit();
    }
   }
  }
  return endOfData();
 }
};
origin: com.linkedin.gobblin/gobblin-data-management

if (!wus.getPropAsBoolean(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY)) {
 try {
  new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_FAILED_EVENT, wus.getProperties()).submit();
 } catch (Exception e) {
  log.error("Failed while emitting SLA event, but ignoring and moving forward to curate all clean up commands", e);
 }
}

try {
 new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT, wus.getProperties())
   .submit();
} catch (Exception e) {
 log.error("Failed while emitting SLA event, but ignoring and moving forward to curate all clean up commands", e);
}
origin: com.linkedin.gobblin/gobblin-data-management

 /**
  * Submit an SLA event when a {@link gobblin.data.management.copy.CopyableFile} is published. The <code>workUnitState</code> passed should have the
  * required {@link SlaEventKeys} set.
  *
  * @see SlaEventSubmitter#submit()
  *
  * @param eventSubmitter
  * @param workUnitState
  */
 static void submitSuccessfulFilePublish(EventSubmitter eventSubmitter, CopyableFile cf, WorkUnitState workUnitState) {
  String datasetUrn = workUnitState.getProp(SlaEventKeys.DATASET_URN_KEY);
  String partition = workUnitState.getProp(SlaEventKeys.PARTITION_KEY);
  String completenessPercentage = workUnitState.getProp(SlaEventKeys.COMPLETENESS_PERCENTAGE_KEY);
  String recordCount = workUnitState.getProp(SlaEventKeys.RECORD_COUNT_KEY);
  String previousPublishTimestamp = workUnitState.getProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY);
  String dedupeStatus = workUnitState.getProp(SlaEventKeys.DEDUPE_STATUS_KEY);
  SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(FILE_PUBLISHED_EVENT_NAME)
    .datasetUrn(datasetUrn).partition(partition).originTimestamp(Long.toString(cf.getOriginTimestamp()))
    .upstreamTimestamp(Long.toString(cf.getUpstreamTimestamp())).completenessPercentage(completenessPercentage)
    .recordCount(recordCount).previousPublishTimestamp(previousPublishTimestamp).dedupeStatus(dedupeStatus)
    .additionalMetadata(TARGET_PATH, cf.getDestination().toString())
    .additionalMetadata(SOURCE_PATH, cf.getOrigin().getPath().toString())
    .additionalMetadata(SIZE_IN_BYTES, Long.toString(cf.getOrigin().getLen())).build().submit();
 }
}
origin: com.linkedin.gobblin/gobblin-data-management

static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
  CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp, String upstreamTimestamp,
  Map<String, String> additionalMetadata) {
  SlaEventSubmitter.builder().eventSubmitter(eventSubmitter).eventName(DATASET_PUBLISHED_EVENT_NAME)
      .datasetUrn(datasetAndPartition.getDataset().getDatasetURN()).partition(datasetAndPartition.getPartition())
      .originTimestamp(originTimestamp).upstreamTimestamp(upstreamTimestamp).additionalMetadata(additionalMetadata)
      .build().submit();
}
gobblin.metrics.event.sla.SlaEventSubmitter.submit

Javadoc

Submit the SLA event by calling EventSubmitter#submit(). If SlaEventSubmitter#eventName, SlaEventSubmitter#eventSubmitter, or SlaEventSubmitter#datasetUrn are not available, the method is a no-op.
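
A minimal usage sketch of the builder pattern shown in the snippets above. The event name, dataset URN, and metadata values are placeholders, and eventSubmitter is assumed to be an existing gobblin.metrics.event.EventSubmitter:

// Sketch only: builds and submits an SLA event. If eventName, eventSubmitter,
// or datasetUrn were missing, submit() would be a no-op as described above.
SlaEventSubmitter.builder()
    .eventSubmitter(eventSubmitter)                   // assumed existing EventSubmitter
    .eventName("ExampleDatasetPublished")             // placeholder event name
    .datasetUrn("/data/example/dataset")              // placeholder dataset URN
    .additionalMetadata("exampleKey", "exampleValue") // optional extra metadata
    .build()
    .submit();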

Popular methods of SlaEventSubmitter

  • builder
    Create a builder for an SlaEventSubmitter, as used in the snippets above.
  • <init>
    Construct an SlaEventSubmitter by extracting SLA event metadata from the properties; see SlaEventKeys. A usage sketch follows this list.
  • buildEventMap
    Builds an event metadata Map from the SlaEventSubmitter. The method filters out metadata entries whose values are missing.
  • withoutPropertiesPrefix
    SlaEventKeys have a prefix of SlaEventKeys#EVENT_GOBBLIN_STATE_PREFIX to keep properties organized in the state; this method strips that prefix from the keys.
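
For comparison, a rough sketch of the properties-based constructor used in the gobblin-data-management snippet above. EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT and wus are taken from that snippet, and the SLA metadata (dataset URN, partition, and so on) is assumed to already be present in the work unit's properties under the SlaEventKeys keys:

// Sketch only: SLA metadata is extracted from the supplied Properties (see SlaEventKeys);
// submit() is a no-op if the required fields are not present.
new SlaEventSubmitter(eventSubmitter, EventConstants.CONVERSION_SUCCESSFUL_SLA_EVENT, wus.getProperties())
    .submit();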
