/**
 * Emits a dataset-publish-failed event for the given dataset/partition.
 *
 * <p>The event carries the dataset URN under {@code DATASET_ROOT_METADATA_NAME} so downstream
 * consumers can identify which dataset failed to publish.
 *
 * @param eventSubmitter submitter used to emit the failure event
 * @param datasetAndPartition the dataset/partition whose publish failed
 */
static void submitFailedDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition) {
  String datasetUrn = datasetAndPartition.getDataset().getDatasetURN();
  eventSubmitter.submit(DATASET_PUBLISHED_FAILED_EVENT_NAME,
      ImmutableMap.of(DATASET_ROOT_METADATA_NAME, datasetUrn));
}
/**
 * Publishes all file sets contained in the given work unit states, one dataset/partition at a
 * time. A failure to publish one dataset does not prevent the remaining datasets from being
 * attempted; after all attempts, an {@link IOException} is thrown if any dataset failed.
 *
 * @param states work unit states to publish, grouped into file sets via {@code groupByFileSet}
 * @throws IOException if at least one dataset failed to publish; the first underlying failure
 *         is attached as the cause
 */
@Override
public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
  /*
   * This mapping is used to set WorkingState of all {@link WorkUnitState}s to {@link
   * WorkUnitState.WorkingState#COMMITTED} after a {@link CopyableDataset} is successfully published
   */
  Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> datasets = groupByFileSet(states);

  boolean allDatasetsPublished = true;
  Throwable firstFailure = null;
  for (CopyEntity.DatasetAndPartition datasetAndPartition : datasets.keySet()) {
    try {
      this.publishFileSet(datasetAndPartition, datasets.get(datasetAndPartition));
    } catch (Throwable e) {
      // Best-effort: report and log this dataset's failure, then continue with the others.
      CopyEventSubmitterHelper.submitFailedDatasetPublish(this.eventSubmitter, datasetAndPartition);
      log.error("Failed to publish " + datasetAndPartition.getDataset().getDatasetURN(), e);
      allDatasetsPublished = false;
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
  }
  if (!allDatasetsPublished) {
    // Preserve the first root cause so callers can diagnose the failure instead of only
    // seeing a generic message (per-dataset details are already in the log).
    throw new IOException("Not all datasets published successfully", firstFailure);
  }
}
datasetWriterOutputPath, metadata.getDatasetURN()));
/**
 * Verifies that {@link CopyableDatasetMetadata} survives a serialize/deserialize round trip:
 * the deserialized copy must report the same dataset URN as the original dataset.
 */
@Test
public void testSerializeDeserialize() throws Exception {
  CopyableDataset copyableDataset = new TestCopyableDataset();
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(copyableDataset);

  // Round-trip through the string representation.
  String serialized = metadata.serialize();
  CopyableDatasetMetadata deserialized = CopyableDatasetMetadata.deserialize(serialized);

  Assert.assertEquals(copyableDataset.datasetURN(), deserialized.getDatasetURN());
}
/**
 * Emits a dataset-published SLA event for the given dataset/partition.
 *
 * @param eventSubmitter submitter used to emit the SLA event
 * @param datasetAndPartition the dataset/partition that was published
 * @param originTimestamp origin timestamp recorded on the SLA event
 * @param upstreamTimestamp upstream timestamp recorded on the SLA event
 * @param additionalMetadata extra key/value metadata attached to the event
 */
static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp,
    String upstreamTimestamp, Map<String, String> additionalMetadata) {
  String datasetUrn = datasetAndPartition.getDataset().getDatasetURN();
  String partition = datasetAndPartition.getPartition();
  SlaEventSubmitter.builder()
      .eventSubmitter(eventSubmitter)
      .eventName(DATASET_PUBLISHED_EVENT_NAME)
      .datasetUrn(datasetUrn)
      .partition(partition)
      .originTimestamp(originTimestamp)
      .upstreamTimestamp(upstreamTimestamp)
      .additionalMetadata(additionalMetadata)
      .build()
      .submit();
}
/**
 * Submits a {@code DATASET_PUBLISHED_FAILED_EVENT_NAME} event identifying, via its
 * {@code DATASET_ROOT_METADATA_NAME} metadata entry, the dataset whose publish failed.
 *
 * @param eventSubmitter submitter through which the event is emitted
 * @param datasetAndPartition dataset/partition that failed to publish
 */
static void submitFailedDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition) {
  ImmutableMap<String, String> eventMetadata =
      ImmutableMap.of(DATASET_ROOT_METADATA_NAME, datasetAndPartition.getDataset().getDatasetURN());
  eventSubmitter.submit(DATASET_PUBLISHED_FAILED_EVENT_NAME, eventMetadata);
}
/**
 * Publishes every file set found in the given work unit states. Datasets are published
 * independently: one failing dataset is reported and logged, and the loop moves on to the next.
 * If any dataset failed, an {@link IOException} is thrown once all have been attempted.
 *
 * @param states work unit states to publish, grouped into file sets via {@code groupByFileSet}
 * @throws IOException if at least one dataset failed to publish; the first underlying failure
 *         is attached as the cause
 */
@Override
public void publishData(Collection<? extends WorkUnitState> states) throws IOException {
  /*
   * This mapping is used to set WorkingState of all {@link WorkUnitState}s to {@link
   * WorkUnitState.WorkingState#COMMITTED} after a {@link CopyableDataset} is successfully published
   */
  Multimap<CopyEntity.DatasetAndPartition, WorkUnitState> datasets = groupByFileSet(states);

  boolean allDatasetsPublished = true;
  Throwable firstFailure = null;
  for (CopyEntity.DatasetAndPartition datasetAndPartition : datasets.keySet()) {
    try {
      this.publishFileSet(datasetAndPartition, datasets.get(datasetAndPartition));
    } catch (Throwable e) {
      // Best-effort: report and log this dataset's failure, then continue with the others.
      CopyEventSubmitterHelper.submitFailedDatasetPublish(this.eventSubmitter, datasetAndPartition);
      log.error("Failed to publish " + datasetAndPartition.getDataset().getDatasetURN(), e);
      allDatasetsPublished = false;
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
  }
  if (!allDatasetsPublished) {
    // Preserve the first root cause so callers can diagnose the failure instead of only
    // seeing a generic message (per-dataset details are already in the log).
    throw new IOException("Not all datasets published successfully", firstFailure);
  }
}
datasetWriterOutputPath, metadata.getDatasetURN()));
/**
 * Builds and submits a {@code DATASET_PUBLISHED_EVENT_NAME} SLA event describing a successfully
 * published dataset/partition, including origin/upstream timestamps and any extra metadata.
 *
 * @param eventSubmitter submitter through which the SLA event is emitted
 * @param datasetAndPartition dataset/partition that was published
 * @param originTimestamp origin timestamp recorded on the SLA event
 * @param upstreamTimestamp upstream timestamp recorded on the SLA event
 * @param additionalMetadata extra key/value metadata attached to the event
 */
static void submitSuccessfulDatasetPublish(EventSubmitter eventSubmitter,
    CopyEntity.DatasetAndPartition datasetAndPartition, String originTimestamp,
    String upstreamTimestamp, Map<String, String> additionalMetadata) {
  SlaEventSubmitter.builder()
      .eventSubmitter(eventSubmitter)
      .eventName(DATASET_PUBLISHED_EVENT_NAME)
      .datasetUrn(datasetAndPartition.getDataset().getDatasetURN())
      .partition(datasetAndPartition.getPartition())
      .additionalMetadata(additionalMetadata)
      .originTimestamp(originTimestamp)
      .upstreamTimestamp(upstreamTimestamp)
      .build()
      .submit();
}