default String getStorageDir(DataSegment dataSegment, boolean useUniquePath)
{
  return getDefaultStorageDir(dataSegment, useUniquePath);
}
/**
 * @deprecated backward-compatibility shim that should be removed on next major release;
 * use {@link #getStorageDir(DataSegment, boolean)} instead.
 */
@Deprecated
default String getStorageDir(DataSegment dataSegment)
{
  return getStorageDir(dataSegment, false);
}
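// A minimal migration sketch, assuming `pusher` is any DataSegmentPusher
// implementation and `segment` is an existing DataSegment (both hypothetical
// names): the deprecated one-arg call is equivalent to passing
// useUniquePath = false explicitly.
String legacyDir = pusher.getStorageDir(segment);          // deprecated shim
String explicitDir = pusher.getStorageDir(segment, false); // preferred form
// Both resolve to the same (non-unique) storage directory.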
this.allowedHadoopPrefix.add("druid.storage"); this.allowedHadoopPrefix.add("druid.javascript"); this.allowedHadoopPrefix.addAll(DATA_SEGMENT_PUSHER.getAllowedPropertyPrefixesForHadoop()); this.allowedHadoopPrefix.addAll(spec.getTuningConfig().getUserAllowedHadoopPrefix());
@Override
public String getStorageDir(DataSegment dataSegment, boolean useUniquePath)
{
  String seg = JOINER.join(
      dataSegment.getDataSource(),
      StringUtils.format(
          "%s_%s",
          // Use ISODateTimeFormat.basicDateTime() format, to avoid using colons in file path.
          dataSegment.getInterval().getStart().toString(ISODateTimeFormat.basicDateTime()),
          dataSegment.getInterval().getEnd().toString(ISODateTimeFormat.basicDateTime())
      ),
      // Replace colons with underscores, since they are not supported through wasb:// prefix
      dataSegment.getVersion().replace(':', '_'),
      dataSegment.getShardSpec().getPartitionNum(),
      useUniquePath ? DataSegmentPusher.generateUniquePath() : null
  );

  log.info("DataSegment: [%s]", seg);
  return seg;
}
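// Illustrative output only (hypothetical segment values): this Azure override
// yields a colon-free path suitable for wasb:// URIs, e.g.
//   wikipedia/20150912T000000.000Z_20150913T000000.000Z/2015-09-12T14_00_00.000Z/0
// whereas the default getDefaultStorageDir layout (shown further below) keeps
// the ISO-8601 colons in the interval and version components.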
    .withBinaryVersion(SegmentUtils.getVersionFromDir(fileToUpload));
dataSegmentPusher.push(fileToUpload, segmentToUpload, false);
public static Path makeFileNamePath(
    final Path basePath,
    final FileSystem fs,
    final DataSegment segmentTemplate,
    final String baseFileName,
    DataSegmentPusher dataSegmentPusher
)
{
  return new Path(
      prependFSIfNullScheme(fs, basePath),
      dataSegmentPusher.makeIndexPathName(segmentTemplate, baseFileName)
  );
}
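// Hedged example (hypothetical values): with baseFileName = "index.zip" and a
// segment using the default storage dir, this resolves on the given FileSystem to
//   <basePath>/./<storageDir>/index.zip
// since makeIndexPathName prefixes the storage dir with "./".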
toolbox.getObjectMapper().writeValueAsString(spec),
toolbox.getConfig().getHadoopWorkingPath(),
toolbox.getSegmentPusher().getPathForHadoop(),
hadoopJobIdFile
};
static String getDefaultStorageDir(DataSegment segment, boolean useUniquePath)
{
  return JOINER.join(
      segment.getDataSource(),
      StringUtils.format("%s_%s", segment.getInterval().getStart(), segment.getInterval().getEnd()),
      segment.getVersion(),
      segment.getShardSpec().getPartitionNum(),
      useUniquePath ? generateUniquePath() : null
  );
}
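// Worked example with illustrative values: for dataSource "wikipedia",
// interval 2015-09-12/2015-09-13, version "2015-09-12T14:00:00.000Z",
// partition 0, and useUniquePath = false, the '/'-joiner (which skips the
// trailing null) produces:
//   wikipedia/2015-09-12T00:00:00.000Z_2015-09-13T00:00:00.000Z/2015-09-12T14:00:00.000Z/0
// With useUniquePath = true, generateUniquePath() appends a random fifth path component.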
final DataSegment uploadedSegment = toolbox.getSegmentPusher().push(fileToUpload, mergedSegment, false);
public static Path makeTmpPath(
    final Path basePath,
    final FileSystem fs,
    final DataSegment segmentTemplate,
    final TaskAttemptID taskAttemptID,
    DataSegmentPusher dataSegmentPusher
)
{
  return new Path(
      prependFSIfNullScheme(fs, basePath),
      StringUtils.format(
          "./%s.%d",
          dataSegmentPusher.makeIndexPathName(segmentTemplate, JobHelper.INDEX_ZIP),
          taskAttemptID.getId()
      )
  );
}
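// Hedged example (hypothetical values): for taskAttemptID.getId() == 3, the
// "./%s.%d" format produces a temporary sibling of the final index file,
//   ././<storageDir>/index.zip.3
// under basePath, which can be renamed into place once the attempt's upload succeeds.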
toolbox.getObjectMapper().writeValueAsString(spec),
toolbox.getConfig().getHadoopWorkingPath(),
toolbox.getSegmentPusher().getPathForHadoop()
};
private StorageLocation findStorageLocationIfLoaded(final DataSegment segment)
{
  for (StorageLocation location : getSortedList(locations)) {
    File localStorageDir = new File(location.getPath(), DataSegmentPusher.getDefaultStorageDir(segment, false));
    if (localStorageDir.exists()) {
      return location;
    }
  }
  return null;
}
final String uniquePrefix = useUniquePath ? DataSegmentPusher.generateUniquePath() + "_" : "";
final Path outIndexFile = new Path(StringUtils.format(
    "%s/%s/%d_%sindex.zip",
() -> dataSegmentPusher.push(
    mergedFile,
    sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)),
default String makeIndexPathName(DataSegment dataSegment, String indexName)
{
  // This is only called from Hadoop batch which doesn't require unique segment paths so set useUniquePath=false
  return StringUtils.format("./%s/%s", getStorageDir(dataSegment, false), indexName);
}
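// Hedged example: for indexName = "index.zip" this returns
//   ./<storageDir>/index.zip
// with useUniquePath hard-wired to false, matching the Hadoop batch caller noted above.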
@Override
public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException
{
  StorageLocation loc = findStorageLocationIfLoaded(segment);
  String storageDir = DataSegmentPusher.getDefaultStorageDir(segment, false);

  if (loc == null) {
    loc = loadSegmentWithRetry(segment, storageDir);
  }
  loc.addSegment(segment);
  return new File(loc.getPath(), storageDir);
}
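// Load-path sketch, assuming `manager` is this loader and `segment` has not
// been cached locally yet (both hypothetical names): findStorageLocationIfLoaded
// returns null, so loadSegmentWithRetry downloads the segment before the
// location is recorded.
File segmentDir = manager.getSegmentFiles(segment);
// segmentDir now points at <location>/<defaultStorageDir>.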
DataSegment segment = dataSegmentPusher.push(
    mergedFile,
    sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes)),