/** * readCommittedScope provide Read Snapshot isolation. * @param segmentId * @param readCommittedScope * @return */ public static Segment toSegment(String segmentId, ReadCommittedScope readCommittedScope) { // SegmentId can be combination of segmentNo and segmentFileName. String[] split = segmentId.split("#"); if (split.length > 1) { return new Segment(split[0], split[1], readCommittedScope); } else if (split.length > 0) { return new Segment(split[0], null, readCommittedScope); } return new Segment(segmentId, null, readCommittedScope); }
/**
 * Collects the segments whose remaining block count is zero, i.e. segments
 * with no blocks left that can therefore be marked as deleted.
 *
 * @param segmentBlockCount map from segment name to its remaining block count
 * @return segments eligible for deletion
 */
public static List<Segment> getListOfSegmentsToMarkDeleted(Map<String, Long> segmentBlockCount) {
  List<Segment> emptySegments = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
  segmentBlockCount.forEach((segmentName, blockCount) -> {
    if (blockCount == 0) {
      emptySegments.add(new Segment(segmentName, ""));
    }
  });
  return emptySegments;
}
@Override
public synchronized void clear() {
  // Snapshot the keys before iterating: clear(Segment) presumably mutates
  // segmentMap, which would otherwise raise a ConcurrentModificationException
  // — TODO confirm against clear(Segment).
  String[] segmentIds = segmentMap.keySet().toArray(new String[0]);
  for (String segmentId : segmentIds) {
    clear(new Segment(segmentId, null, null));
  }
}
@Override
public synchronized void clear() {
  if (segmentMap.size() > 0) {
    // Copy the key set before iterating: clear(Segment) presumably removes
    // entries from segmentMap, which would otherwise trigger a
    // ConcurrentModificationException — TODO confirm against clear(Segment).
    List<String> segments = new ArrayList<>(segmentMap.keySet());
    for (String segmentId : segments) {
      clear(new Segment(segmentId, null, null));
    }
  }
}
/**
 * Computes the data and index sizes of a segment and records both on the
 * given load metadata entry.
 *
 * @param loadMetadataDetails metadata entry updated in place with the sizes
 * @param segmentId segment number whose sizes are computed
 * @param carbonTable table owning the segment (provides the table path)
 * @return sum of data size and index size for the segment
 * @throws IOException if the size computation fails
 */
public static Long addDataIndexSizeIntoMetaEntry(LoadMetadataDetails loadMetadataDetails,
    String segmentId, CarbonTable carbonTable) throws IOException {
  Segment segment = new Segment(segmentId, loadMetadataDetails.getSegmentFile());
  Map<String, Long> sizes =
      CarbonUtil.getDataSizeAndIndexSize(carbonTable.getTablePath(), segment);
  Long dataSize = sizes.get(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE);
  Long indexSize = sizes.get(CarbonCommonConstants.CARBON_TOTAL_INDEX_SIZE);
  loadMetadataDetails.setDataSize(String.valueOf(dataSize));
  loadMetadataDetails.setIndexSize(String.valueOf(indexSize));
  // NOTE(review): assumes both size keys are always present in the map; a
  // missing key would NPE on the unboxed addition — confirm the contract of
  // CarbonUtil.getDataSizeAndIndexSize.
  return dataSize + indexSize;
}
/**
 * After updating table status file clear the dataMap cache for all segmentId's on which
 * dataMap is being created because flows like merge index file creation involves modification of
 * segment file and once segment file is modified the cache for that segment need to be cleared
 * otherwise the old cache will be used which is stale
 *
 * @param carbonTable table whose default dataMap cache is cleared
 * @param segmentId segment whose cached entries must be evicted
 */
public static void clearBlockDataMapCache(CarbonTable carbonTable, String segmentId) {
  TableDataMap defaultDataMap =
      DataMapStoreManager.getInstance().getDefaultDataMap(carbonTable);
  List<Segment> segments = new ArrayList<>();
  segments.add(new Segment(segmentId));
  LOGGER.info(
      "clearing cache while updating segment file entry in table status file for segmentId: "
          + segmentId);
  defaultDataMap.clear(segments);
}
/**
 * Get the segment object corresponding to segmentNo
 *
 * @param segmentNo segment number to look up
 * @param loadMetadataDetails table status entries to search
 * @return the matching Segment, or null when no entry has that load name
 */
public static Segment getSegment(String segmentNo, LoadMetadataDetails[] loadMetadataDetails) {
  for (int i = 0; i < loadMetadataDetails.length; i++) {
    LoadMetadataDetails details = loadMetadataDetails[i];
    if (details.getLoadName().equals(segmentNo)) {
      return new Segment(details.getLoadName(), details.getSegmentFile());
    }
  }
  return null;
}
/** * Register a DataMapWriter */ private void register(DataMapFactory factory, String segmentId, String taskNo, SegmentProperties segmentProperties) { assert (factory != null); assert (segmentId != null); DataMapMeta meta = factory.getMeta(); if (meta == null) { // if data map does not have meta, no need to register return; } List<CarbonColumn> columns = factory.getMeta().getIndexedColumns(); List<DataMapWriter> writers = registry.get(columns); DataMapWriter writer = null; try { writer = factory.createWriter(new Segment(segmentId), taskNo, segmentProperties); } catch (IOException e) { LOG.error("Failed to create DataMapWriter: " + e.getMessage()); throw new DataMapWriterException(e); } if (writers != null) { writers.add(writer); } else { writers = new ArrayList<>(); writers.add(writer); registry.put(columns, writers); } LOG.info("DataMapWriter " + writer + " added"); }
// NOTE(review): fragment of a larger method — the enclosing definition is not
// visible in this excerpt. Re-resolves the target segment from the table path,
// then registers it (with the read snapshot scope) as a segment the job may access.
Segment segment = Segment.getSegment(targetSegment, carbonTable.getTablePath());
segmentList.add(
    new Segment(segment.getSegmentNo(), segment.getSegmentFileName(), readCommittedScope));
setSegmentsToAccess(job.getConfiguration(), segmentList);
// Look up the delete-delta files for blockName within segment 'seg'.
// NOTE(review): fragment — the return value appears discarded on this line;
// presumably the surrounding (not visible) code captures it — confirm.
segmentUpdateStatusManager.getDeleteDeltaFilesList(new Segment(seg), blockName);
// NOTE(review): this excerpt appears garbled — several statements below are
// orphaned "new Segment(...));" expressions whose "someList.add(" receivers
// were lost, so the fragment does not compile as-is. Recover the original
// statements from version control before relying on this code.
Segment seg = new Segment(segment.getMergedLoadName(), segment.getSegmentFile(),
    readCommittedScope, segment);
if (!listOfValidSegments.contains(seg)) {
  new Segment(segment.getLoadName(), segment.getSegmentFile(), readCommittedScope));
  new Segment(segment.getLoadName(), segment.getSegmentFile(), readCommittedScope));
  continue;
  new Segment(segment.getLoadName(), segment.getSegmentFile(), readCommittedScope, segment));
} else if ((SegmentStatus.LOAD_FAILURE == segment.getSegmentStatus()
    || SegmentStatus.COMPACTED == segment.getSegmentStatus()
    || SegmentStatus.MARKED_FOR_DELETE == segment.getSegmentStatus())) {
  // Failed, compacted, or deleted segments are classified as invalid.
  listOfInvalidSegments.add(new Segment(segment.getLoadName(), segment.getSegmentFile()));
} else if (SegmentStatus.INSERT_IN_PROGRESS == segment.getSegmentStatus()
    || SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS == segment.getSegmentStatus()) {
  // In-flight loads (insert / insert-overwrite) go to the in-progress list.
  listOfInProgressSegments.add(
      new Segment(segment.getLoadName(), segment.getSegmentFile(), readCommittedScope));
/**
 * method to identify the segments qualified for merging in case of IUD Compaction.
 *
 * @param segments candidate segments from the table status file
 * @param carbonLoadModel load model carrying table identity and update status
 * @return valid segments whose update-delta file count crosses the configured threshold
 */
private static List<LoadMetadataDetails> identifySegmentsToBeMergedBasedOnIUD(
    List<LoadMetadataDetails> segments, CarbonLoadModel carbonLoadModel) {
  AbsoluteTableIdentifier identifier =
      carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().getAbsoluteTableIdentifier();
  int updateDeltaThreshold =
      CarbonProperties.getInstance().getNoUpdateDeltaFilesThresholdForIUDCompaction();
  List<LoadMetadataDetails> qualified = new ArrayList<>(segments.size());
  for (LoadMetadataDetails seg : segments) {
    // Short-circuit: only valid segments are checked against the delta threshold.
    boolean qualifies = isSegmentValid(seg)
        && checkUpdateDeltaFilesInSeg(new Segment(seg.getLoadName(), seg.getSegmentFile()),
            identifier, carbonLoadModel.getSegmentUpdateStatusManager(), updateDeltaThreshold);
    if (qualifies) {
      qualified.add(seg);
    }
  }
  return qualified;
}
} else {
  // NOTE(review): fragment — the matching if-branch is not visible. In this
  // branch the segment size is computed directly from the table path using the
  // segment's segment file.
  sizeOfOneSegmentAcrossPartition = CarbonUtil.getSizeOfSegment(carbonTable.getTablePath(),
      new Segment(segId, segment.getSegmentFile()));
// NOTE(review): fragment — the enclosing condition is not visible. The literal
// segment name "null" looks like a sentinel for an external table with no load
// metadata; confirm how downstream code treats it.
seg = new Segment("null", null, readCommittedScope);
externalTableSegments.add(seg);
} else {
  // Otherwise, build one Segment per load entry from the read snapshot.
  LoadMetadataDetails[] loadMetadataDetails = readCommittedScope.getSegmentList();
  for (LoadMetadataDetails load : loadMetadataDetails) {
    seg = new Segment(load.getLoadName(), null, readCommittedScope);
    externalTableSegments.add(seg);
if (oneLoad.getSegmentFile() != null) {
  // Segment has a segment file: delete it through SegmentFileStore, passing the
  // partition specs and update status manager along.
  SegmentFileStore.deleteSegment(carbonTable.getAbsoluteTableIdentifier().getTablePath(),
      new Segment(oneLoad.getLoadName(), oneLoad.getSegmentFile()), specs, updateStatusManager);
} else {
  // No segment file (presumably an older layout — confirm): ask each index
  // dataMap to drop its data for this segment instead. The shared 'segments'
  // list is reused across iterations, hence the clear() before add().
  for (TableDataMap dataMap : indexDataMaps) {
    segments.clear();
    segments.add(new Segment(oneLoad.getLoadName()));
    dataMap.deleteDatamapData(segments);
// Flip segments selected for deletion to MARKED_FOR_DELETE and stamp the
// supplied timestamp. NOTE(review): the contains() check builds a Segment from
// the load name only, so it presumably relies on Segment.equals comparing by
// segment number — confirm against Segment.equals.
if (segmentsToBeDeleted.contains(new Segment(loadMetadata.getLoadName()))) {
  loadMetadata.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
  loadMetadata.setModificationOrdeletionTimesStamp(Long.parseLong(updatedTimeStamp));
// NOTE(review): fragment — trailing else-if is truncated mid-expression.
// As above, the contains() check presumably relies on Segment.equals matching
// by segment number only — confirm.
if (segmentsToBeDeleted.contains(new Segment(detail.getLoadName()))) {
  detail.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
} else if (segmentFilesTobeUpdated
// Rewrite the segment file for this segment using the given UUID, then queue
// the segment (paired with its new segment-file name) for the table-status
// update that follows — TODO confirm the surrounding flow performs that update.
String file = SegmentFileStore.writeSegmentFile(table, segment.getSegmentNo(), UUID);
segmentFilesToBeUpdatedLatest.add(new Segment(segment.getSegmentNo(), file));