/**
 * Fetches the cached bloom filter value for the given key, or null when the key
 * has no entry in the LRU cache.
 */
@Override
public BloomCacheKeyValue.CacheValue getIfPresent(BloomCacheKeyValue.CacheKey key) {
  String cacheKey = key.toString();
  return (BloomCacheKeyValue.CacheValue) lruCache.get(cacheKey);
}
/**
 * Evicts the bloom filter entry associated with the given key from the LRU cache.
 */
@Override
public void invalidate(BloomCacheKeyValue.CacheKey key) {
  String cacheKey = key.toString();
  lruCache.remove(cacheKey);
}
/** * This method will create the lru cache instance based on the given type * */ private void createLRULevelCacheInstance() { boolean isDriver = Boolean.parseBoolean(CarbonProperties.getInstance() .getProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, CarbonCommonConstants.IS_DRIVER_INSTANCE_DEFAULT)); if (isDriver) { carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_DRIVER_LRU_CACHE_SIZE, CarbonCommonConstants.CARBON_MAX_LRU_CACHE_SIZE_DEFAULT); } else { // if executor cache size is not configured then driver cache conf will be used String executorCacheSize = CarbonProperties.getInstance() .getProperty(CarbonCommonConstants.CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE); if (null != executorCacheSize) { carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE, CarbonCommonConstants.CARBON_MAX_LRU_CACHE_SIZE_DEFAULT); } else { LOGGER.info( "Executor LRU cache size not configured. Initializing with driver LRU cache size."); carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_DRIVER_LRU_CACHE_SIZE, CarbonCommonConstants.CARBON_MAX_LRU_CACHE_SIZE_DEFAULT); } } }
/** * This method will check a required column can be loaded into memory or not. If required * this method will call for eviction of existing data from memory * * @param requiredSize * @return */ private boolean freeMemorySizeForAddingCache(long requiredSize) { boolean memoryAvailable = false; if (isSizeAvailableToLoadColumnDictionary(requiredSize)) { memoryAvailable = true; } else { // get the keys that can be removed from memory List<String> keysToBeRemoved = getKeysToBeRemoved(requiredSize); for (String cacheKey : keysToBeRemoved) { removeKey(cacheKey); } // after removing the keys check again if required size is available if (isSizeAvailableToLoadColumnDictionary(requiredSize)) { memoryAvailable = true; } } return memoryAvailable; }
// When an LRU size limit is configured, additions are guarded by the
// free-memory check under the lruCacheMap lock.
if (isLRUCacheSizeConfigured()) {
  synchronized (lruCacheMap) {
    if (freeMemorySizeForAddingCache(requiredSize)) {
      currentSize = currentSize + requiredSize;
      addEntryToLRUCacheMap(columnIdentifier, cacheInfo);
      columnKeyAddedSuccessfully = true;
    } else {
      // NOTE(review): this branch adds the entry even though the size check
      // failed, and without updating currentSize — confirm against the full
      // method body (truncated here) whether that is intentional.
      addEntryToLRUCacheMap(columnIdentifier, cacheInfo);
/**
 * Checks whether the required size is available in cache memory for the given
 * entry, evicting removable entries first when an LRU size limit is configured.
 * No entry is added to the cache by this method.
 *
 * @param columnIdentifier key the size check is performed for
 * @param requiredSize size in bytes needed for the entry
 * @return true when the entry can be added to the cache
 */
public boolean tryPut(String columnIdentifier, long requiredSize) {
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("checking Required size for entry " + columnIdentifier + " :: " + requiredSize
        + " Current cache size :: " + currentSize);
  }
  // with no configured size limit every entry is accepted
  if (!isLRUCacheSizeConfigured()) {
    return true;
  }
  synchronized (lruCacheMap) {
    if (freeMemorySizeForAddingCache(requiredSize)) {
      return true;
    }
    LOGGER.error(
        "Size check failed.Size not available. Entry cannot be added to lru cache :: "
            + columnIdentifier + " .Required Size = " + requiredSize + " Size available " + (
            lruCacheMemorySize - currentSize));
    return false;
  }
}
String lruCacheKey = identifier.getUniqueTableSegmentIdentifier();
BlockletDataMapIndexWrapper blockletDataMapIndexWrapper =
    (BlockletDataMapIndexWrapper) lruCache.get(lruCacheKey);
List<BlockDataMap> dataMaps = new ArrayList<>();
if (blockletDataMapIndexWrapper == null) {
  // NOTE(review): blockletDataMapIndexWrapper is null inside this branch, so the
  // put below dereferences null (getMemorySize()) and would cache a null value.
  // A wrapper presumably must be constructed before this put — confirm against
  // the full method body, which is truncated in this view.
  lruCache.put(identifier.getUniqueTableSegmentIdentifier(), blockletDataMapIndexWrapper,
      blockletDataMapIndexWrapper.getMemorySize());
dictionaryInfo.setMemorySize(requiredSize);
// NOTE(review): the result of this pre-check is stored but never read before
// the real put below — confirm in the full method (truncated here) whether the
// unused flag is intentional.
boolean colCanBeAddedToLRUCache = carbonLRUCache.tryPut(lruCacheKey, requiredSize);
// refine the size estimate from the actual object graph, then record it
long updateRequiredSize = ObjectSizeCalculator.estimate(dictionaryInfo, requiredSize);
dictionaryInfo.setMemorySize(updateRequiredSize);
// the put can still fail when memory is exhausted; surface that as an error
if (!carbonLRUCache.put(lruCacheKey, dictionaryInfo, updateRequiredSize)) {
  throw new DictionaryBuilderException(
      "Cannot load dictionary into memory. Not enough memory available");
/**
 * Returns the bloom filter value for the given key, loading it from the bloom
 * index file store and caching it on a miss.
 *
 * @param key shard path plus index column identifying the bloom filters
 * @return the cached or freshly loaded bloom filter value
 * @throws IOException when loading the bloom filters from file fails
 */
@Override
public BloomCacheKeyValue.CacheValue get(BloomCacheKeyValue.CacheKey key) throws IOException {
  BloomCacheKeyValue.CacheValue value = getIfPresent(key);
  if (value != null) {
    return value;
  }
  // cache miss: load the bloom filters for this shard/column from disk
  List<CarbonBloomFilter> bloomFilters =
      BloomIndexFileStore.loadBloomFilterFromFile(key.getShardPath(), key.getIndexColumn());
  value = new BloomCacheKeyValue.CacheValue(bloomFilters);
  lruCache.put(key.toString(), value, value.getMemorySize());
  return value;
}
/**
 * Clears the whole LRU cache and drops every registered cache type instance.
 */
public void dropAllCache() {
  if (carbonLRUCache != null) {
    carbonLRUCache.clear();
    carbonLRUCache = null;
  }
  cacheTypeToCacheMap.clear();
}
}
/**
 * Creates the LRU cache, reading its size (in MB) from the given property and
 * falling back to the default value when the configured value is malformed.
 *
 * @param propertyName property name to take the size configured
 * @param defaultPropertyName default property in case size is not configured
 */
public CarbonLRUCache(String propertyName, String defaultPropertyName) {
  String configuredSize =
      CarbonProperties.getInstance().getProperty(propertyName, defaultPropertyName);
  try {
    lruCacheMemorySize = Integer.parseInt(configuredSize);
  } catch (NumberFormatException e) {
    // a non-numeric value silently falls back to the default size
    lruCacheMemorySize = Integer.parseInt(defaultPropertyName);
  }
  initCache();
  if (lruCacheMemorySize > 0) {
    LOGGER.info("Configured LRU cache size is " + lruCacheMemorySize + " MB");
    // convert in bytes
    lruCacheMemorySize = lruCacheMemorySize * BYTE_CONVERSION_CONSTANT;
  } else {
    LOGGER.info("LRU cache size not configured. Therefore default behavior will be "
        + "considered and no LRU based eviction of columns will be done");
  }
}
Cacheable cacheInfo = entry.getValue();
long memorySize = cacheInfo.getMemorySize();
// only entries the cache deems removable are marked for deletion; accumulate
// the memory that would be reclaimed (loop is truncated in this view)
if (canBeRemoved(cacheInfo)) {
  removedSize = removedSize + memorySize;
  toBeDeletedKeys.add(key);
@Override public void put(TableBlockIndexUniqueIdentifierWrapper tableBlockIndexUniqueIdentifierWrapper, BlockletDataMapIndexWrapper wrapper) throws IOException, MemoryException { // As dataMap will use unsafe memory, it is not recommended to overwrite an existing entry // as in that case clearing unsafe memory need to be taken card. If at all datamap entry // in the cache need to be overwritten then use the invalidate interface // and then use the put interface if (null == getIfPresent(tableBlockIndexUniqueIdentifierWrapper)) { List<BlockDataMap> dataMaps = wrapper.getDataMaps(); try { for (BlockDataMap blockletDataMap : dataMaps) { blockletDataMap.convertToUnsafeDMStore(); } // Locking is not required here because in LRU cache map add method is synchronized to add // only one entry at a time and if a key already exists it will not overwrite the entry lruCache.put(tableBlockIndexUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier() .getUniqueTableSegmentIdentifier(), wrapper, wrapper.getMemorySize()); } catch (Throwable e) { // clear all the memory acquired by data map in case of any failure for (DataMap blockletDataMap : dataMaps) { blockletDataMap.clear(); } throw new IOException("Problem in adding datamap to cache.", e); } } }
/**
 * Returns the BlockletDataMapIndexWrapper cached for the given identifier, or
 * null when no entry exists.
 *
 * @param tableSegmentUniqueIdentifierWrapper wrapper holding the segment identifier
 * @return cached wrapper or null
 */
@Override
public BlockletDataMapIndexWrapper getIfPresent(
    TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) {
  String cacheKey = tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier()
      .getUniqueTableSegmentIdentifier();
  return (BlockletDataMapIndexWrapper) lruCache.get(cacheKey);
}
/**
 * Removes the forward dictionary cache entry for the given column.
 *
 * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
 *                                         tableName and columnIdentifier
 */
@Override
public void invalidate(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
  String columnId = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
  carbonLRUCache.remove(getLruCacheKey(columnId, CacheType.FORWARD_DICTIONARY));
}
/**
 * Returns the ColumnReverseDictionaryInfo for the given column from the LRU
 * cache, creating a fresh (not yet cached) instance when no entry exists. The
 * second lookup under the lock prevents concurrent callers sharing the same
 * identifier object from creating duplicate instances.
 *
 * @param dictionaryColumnUniqueIdentifier identifier also used as the lock object
 * @param columnIdentifier column id used to build the cache key
 * @return cached or newly created reverse dictionary info
 */
private ColumnReverseDictionaryInfo getColumnReverseDictionaryInfo(
    DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
  String cacheKey = getLruCacheKey(columnIdentifier, CacheType.REVERSE_DICTIONARY);
  ColumnReverseDictionaryInfo info = (ColumnReverseDictionaryInfo) carbonLRUCache.get(cacheKey);
  if (info != null) {
    return info;
  }
  synchronized (dictionaryColumnUniqueIdentifier) {
    // re-check inside the lock before creating a new instance
    info = (ColumnReverseDictionaryInfo) carbonLRUCache.get(cacheKey);
    if (info == null) {
      info = new ColumnReverseDictionaryInfo();
    }
  }
  return info;
}
/**
 * Removes the reverse dictionary cache entry for the given column.
 *
 * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
 *                                         tableName and columnIdentifier
 */
@Override
public void invalidate(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
  String columnId = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
  carbonLRUCache.remove(getLruCacheKey(columnId, CacheType.REVERSE_DICTIONARY));
}
/**
 * Returns the ColumnDictionaryInfo for the given column from the LRU cache,
 * creating a fresh (not yet cached) instance when no entry exists. The second
 * lookup under the lock prevents concurrent callers sharing the same identifier
 * object from creating duplicate instances.
 *
 * @param dictionaryColumnUniqueIdentifier identifier also used as the lock object
 * @param columnIdentifier column id used to build the cache key
 * @return cached or newly created forward dictionary info
 */
private ColumnDictionaryInfo getColumnDictionaryInfo(
    DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
  String cacheKey = getLruCacheKey(columnIdentifier, CacheType.FORWARD_DICTIONARY);
  ColumnDictionaryInfo info = (ColumnDictionaryInfo) carbonLRUCache.get(cacheKey);
  if (info != null) {
    return info;
  }
  synchronized (dictionaryColumnUniqueIdentifier) {
    // re-check inside the lock before creating a new instance
    info = (ColumnDictionaryInfo) carbonLRUCache.get(cacheKey);
    if (info == null) {
      info = new ColumnDictionaryInfo(dictionaryColumnUniqueIdentifier.getDataType());
    }
  }
  return info;
}
/** * method invalidate the segment cache for segment * * @param tableSegmentUniqueIdentifierWrapper */ @Override public void invalidate( TableBlockIndexUniqueIdentifierWrapper tableSegmentUniqueIdentifierWrapper) { BlockletDataMapIndexWrapper blockletDataMapIndexWrapper = getIfPresent(tableSegmentUniqueIdentifierWrapper); if (null != blockletDataMapIndexWrapper) { // clear the segmentProperties cache List<BlockDataMap> dataMaps = blockletDataMapIndexWrapper.getDataMaps(); if (null != dataMaps && !dataMaps.isEmpty()) { String segmentId = tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier().getSegmentId(); // as segmentId will be same for all the dataMaps and segmentProperties cache is // maintained at segment level so it need to be called only once for clearing SegmentPropertiesAndSchemaHolder.getInstance() .invalidate(segmentId, dataMaps.get(0).getSegmentPropertiesIndex(), tableSegmentUniqueIdentifierWrapper.isAddTableBlockToUnsafeAndLRUCache()); } } lruCache.remove(tableSegmentUniqueIdentifierWrapper.getTableBlockIndexUniqueIdentifier() .getUniqueTableSegmentIdentifier()); }
/**
 * Clears the access count of the given table segments so that the LRU cache is
 * free to evict them.
 *
 * @param tableSegmentUniqueIdentifiersWrapper identifiers whose access count must be released
 */
@Override
public void clearAccessCount(
    List<TableBlockIndexUniqueIdentifierWrapper> tableSegmentUniqueIdentifiersWrapper) {
  for (TableBlockIndexUniqueIdentifierWrapper identifierWrapper :
      tableSegmentUniqueIdentifiersWrapper) {
    BlockDataMap cacheable = (BlockDataMap) lruCache.get(
        identifierWrapper.getTableBlockIndexUniqueIdentifier().getUniqueTableSegmentIdentifier());
    // the entry may already have been evicted or invalidated; the original code
    // dereferenced the lookup result unconditionally and could throw NPE here
    if (cacheable != null) {
      cacheable.clear();
    }
  }
}
}