@Override
public long nextTimestamp() {
	// Timestamp generation is delegated entirely to the RegionFactory.
	final long timestamp = regionFactory.nextTimestamp();
	return timestamp;
}
/**
 * Builds the enabled second-level cache coordinator for the given SessionFactory.
 * <p>
 * Resolves and starts the {@code RegionFactory}, then — only when the query cache
 * is enabled — builds the default update-timestamps region/cache and the default
 * query-results region/cache. Otherwise installs a disabled timestamps cache and
 * no default query-results cache.
 */
public EnabledCaching(SessionFactoryImplementor sessionFactory) {
	this.sessionFactory = sessionFactory;
	this.regionFactory = getSessionFactory().getSessionFactoryOptions().getServiceRegistry().getService( RegionFactory.class );
	// The RegionFactory must be started before any regions can be built below.
	this.regionFactory.start( sessionFactory.getSessionFactoryOptions(), sessionFactory.getProperties() );
	if ( getSessionFactory().getSessionFactoryOptions().isQueryCacheEnabled() ) {
		// Build the shared update-timestamps region and wrap it in the configured
		// TimestampsCache implementation.
		final TimestampsRegion timestampsRegion = regionFactory.buildTimestampsRegion(
				RegionFactory.DEFAULT_UPDATE_TIMESTAMPS_REGION_UNQUALIFIED_NAME,
				sessionFactory
		);
		timestampsCache = sessionFactory.getSessionFactoryOptions()
				.getTimestampsCacheFactory()
				.buildTimestampsCache( this, timestampsRegion );
		legacySecondLevelCacheNames.add( timestampsRegion.getName() );

		// Build the default query-results region; query-results caches depend on
		// the timestamps cache for invalidation checks.
		final QueryResultsRegion queryResultsRegion = regionFactory.buildQueryResultsRegion(
				RegionFactory.DEFAULT_QUERY_RESULTS_REGION_UNQUALIFIED_NAME,
				sessionFactory
		);
		regionsByName.put( queryResultsRegion.getName(), queryResultsRegion );
		defaultQueryResultsCache = new QueryResultsCacheImpl( queryResultsRegion, timestampsCache );
	}
	else {
		// Query cache disabled: no-op timestamps cache, no default query-results cache.
		timestampsCache = new TimestampsCacheDisabledImpl();
		defaultQueryResultsCache = null;
	}
}
@SuppressWarnings("WeakerAccess") protected void handleLockExpiry(SharedSessionContractImplementor session, Object key, Lockable lock) { SecondLevelCacheLogger.INSTANCE.softLockedCacheExpired( getRegion().getName(), key ); log.info( "Cached entry expired : " + key ); // create new lock that times out immediately long ts = getRegion().getRegionFactory().nextTimestamp() + getRegion().getRegionFactory().getTimeout(); SoftLockImpl newLock = new SoftLockImpl( ts, uuid, nextLockId.getAndIncrement(), null ); //newLock.unlock( ts ); newLock.unlock( ts - getRegion().getRegionFactory().getTimeout() ); getStorageAccess().putIntoCache( key, newLock, session ); }
// NOTE(review): mid-method fragment — the enclosing method is not visible in
// this chunk; variables such as accessStrategy, model, properties and the
// various maps are declared outside this span.
settings.getRegionFactory().start( settings, properties );
this.queryPlanCache = new QueryPlanCache( this );

LOG.tracev( "Building cache for entity data [{0}]", model.getEntityName() );
EntityRegion entityRegion = settings.getRegionFactory().buildEntityRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
accessStrategy = entityRegion.buildAccessStrategy( accessType );
entityAccessStrategies.put( cacheRegionName, accessStrategy );

// The natural-id region uses the RegionFactory's default access type rather
// than the entity's configured accessType.
final NaturalIdRegion naturalIdRegion = settings.getRegionFactory().buildNaturalIdRegion( naturalIdCacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
naturalIdAccessStrategy = naturalIdRegion.buildAccessStrategy( settings.getRegionFactory().getDefaultAccessType() );
entityAccessStrategies.put( naturalIdCacheRegionName, naturalIdAccessStrategy );
allCacheRegions.put( naturalIdCacheRegionName, naturalIdRegion );

LOG.tracev("Building cache for collection data [{0}]", model.getRole() );
CollectionRegion collectionRegion = settings.getRegionFactory().buildCollectionRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
// NOTE(review): accessStrategy is reassigned here, overwriting the entity
// access strategy stored above — confirm against the enclosing method.
accessStrategy = collectionRegion.buildAccessStrategy( accessType );
// NOTE(review): mid-method fragment — enclosing method not visible in this chunk.
settings.getRegionFactory().start( settings, properties );
this.queryPlanCache = new QueryPlanCache( this );

// Build the entity data region for this model's configured region name.
LOG.tracev( "Building cache for entity data [{0}]", model.getEntity().getName() );
EntityRegion entityRegion = settings.getRegionFactory().buildEntityRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );

// Build the collection data region (same region name variable — presumably
// rebound between these statements in the enclosing method; verify).
LOG.tracev( "Building cache for collection data [{0}]", model.getAttribute().getRole() );
CollectionRegion collectionRegion = settings.getRegionFactory().buildCollectionRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
protected QueryResultsRegion makeQueryResultsRegion(String regionName) { // make sure there is not an existing domain-data region with that name.. final Region existing = regionsByName.get( regionName ); if ( existing != null ) { if ( !QueryResultsRegion.class.isInstance( existing ) ) { throw new IllegalStateException( "Cannot store both domain-data and query-result-data in the same region [" + regionName ); } throw new IllegalStateException( "Illegal call to create QueryResultsRegion - one already existed" ); } return regionFactory.buildQueryResultsRegion( regionName, getSessionFactory() ); }
: serviceRegistry.getService( RegionFactory.class ).getDefaultAccessType() ); // fallback branch of a ternary: use the RegionFactory service's default access type
// Continuation of a cfgService.getSetting(...) call: minimal-puts defaults to
// the RegionFactory's own preference.
USE_MINIMAL_PUTS, BOOLEAN, regionFactory.isMinimalPutsEnabledByDefault() );
// Structured second-level cache entries are disabled unless explicitly enabled.
this.structuredCacheEntriesEnabled = cfgService.getSetting( USE_STRUCTURED_CACHE, BOOLEAN, false );
/**
 * Builds the update-timestamps cache region, qualifying the default region
 * name with the configured cache region prefix when one is set.
 *
 * @param settings the SessionFactory settings (region factory, prefix)
 * @param props    configuration properties passed to the region factory
 * @param factory  the owning SessionFactory
 * @throws HibernateException if region construction fails
 */
public UpdateTimestampsCache(Settings settings, Properties props, final SessionFactoryImplementor factory) throws HibernateException {
	this.factory = factory;
	String prefix = settings.getCacheRegionPrefix();
	// Prefix-qualify the region name only when a prefix is configured.
	String regionName = prefix == null ? REGION_NAME : prefix + '.' + REGION_NAME;
	LOG.startingUpdateTimestampsCache( regionName );
	this.region = settings.getRegionFactory().buildTimestampsRegion( regionName, props );
}

// NOTE(review): annotation belongs to the next member, which is outside this chunk.
@SuppressWarnings({"UnusedDeclaration"})
// Builds a DomainDataRegion for each configured region and registers it by name.
// NOTE(review): this block is truncated in the visible chunk — the loop and
// method bodies are not closed here.
@Override
public void prime(Set<DomainDataRegionConfig> cacheRegionConfigs) {
	for ( DomainDataRegionConfig regionConfig : cacheRegionConfigs ) {
		final DomainDataRegion region = getRegionFactory().buildDomainDataRegion( regionConfig, this );
		regionsByName.put( region.getName(), region );
/**
 * Soft-locks the given cache entry prior to update, creating a fresh lock when
 * no cached item exists or re-locking the existing item.
 *
 * @param session the session performing the lock
 * @param key     the cache key to lock
 * @param version the entity version, stored in the lock
 * @return the soft lock placed into the cache
 */
@Override
public SoftLock lockItem(SharedSessionContractImplementor session, Object key, Object version) {
	// Fixed: acquire the lock *before* entering try, per the standard
	// java.util.concurrent Lock idiom — otherwise a failure inside lock()
	// would reach finally and unlock a lock we never held.
	writeLock.lock();
	try {
		// Lock lease expires at now + configured timeout.
		long timeout = getRegion().getRegionFactory().nextTimestamp() + getRegion().getRegionFactory().getTimeout();
		log.debugf( "Locking cache item [region=`%s` (%s)] : `%s` (timeout=%s, version=%s)", getRegion().getName(), getAccessType(), key, timeout, version );
		Lockable item = (Lockable) getStorageAccess().getFromCache( key, session );
		final SoftLockImpl lock = ( item == null )
				? new SoftLockImpl( timeout, uuid, nextLockId(), version )
				: item.lock( timeout, uuid, nextLockId() );
		getStorageAccess().putIntoCache( key, lock, session );
		return lock;
	}
	finally {
		writeLock.unlock();
	}
}
// NOTE(review): mid-method fragment — the enclosing method is not visible in
// this chunk; variables such as accessStrategy, model, properties and the
// various maps are declared outside this span.
settings.getRegionFactory().start( settings, properties );
this.queryPlanCache = new QueryPlanCache( this );

LOG.tracev( "Building cache for entity data [{0}]", model.getEntityName() );
EntityRegion entityRegion = settings.getRegionFactory().buildEntityRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
accessStrategy = entityRegion.buildAccessStrategy( accessType );
entityAccessStrategies.put( cacheRegionName, accessStrategy );

// The natural-id region uses the RegionFactory's default access type rather
// than the entity's configured accessType.
final NaturalIdRegion naturalIdRegion = settings.getRegionFactory().buildNaturalIdRegion( naturalIdCacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
naturalIdAccessStrategy = naturalIdRegion.buildAccessStrategy( settings.getRegionFactory().getDefaultAccessType() );
entityAccessStrategies.put( naturalIdCacheRegionName, naturalIdAccessStrategy );
allCacheRegions.put( naturalIdCacheRegionName, naturalIdRegion );

LOG.tracev("Building cache for collection data [{0}]", model.getRole() );
CollectionRegion collectionRegion = settings.getRegionFactory().buildCollectionRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
// NOTE(review): accessStrategy is reassigned here, overwriting the entity
// access strategy stored above — confirm against the enclosing method.
accessStrategy = collectionRegion.buildAccessStrategy( accessType );
// NOTE(review): mid-method fragment — enclosing method not visible in this chunk.
settings.getRegionFactory().start( settings, properties );
this.queryPlanCache = new QueryPlanCache( this );

// Build the entity data region for this model's configured region name.
LOG.tracev( "Building cache for entity data [{0}]", model.getEntity().getName() );
EntityRegion entityRegion = settings.getRegionFactory().buildEntityRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );

// Build the collection data region (same region name variable — presumably
// rebound between these statements in the enclosing method; verify).
LOG.tracev( "Building cache for collection data [{0}]", model.getAttribute().getRole() );
CollectionRegion collectionRegion = settings.getRegionFactory().buildCollectionRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
protected QueryResultsRegion makeQueryResultsRegion(String regionName) { // make sure there is not an existing domain-data region with that name.. final Region existing = regionsByName.get( regionName ); if ( existing != null ) { if ( !QueryResultsRegion.class.isInstance( existing ) ) { throw new IllegalStateException( "Cannot store both domain-data and query-result-data in the same region [" + regionName ); } throw new IllegalStateException( "Illegal call to create QueryResultsRegion - one already existed" ); } return regionFactory.buildQueryResultsRegion( regionName, getSessionFactory() ); }
/**
 * Lazily resolves the default cache concurrency strategy from the configured
 * RegionFactory's default access type; the result is cached in a static field
 * and reused on subsequent calls.
 */
private static CacheConcurrencyStrategy determineCacheConcurrencyStrategy(Mappings mappings) {
	// Fast path: already resolved on an earlier call.
	if ( DEFAULT_CACHE_CONCURRENCY_STRATEGY != null ) {
		return DEFAULT_CACHE_CONCURRENCY_STRATEGY;
	}
	final RegionFactory regionFactory = SettingsFactory.createRegionFactory(
			mappings.getConfigurationProperties(),
			true
	);
	DEFAULT_CACHE_CONCURRENCY_STRATEGY = CacheConcurrencyStrategy.fromAccessType( regionFactory.getDefaultAccessType() );
	return DEFAULT_CACHE_CONCURRENCY_STRATEGY;
}
// Continuation of a cfgService.getSetting(...) call: minimal-puts defaults to
// the RegionFactory's own preference.
USE_MINIMAL_PUTS, BOOLEAN, regionFactory.isMinimalPutsEnabledByDefault() );
// Structured second-level cache entries are disabled unless explicitly enabled.
this.structuredCacheEntriesEnabled = cfgService.getSetting( USE_STRUCTURED_CACHE, BOOLEAN, false );
/**
 * Builds the update-timestamps cache region, qualifying the default region
 * name with the configured cache region prefix when one is set.
 *
 * @param settings the SessionFactory settings (region factory, prefix)
 * @param props    configuration properties passed to the region factory
 * @param factory  the owning SessionFactory
 * @throws HibernateException if region construction fails
 */
public UpdateTimestampsCache(Settings settings, Properties props, final SessionFactoryImplementor factory) throws HibernateException {
	this.factory = factory;
	String prefix = settings.getCacheRegionPrefix();
	// Prefix-qualify the region name only when a prefix is configured.
	String regionName = prefix == null ? REGION_NAME : prefix + '.' + REGION_NAME;
	LOG.startingUpdateTimestampsCache( regionName );
	this.region = settings.getRegionFactory().buildTimestampsRegion( regionName, props );
}

// NOTE(review): annotation belongs to the next member, which is outside this chunk.
@SuppressWarnings({"UnusedDeclaration"})
// Builds a DomainDataRegion for each configured region and registers it by name.
// NOTE(review): this block is truncated in the visible chunk — the loop and
// method bodies are not closed here.
@Override
public void prime(Set<DomainDataRegionConfig> cacheRegionConfigs) {
	for ( DomainDataRegionConfig regionConfig : cacheRegionConfigs ) {
		final DomainDataRegion region = getRegionFactory().buildDomainDataRegion( regionConfig, this );
		regionsByName.put( region.getName(), region );
@Override
public long nextTimestamp() {
	// Timestamp generation is delegated entirely to the RegionFactory.
	final long timestamp = regionFactory.nextTimestamp();
	return timestamp;
}
@Override public void preInvalidate( String[] spaces, SharedSessionContractImplementor session) { final SessionFactoryImplementor factory = session.getFactory(); final RegionFactory regionFactory = factory.getCache().getRegionFactory(); final boolean stats = factory.getStatistics().isStatisticsEnabled(); final Long ts = regionFactory.nextTimestamp() + regionFactory.getTimeout(); for ( Serializable space : spaces ) { if ( DEBUG_ENABLED ) { log.debugf( "Pre-invalidating space [%s], timestamp: %s", space, ts ); } try { session.getEventListenerManager().cachePutStart(); //put() has nowait semantics, is this really appropriate? //note that it needs to be async replication, never local or sync timestampsRegion.putIntoCache( space, ts, session ); } finally { session.getEventListenerManager().cachePutEnd(); } if ( stats ) { factory.getStatistics().updateTimestampsCachePut(); } } }