/**
 * Returns the region attributes of the underlying proxy region.
 */
@Override
public RegionAttributes getAttributes() {
  return proxy.getAttributes();
}
/**
 * Returns the region attributes of the enclosing partitioned region.
 */
@Override
public RegionAttributes getAttributes() {
  return partitionedRegion.getAttributes();
}
/**
 * Copies the disk-related settings (synchronous-write flag and disk store name) from the
 * partitioned region's attributes onto the given bucket attributes factory.
 */
private void setDiskAttributes(BucketAttributesFactory factory) {
  RegionAttributes prAttributes = partitionedRegion.getAttributes();
  factory.setDiskSynchronous(prAttributes.isDiskSynchronous());
  factory.setDiskStoreName(prAttributes.getDiskStoreName());
}
/**
 * Validates that the persistence setting for datastores matches between members: every
 * accessor-datastore node already registered in the configuration must agree with this
 * member on whether the region's data policy is {@code PERSISTENT_PARTITION}.
 *
 * @param prconf the existing partition region configuration; may be null when no other
 *        member has registered yet
 * @throws IllegalStateException if this member's persistence setting disagrees with any
 *         existing datastore node
 */
void validatePersistentMatchBetweenDataStores(PartitionRegionConfig prconf) {
  // Accessors (localMaxMemory == 0) host no data, so no match is required; likewise
  // there is nothing to validate against when no configuration exists yet.
  if (pr.getLocalMaxMemory() == 0 || prconf == null) {
    return;
  }
  // Compute once; the original recomputed this comparison for every node while the
  // local it had already computed went unused.
  final boolean isPersistent =
      pr.getAttributes().getDataPolicy() == DataPolicy.PERSISTENT_PARTITION;
  for (Node n : prconf.getNodes()) {
    // Only datastore nodes hold buckets; pure accessors may differ freely.
    if (n.getPRType() != Node.ACCESSOR_DATASTORE) {
      continue;
    }
    if (n.isPersistent() != isPersistent) {
      throw new IllegalStateException(
          "DataPolicy for Datastore members should all be persistent or not.");
    }
  }
}
continue; } else { if (n.isCacheLoaderAttached() && pr.getAttributes().getCacheLoader() == null) { throw new IllegalStateException( String.format( new Object[] {this.pr.getName()})); if (!n.isCacheLoaderAttached() && pr.getAttributes().getCacheLoader() != null) { throw new IllegalStateException( String.format( new Object[] {this.pr.getName()})); if (n.isCacheWriterAttached() && pr.getAttributes().getCacheWriter() == null) { throw new IllegalStateException( String.format( new Object[] {this.pr.getName()})); if (!n.isCacheWriterAttached() && pr.getAttributes().getCacheWriter() != null) { throw new IllegalStateException( String.format(
/**
 * Creates PartitionedRegionDataStore for dataStorage of PR and starts a PartitionService to
 * handle remote operations on this DataStore from other participating nodes.
 *
 * @param pr PartitionedRegion associated with this DataStore.
 */
PartitionedRegionDataStore(final PartitionedRegion pr) {
  final int bucketCount = pr.getTotalNumberOfBuckets();
  this.localBucket2RegionMap = new ConcurrentHashMap<Integer, BucketRegion>(bucketCount);
  this.partitionedRegion = pr;
  this.bucketCreationLock = new StoppableReentrantReadWriteLock(pr.getCancelCriterion());
  // Install the cache loader configured on the PR's attributes, if any (read once
  // instead of calling the getter chain twice).
  final CacheLoader configuredLoader = pr.getAttributes().getCacheLoader();
  if (configuredLoader != null) {
    this.loader = configuredLoader;
    if (logger.isDebugEnabled()) {
      logger.debug("Installing cache loader from partitioned region attributes: {}",
          configuredLoader);
    }
  }
  // Local memory budget in bytes for this member's share of the region.
  this.maximumLocalBytes = (pr.getLocalMaxMemory() * PartitionedRegionHelper.BYTES_PER_MB);
  this.bucketStats =
      new RegionPerfStats(pr.getCache(), pr.getCachePerfStats(), "partition-" + pr.getName());
  this.keysOfInterest = new ConcurrentHashMap<>();
}
/**
 * Creates a Lucene index on an already-existing region. Validates the region's attributes
 * and the new index's creation profile against other cache members, registers the index's
 * AEQ id with the PR configuration, then creates and wires up the index's data regions.
 *
 * @param region the existing partitioned region to index
 * @param indexName name of the index to create
 * @param regionPath full path of the region being indexed
 * @param fields the fields to index
 * @param analyzer the default analyzer
 * @param fieldAnalyzers per-field analyzer overrides, may be null
 * @param serializer converts region values into Lucene documents
 */
private void createIndexOnExistingRegion(PartitionedRegion region, String indexName,
    String regionPath, String[] fields, Analyzer analyzer,
    Map<String, Analyzer> fieldAnalyzers, LuceneSerializer serializer) {
  validateRegionAttributes(region.getAttributes());

  LuceneIndexCreationProfile luceneIndexCreationProfile = new LuceneIndexCreationProfile(
      indexName, regionPath, fields, analyzer, fieldAnalyzers, serializer);
  Runnable validateIndexProfile =
      getIndexValidationRunnable(region, indexName, luceneIndexCreationProfile);
  // Validate under the cache-profile lock so concurrent index creation on other
  // members is serialized against this one.
  region.executeSynchronizedOperationOnCacheProfiles(validateIndexProfile);

  String aeqId = LuceneServiceImpl.getUniqueIndexName(indexName, regionPath);
  region.updatePRConfigWithNewGatewaySender(aeqId);
  LuceneIndexImpl luceneIndex = beforeDataRegionCreated(indexName, regionPath,
      region.getAttributes(), analyzer, fieldAnalyzers, aeqId, serializer, fields);

  try {
    afterDataRegionCreated(luceneIndex);
  } catch (LuceneIndexDestroyedException e) {
    // Index was concurrently destroyed: log and bail out. Use parameterized logging
    // rather than eager String.format so the message is only built when emitted.
    logger.warn("Lucene index {} on region {} was destroyed while being created",
        indexName, regionPath);
    return;
  }

  createLuceneIndexOnDataRegion(region, luceneIndex);
}
/**
 * Includes only partitioned regions that use heap-LRU eviction, actually host data on
 * this member (non-null data store), and store their values off-heap.
 */
@Override
protected boolean includePartitionedRegion(PartitionedRegion region) {
  if (!region.getEvictionAttributes().getAlgorithm().isLRUHeap()) {
    return false;
  }
  if (region.getDataStore() == null) {
    return false;
  }
  return region.getAttributes().getOffHeap();
}
/**
 * Includes only partitioned regions that use heap-LRU eviction, actually host data on
 * this member (non-null data store), and do NOT store their values off-heap.
 */
protected boolean includePartitionedRegion(PartitionedRegion region) {
  if (!region.getEvictionAttributes().getAlgorithm().isLRUHeap()) {
    return false;
  }
  if (region.getDataStore() == null) {
    return false;
  }
  return !region.getAttributes().getOffHeap();
}
final PartitionAttributes userPA = pr.getAttributes().getPartitionAttributes(); validatePartitionResolver(prconf, userPA); validateColocatedWith(prconf, userPA); validateExpirationAttributes(pr.getAttributes(), prconf);
if (pr.getAttributes().getDataPolicy().withPersistence() || !pr.getAttributes().getEvictionAttributes().getAlgorithm().isNone()) { return ClusterDistributionManager.PARTITIONED_REGION_EXECUTOR;
/**
 * Builds a mocked PartitionedRegion wired into the given cache for test scenarios:
 * a spied RegionAttributes (persistent or not, per {@code withPersistence}) backed by
 * real PartitionAttributes with the requested local max memory and 103 buckets.
 *
 * @param withPersistence whether the region attributes should be persistent
 * @param regionPath path under which the cache returns the mocked region
 * @param cache the (mocked) cache to stub
 * @param localMaxMemory local max memory for the partition attributes
 * @return the mocked partitioned region
 */
private Region initializeScenario(final boolean withPersistence, final String regionPath,
    final Cache cache, int localMaxMemory) {
  PartitionedRegion mockRegion = mock(PartitionedRegion.class);
  ExtensionPoint mockExtensionPoint = mock(ExtensionPoint.class);
  PartitionAttributes prAttributes = new PartitionAttributesFactory()
      .setLocalMaxMemory(localMaxMemory)
      .setTotalNumBuckets(103)
      .create();
  // Spy so individual attribute getters can be stubbed below.
  RegionAttributes attributes = spy(createRegionAttributes(withPersistence, prAttributes));

  when(cache.getRegion(regionPath)).thenReturn(mockRegion);
  when(cache.getRegionAttributes(any())).thenReturn(attributes);
  when(mockRegion.getAttributes()).thenReturn(attributes);
  when(attributes.getPartitionAttributes()).thenReturn(prAttributes);
  when(mockRegion.getPartitionAttributes()).thenReturn(prAttributes);
  when(mockRegion.getExtensionPoint()).thenReturn(mockExtensionPoint);
  return mockRegion;
}
/**
 * Drives index creation through a TestLuceneServiceImpl against a fully-mocked cache and
 * region, asserting (via the test subclass's behavior) that the user region is not set
 * before the index is initialized.
 */
@Test
public void userRegionShouldNotBeSetBeforeIndexInitialized() throws Exception {
  TestLuceneServiceImpl testService = new TestLuceneServiceImpl();
  // Inject the mocked cache into the service's private "cache" field via reflection,
  // bypassing the normal initialization path.
  Field f = LuceneServiceImpl.class.getDeclaredField("cache");
  f.setAccessible(true);
  f.set(testService, cache);
  AsyncEventQueueFactoryImpl aeqFactory = mock(AsyncEventQueueFactoryImpl.class);
  when(cache.createAsyncEventQueueFactory()).thenReturn(aeqFactory);
  DistributedSystem ds = mock(DistributedSystem.class);
  Statistics luceneIndexStats = mock(Statistics.class);
  when(cache.getDistributedSystem()).thenReturn(ds);
  // The service obtains its stats through the StatisticsFactory face of the DS.
  when(((StatisticsFactory) ds).createAtomicStatistics(any(), anyString()))
      .thenReturn(luceneIndexStats);
  when(cache.getRegion(anyString())).thenReturn(region);
  when(cache.getDistributionManager()).thenReturn(mock(DistributionManager.class));
  // Stubbed AFTER the previous line so the chained call hits the same DM mock; gives
  // the service a real executor for its waiting-thread-pool work.
  when(cache.getDistributionManager().getWaitingThreadPool())
      .thenReturn(Executors.newSingleThreadExecutor());
  RegionAttributes ratts = mock(RegionAttributes.class);
  when(region.getAttributes()).thenReturn(ratts);
  when(ratts.getDataPolicy()).thenReturn(DataPolicy.PARTITION);
  EvictionAttributes evictionAttrs = mock(EvictionAttributes.class);
  when(ratts.getEvictionAttributes()).thenReturn(evictionAttrs);
  when(evictionAttrs.getAlgorithm()).thenReturn(EvictionAlgorithm.NONE);
  // Null analyzers: exercise the default-analyzer path for both fields.
  Map<String, Analyzer> fieldMap = new HashMap<String, Analyzer>();
  fieldMap.put("field1", null);
  fieldMap.put("field2", null);
  testService.createIndex("index", "region", fieldMap, null, true);
}
candidate = candidateMembers.iterator().next(); } else { String prName = this.prRegion.getAttributes().getPartitionAttributes().getColocatedWith(); if (prName != null) { candidate = getColocatedDataStore(candidateMembers, alreadyUsed, bucketId, prName);
if (region.getAttributes().getDataPolicy().withPersistence()) { diskStore = region.getDiskStore(); } else if (ColocationHelper.getLeaderRegion(region).getAttributes().getDataPolicy() .withPersistence()) { diskStore = ColocationHelper.getLeaderRegion(region).getDiskStore();
void validateColocation() { final PartitionAttributesImpl userPA = (PartitionAttributesImpl) pr.getAttributes().getPartitionAttributes(); if (pr.getAttributes().getDataPolicy().withPersistence()) { if (!colocatedPR.getDataPolicy().withPersistence()) { throw new IllegalStateException(
/**
 * Lazily builds and initializes the per-bucket proxy regions for this advisor's
 * partitioned region. Idempotent: a no-op once the bucket array has been created.
 */
public synchronized void initializeRegionAdvisor() {
  if (this.buckets != null) {
    return; // already initialized
  }
  final PartitionedRegion region = getPartitionedRegion();
  final int totalBuckets =
      region.getAttributes().getPartitionAttributes().getTotalNumBuckets();
  final InternalRegionArguments regionArgs = new InternalRegionArguments();
  regionArgs.setPartitionedRegionAdvisor(this);
  final ProxyBucketRegion[] proxyBuckets = new ProxyBucketRegion[totalBuckets];
  for (int bucketId = 0; bucketId < totalBuckets; bucketId++) {
    proxyBuckets[bucketId] = new ProxyBucketRegion(bucketId, region, regionArgs);
    proxyBuckets[bucketId].initialize();
  }
  this.buckets = proxyBuckets;
}
final EvictionAttributes ea = pr.getAttributes().getEvictionAttributes();
partitionedRegion.getGemFireCache().getPartitionedRegionLockService(); DiskStoreImpl ds = partitionedRegion.getDiskStore(); EvictionAttributes ea = partitionedRegion.getAttributes().getEvictionAttributes(); EnumSet<DiskRegionFlag> diskFlags = EnumSet.noneOf(DiskRegionFlag.class); overflowEnabled, partitionedRegion.isDiskSynchronous(), partitionedRegion.getDiskRegionStats(), partitionedRegion.getCancelCriterion(), partitionedRegion, partitionedRegion.getAttributes(), diskFlags, partitionName, startingBucketID, partitionedRegion.getCompressor(), partitionedRegion.getOffHeap());
initializeDataStore(this.getAttributes());