/**
 * Rejects member-local expiration actions on regions whose data policy keeps
 * copies on multiple members (replicated or partitioned), since a local-only
 * destroy/invalidate would desynchronize those copies.
 *
 * @param mode label used in the error message (e.g. the attribute being set)
 * @param ea the expiration action to validate
 * @throws IllegalArgumentException if the action is local-only on a
 *         replicated/partitioned region
 */
void checkEntryTimeoutAction(String mode, ExpirationAction ea) {
  boolean multiMemberPolicy =
      this.getDataPolicy().withReplication() || this.getDataPolicy().withPartitioning();
  boolean localOnlyAction =
      ea == ExpirationAction.LOCAL_DESTROY || ea == ExpirationAction.LOCAL_INVALIDATE;
  if (multiMemberPolicy && localOnlyAction) {
    throw new IllegalArgumentException(
        String.format("%s action is incompatible with this region's data policy.", mode));
  }
}
/**
 * Returns the set of bucket ids hosted locally for the given region, or
 * {@code null} when the region is not partitioned (buckets only exist for
 * partitioned regions).
 */
@Override
public <K, V> Set<Integer> getLocalBucketSet(Region<K, V> region) {
  if (region.getAttributes().getDataPolicy().withPartitioning()) {
    return this.localBucketSet;
  }
  return null;
}
}
/**
 * Maps the region's data policy to the deprecated {@link MirrorType} API:
 * NORMAL/PRELOADED/EMPTY/PARTITION have no mirror equivalent (NONE), any
 * replicated policy maps to KEYS_VALUES.
 *
 * @throws IllegalStateException if the data policy matches no known mapping
 */
@Override
public MirrorType getMirrorType() {
  DataPolicy policy = this.dataPolicy;
  if (policy.isNormal() || policy.isPreloaded() || policy.isEmpty()
      || policy.withPartitioning()) {
    return MirrorType.NONE;
  }
  if (policy.withReplication()) {
    return MirrorType.KEYS_VALUES;
  }
  throw new IllegalStateException(
      String.format("No mirror type corresponds to data policy %s", policy));
}
/**
 * Maps the region's data policy to the deprecated {@link MirrorType} API:
 * NORMAL/PRELOADED/EMPTY/PARTITION have no mirror equivalent (NONE), any
 * replicated policy maps to KEYS_VALUES.
 *
 * @throws IllegalStateException if the data policy matches no known mapping
 */
@Override
@SuppressWarnings("deprecation")
public MirrorType getMirrorType() {
  DataPolicy policy = this.dataPolicy;
  if (policy.isNormal() || policy.isPreloaded() || policy.isEmpty()
      || policy.withPartitioning()) {
    return MirrorType.NONE;
  }
  if (policy.withReplication()) {
    return MirrorType.KEYS_VALUES;
  }
  throw new IllegalStateException(
      String.format("No mirror type corresponds to data policy %s.", policy));
}
@Override public MirrorType getMirrorType() { // checkReadiness(); if (this.dataPolicy.isNormal() || this.dataPolicy.isPreloaded() || this.dataPolicy.isEmpty() || this.dataPolicy.withPartitioning()) { return MirrorType.NONE; } else if (this.dataPolicy.withReplication()) { return MirrorType.KEYS_VALUES; } else { throw new IllegalStateException( String.format("No mirror type corresponds to data policy %s", this.dataPolicy)); } }
/**
 * A snapshot runs in parallel only when the caller asked for parallel mode,
 * the region is partitioned, and we are not already scoped to a
 * {@code LocalDataSet} (i.e. not already inside a function execution).
 */
private boolean shouldRunInParallel(SnapshotOptions<K, V> options) {
  if (!options.isParallelMode()) {
    return false;
  }
  if (!region.getAttributes().getDataPolicy().withPartitioning()) {
    return false;
  }
  return !(region instanceof LocalDataSet);
}
static void validateRegionAttributes(RegionAttributes attrs) { if (!attrs.getDataPolicy().withPartitioning()) { // replicated region throw new UnsupportedOperationException( "Lucene indexes on replicated regions are not supported"); } // For now we cannot support eviction with local destroy. // Eviction with overflow to disk still needs to be supported EvictionAttributes evictionAttributes = attrs.getEvictionAttributes(); EvictionAlgorithm evictionAlgorithm = evictionAttributes.getAlgorithm(); if (evictionAlgorithm != EvictionAlgorithm.NONE && evictionAttributes.getAction().isLocalDestroy()) { throw new UnsupportedOperationException( "Lucene indexes on regions with eviction and action local destroy are not supported"); } }
private void refreshFields() { this.ra = this.r.getAttributes(); if (getStatisticsEnabled() && !this.ra.getDataPolicy().withPartitioning()) { this.rs = this.r.getStatistics(); } else { this.rs = null; } { // set subregionNames Set s = this.r.subregions(false); Set names = new TreeSet(); Set paths = new TreeSet(); Iterator it = s.iterator(); while (it.hasNext()) { Region r = (Region) it.next(); String name = r.getName(); names.add(name); paths.add(this.getFullPath() + Region.SEPARATOR_CHAR + name); } this.subregionNames = names; this.subregionFullPaths = paths; } try { int[] sizes = this.r.sizes(); this.entryCount = sizes[0]; this.subregionCount = sizes[1]; } catch (CacheException ignore) { this.entryCount = 0; this.subregionCount = 0; } }
/**
 * Maps the region's data policy to the deprecated {@link MirrorType} API:
 * NORMAL/PRELOADED/EMPTY/PARTITION have no mirror equivalent (NONE), any
 * replicated policy maps to KEYS_VALUES.
 *
 * @throws IllegalStateException if the data policy matches no known mapping
 */
@Override
public MirrorType getMirrorType() {
  DataPolicy policy = this.getDataPolicy();
  if (policy.isNormal() || policy.isPreloaded() || policy.isEmpty()
      || policy.withPartitioning()) {
    return MirrorType.NONE;
  }
  if (policy.withReplication()) {
    return MirrorType.KEYS_VALUES;
  }
  throw new IllegalStateException(
      String.format("No mirror type corresponds to data policy %s", policy));
}
/**
 * Records the data policy. When the policy is partitioned and no partition
 * attributes were supplied, a default PartitionAttributes is installed while
 * the "has partition attributes" flag is deliberately reset to false so the
 * default is not mistaken for an explicit user setting.
 */
public void setDataPolicy(DataPolicy dataPolicy) {
  this.dataPolicy = dataPolicy;
  setHasDataPolicy(true);
  boolean needsDefaultPartitionAttributes =
      this.dataPolicy.withPartitioning() && !this.hasPartitionAttributes();
  if (needsDefaultPartitionAttributes) {
    setPartitionAttributes(new PartitionAttributesFactory().create());
    setHasPartitionAttributes(false);
  }
}
/** * Sets whether or not a persistent backup should be made of the region. * * @since GemFire 3.2 * @deprecated as of GemFire 5.0, use {@link DataPolicy#PERSISTENT_REPLICATE} instead */ @Deprecated public void setPersistBackup(boolean persistBackup) { if (persistBackup) { if (!this.regionAttributes.getDataPolicy().withPersistence()) { if (this.regionAttributes.getDataPolicy().withPartitioning()) { setDataPolicy(DataPolicy.PERSISTENT_PARTITION); } else { setDataPolicy(DataPolicy.PERSISTENT_REPLICATE); } } } else { // It is less clear what we should do here for backwards compat. // If the current data policy is persist then we need to change it // otherwise just leave it alone if (this.regionAttributes.getDataPolicy().withReplication()) { setDataPolicy(DataPolicy.REPLICATE); } else if (this.regionAttributes.getDataPolicy().withPartitioning()) { setDataPolicy(DataPolicy.PARTITION); } } }
/**
 * Replaces the region's idle-timeout expiration attributes and returns the
 * previous value. Validation runs in a fixed order: readiness, null check,
 * PR-specific checks, policy compatibility, then statistics-enabled.
 *
 * @param idleTimeout the new idle-timeout attributes (must not be null)
 * @return the attributes in effect before this call
 * @throws IllegalArgumentException if {@code idleTimeout} is null, or its
 *         action is incompatible with the region's data policy
 * @throws IllegalStateException if statistics are disabled
 */
@Override
public ExpirationAttributes setRegionIdleTimeout(ExpirationAttributes idleTimeout) {
  checkReadiness();
  if (idleTimeout == null) {
    throw new IllegalArgumentException(
        "idleTimeout must not be null");
  }
  if (this.getAttributes().getDataPolicy().withPartitioning()) {
    validatePRRegionExpirationAttributes(idleTimeout);
  }
  boolean localInvalidate = idleTimeout.getAction() == ExpirationAction.LOCAL_INVALIDATE;
  if (localInvalidate && this.getDataPolicy().withReplication()) {
    throw new IllegalArgumentException(
        String.format("%s action is incompatible with this region's data policy.",
            "idleTimeout"));
  }
  if (!this.statisticsEnabled) {
    throw new IllegalStateException(
        "Cannot set idle timeout when statistics are disabled.");
  }
  ExpirationAttributes previous = getRegionIdleTimeout();
  this.regionIdleTimeout = idleTimeout.getTimeout();
  this.regionIdleTimeoutExpirationAction = idleTimeout.getAction();
  this.setRegionIdleTimeoutAttributes();
  // Notify listeners with the attributes that were replaced.
  regionIdleTimeoutChanged(previous);
  return previous;
}
public void setPersistBackup(boolean persistBackup) { if (persistBackup) { if (!getDataPolicy().withPersistence()) { if (getDataPolicy().withPartitioning()) { setDataPolicy(DataPolicy.PERSISTENT_PARTITION); } else { setDataPolicy(DataPolicy.PERSISTENT_REPLICATE); } } } else { // It is less clear what we should do here for backwards compat. // If the current data policy is persist then we need to change it // otherwise just leave it alone if (getDataPolicy().withReplication()) { setDataPolicy(DataPolicy.REPLICATE); } else if (getDataPolicy().withPartitioning()) { setDataPolicy(DataPolicy.PARTITION); } } }
/**
 * Replaces the region's time-to-live expiration attributes and returns the
 * previous value. Validation runs in a fixed order: readiness, null check,
 * PR-specific checks, policy compatibility, then statistics-enabled.
 *
 * @param timeToLive the new time-to-live attributes (must not be null)
 * @return the attributes in effect before this call
 * @throws IllegalArgumentException if {@code timeToLive} is null, or its
 *         action is incompatible with the region's data policy
 * @throws IllegalStateException if statistics are disabled
 */
@Override
public ExpirationAttributes setRegionTimeToLive(ExpirationAttributes timeToLive) {
  checkReadiness();
  if (timeToLive == null) {
    throw new IllegalArgumentException(
        "timeToLive must not be null");
  }
  if (this.getAttributes().getDataPolicy().withPartitioning()) {
    validatePRRegionExpirationAttributes(timeToLive);
  }
  if (timeToLive.getAction() == ExpirationAction.LOCAL_INVALIDATE
      && this.getDataPolicy().withReplication()) {
    throw new IllegalArgumentException(
        String.format("%s action is incompatible with this region's data policy.",
            "timeToLive"));
  }
  if (!this.statisticsEnabled) {
    throw new IllegalStateException(
        "Cannot set time to live when statistics are disabled");
  }
  ExpirationAttributes oldAttrs = getRegionTimeToLive();
  this.regionTimeToLive = timeToLive.getTimeout();
  this.regionTimeToLiveExpirationAction = timeToLive.getAction();
  this.setRegionTimeToLiveAtts();
  // FIX: pass the replaced (old) attributes to the changed-hook, matching the
  // parallel setRegionIdleTimeout implementation which calls
  // regionIdleTimeoutChanged(oldAttrs). The original passed the NEW value.
  regionTimeToLiveChanged(oldAttrs);
  return oldAttrs;
}
@Test
public void beforeDataRegionCreatedShouldHaveSerializer() {
  // Verifies that when the listener intercepts creation of the data region,
  // it forwards the user-supplied LuceneSerializer to the service.
  String name = "indexName";
  String regionPath = "regionName";
  String[] fields = {"field1", "field2"};
  String aeqId = LuceneServiceImpl.getUniqueIndexName(name, regionPath);
  // Fake cache/region whose attributes report a partitioned data policy,
  // since Lucene indexes require partitioned regions.
  InternalCache cache = Fakes.cache();
  final Region region = Fakes.region(regionPath, cache);
  RegionAttributes attributes = region.getAttributes();
  DataPolicy policy = attributes.getDataPolicy();
  when(policy.withPartitioning()).thenReturn(true);
  // No eviction configured, so attribute validation passes.
  EvictionAttributes evictionAttributes = mock(EvictionAttributes.class);
  when(attributes.getEvictionAttributes()).thenReturn(evictionAttributes);
  // The region already lists the index's AEQ id among its async event queues.
  CopyOnWriteArraySet set = new CopyOnWriteArraySet();
  set.add(aeqId);
  when(attributes.getAsyncEventQueueIds()).thenReturn(set);
  when(evictionAttributes.getAlgorithm()).thenReturn(EvictionAlgorithm.NONE);
  LuceneServiceImpl service = mock(LuceneServiceImpl.class);
  Analyzer analyzer = mock(Analyzer.class);
  LuceneSerializer serializer = mock(LuceneSerializer.class);
  InternalRegionArguments internalRegionArgs = mock(InternalRegionArguments.class);
  when(internalRegionArgs.addCacheServiceProfile(any())).thenReturn(internalRegionArgs);
  LuceneRegionListener listener = new LuceneRegionListener(service, cache, name,
      "/" + regionPath, fields, analyzer, null, serializer);
  listener.beforeCreate(null, regionPath, attributes, internalRegionArgs);
  // The serializer passed to the listener must reach beforeDataRegionCreated.
  verify(service).beforeDataRegionCreated(eq(name), eq("/" + regionPath), eq(attributes),
      eq(analyzer), any(), eq(aeqId), eq(serializer), any());
}
}
dataPolicy = regionDescPerMember.getDataPolicy(); name = regionDescPerMember.getName(); isPartition = dataPolicy.withPartitioning(); isPersistent = dataPolicy.withPersistence(); isReplicate = dataPolicy.withReplication();
static <K, V> Exporter<K, V> createExporter(InternalCache cache, Region<?, ?> region, SnapshotOptions<K, V> options) { String pool = region.getAttributes().getPoolName(); if (pool != null) { return new ClientExporter<>(PoolManager.find(pool)); } else if (cache.getInternalDistributedSystem().isLoner() || region.getAttributes().getDataPolicy().equals(DataPolicy.NORMAL) || region.getAttributes().getDataPolicy().equals(DataPolicy.PRELOADED) || region instanceof LocalDataSet || (options.isParallelMode() && region.getAttributes().getDataPolicy().withPartitioning())) { // Avoid function execution: // for loner systems to avoid inlining fn execution // for NORMAL/PRELOAD since they don't support fn execution // for LocalDataSet since we're already running a fn // for parallel ops since we're already running a fn return new LocalExporter<>(); } return new WindowedExporter<>(); }
/** * @return true if the event should not be tracked, false otherwise */ private boolean ignoreEvent(InternalCacheEvent event, EventID eventID) { if (eventID == null) { return true; } else { boolean isVersioned = (event.getVersionTag() != null); boolean isClient = event.hasClientOrigin(); if (isVersioned && isClient) { return false; // version tags for client events are kept for retries by the client } boolean isEntry = event.getOperation().isEntry(); boolean isPr = event.getRegion().getAttributes().getDataPolicy().withPartitioning() || ((LocalRegion) event.getRegion()).isUsedForPartitionedRegionBucket(); return (!isClient && // ignore if it originated on a server, and isEntry && // it affects an entry and !isPr); // is not on a PR } }
if (rgn.getAttributes().getDataPolicy().withPartitioning()) {
@Override public CliFunctionResult executeFunction(FunctionContext<Object[]> context) throws Exception { JdbcConnectorService service = FunctionContextArgumentProvider.getJdbcConnectorService(context); // input Object[] arguments = context.getArguments(); RegionMapping regionMapping = (RegionMapping) arguments[0]; boolean synchronous = (boolean) arguments[1]; String regionName = regionMapping.getRegionName(); Region<?, ?> region = verifyRegionExists(context.getCache(), regionName); // action String queueName = CreateMappingCommand.createAsyncEventQueueName(regionName); if (!synchronous) { createAsyncEventQueue(context.getCache(), queueName, region.getAttributes().getDataPolicy().withPartitioning()); } alterRegion(region, queueName, synchronous); createRegionMapping(service, regionMapping); // output String member = context.getMemberName(); String message = "Created JDBC mapping for region " + regionMapping.getRegionName() + " on " + member; return new CliFunctionResult(member, true, message); }