@Override
public void run() {
  RegionFactory fact = getCache().createRegionFactory(RegionShortcut.PARTITION);
  fact.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
  fact.addCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterInvalidate(EntryEvent event) {
      getLogWriter().info("afterInvalidate invoked with " + event);
      InvalidateInvoked = true;
    }
  });
  fact.create(name + "Region");
}
});
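// A minimal sketch, not part of the original test, of how the afterInvalidate callback
// registered above might be exercised: putting and then invalidating an entry fires
// afterInvalidate on members whose subscription uses InterestPolicy.ALL. The key and
// value below are illustrative assumptions.
Region<String, String> region = getCache().getRegion(name + "Region");
region.put("key1", "value1");
region.invalidate("key1"); // expected to set InvalidateInvoked in the listener above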
public void run() {
  props.setProperty(DistributionConfig.NAME_NAME, "sleeper");
  getSystem(props);
  RegionFactory rf = new RegionFactory();
  Region r = rf.setScope(Scope.DISTRIBUTED_ACK)
      .setDataPolicy(DataPolicy.REPLICATE)
      .setEarlyAck(false)
      .addCacheListener(getSleepingListener(false))
      .create("testRegion");
  myCache = r.getCache();
  try {
    createAlertListener();
  } catch (Exception e) {
    throw new RuntimeException("failed to create alert listener", e);
  }
}
});
factory.addCacheListener(rml);
final Region<String, Number> region = factory.create(regionName);
factory.addCacheListener(cl);
final Region<String, Number> region = factory.create(regionName);
public void run() {
  props.setProperty(DistributionConfig.NAME_NAME, "sleeper");
  getSystem(props);
  LogWriter log = system.getLogWriter();
  log.info("<ExpectedException action=add>service failure</ExpectedException>");
  log.info("<ExpectedException action=add>com.gemstone.gemfire.ForcedDisconnectException</ExpectedException>");
  RegionFactory rf = new RegionFactory();
  Region r = rf.setScope(Scope.DISTRIBUTED_ACK)
      .setDataPolicy(DataPolicy.REPLICATE)
      .addCacheListener(getSleepingListener(true))
      .create("testRegion");
  myCache = r.getCache();
}
});
/**
 * Creates a region and assigns a compressor.
 *
 * @param name a region name.
 * @param compressor a compressor.
 * @param offHeap whether the region stores its values in off-heap memory.
 * @return the newly created region.
 */
private Region createRegion(String name, Compressor compressor, boolean offHeap) {
  return getCache().<String, String> createRegionFactory()
      .addCacheListener(CACHE_LISTENER)
      .setCacheWriter(CACHE_WRITER)
      .setDataPolicy(DataPolicy.REPLICATE)
      .setCompressor(compressor)
      .setEnableOffHeapMemory(offHeap)
      .create(name);
}
}
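// Hedged usage sketch of the helper above (not from the original test): SnappyCompressor
// is the compressor implementation shipped with GemFire; the region name and the on-heap
// choice are illustrative assumptions.
Region compressed = createRegion("compressedRegion", new SnappyCompressor(), false);
compressed.put("key", "a value that is compressed before being stored");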
public void testAddCacheListener() throws CacheException, IOException {
  Cache c = createCache();
  Region r1 = null;
  try {
    RegionFactory factory = c.createRegionFactory(REPLICATE_PROXY);
    CacheListener cl = new MyCacheListener();
    r1 = factory.addCacheListener(cl).create(this.r1Name);
    RegionAttributes ra = r1.getAttributes();
    assertEquals(cl, ra.getCacheListener());
  } finally {
    cleanUpRegion(r1);
  }
}
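// MyCacheListener is not shown in the test above. A minimal stand-in, assuming it only
// needs to be registered and compared by identity, could extend CacheListenerAdapter so
// that only the callbacks of interest are overridden. This sketch is an assumption, not
// the test's actual listener.
public static class MyCacheListener extends CacheListenerAdapter<Object, Object> {
  @Override
  public void afterCreate(EntryEvent<Object, Object> event) {
    // no-op; the test only verifies that the listener is registered on the region
  }
}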
private void createPartitionedRegion(String regionName) {
  final Cache cache = getCache();
  // Create the data region
  RegionFactory<String, Integer> dataRegionFactory =
      cache.createRegionFactory(RegionShortcut.PARTITION);
  dataRegionFactory.setConcurrencyLevel(4);
  EvictionAttributes ea =
      EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY);
  dataRegionFactory.setEvictionAttributes(ea);
  dataRegionFactory.setEnableAsyncConflation(true);
  FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par1", true);
  PartitionAttributes pa = new PartitionAttributesFactory()
      .setLocalMaxMemory(100)
      .setRecoveryDelay(2)
      .setTotalMaxMemory(200)
      .setRedundantCopies(1)
      .addFixedPartitionAttributes(fpa)
      .create();
  dataRegionFactory.setPartitionAttributes(pa);
  dataRegionFactory.addCacheListener(new CacheListener1());
  dataRegionFactory.addCacheListener(new CacheListener2());
  dataRegionFactory.create(regionName);
}
private void initializeRedis() {
  synchronized (this.cache) {
    RegionFactory<String, RedisDataType> rfMeta =
        cache.createRegionFactory(RegionShortcut.REPLICATE);
    rfMeta.addCacheListener(this.metaListener);
    RegionFactory<ByteArrayWrapper, ByteArrayWrapper> rfString =
        cache.createRegionFactory(DEFAULT_REGION_TYPE);
    RegionFactory<ByteArrayWrapper, HyperLogLogPlus> rfHLL =
        cache.createRegionFactory(DEFAULT_REGION_TYPE);
    Region<ByteArrayWrapper, ByteArrayWrapper> stringsRegion;
    if ((stringsRegion = this.cache.getRegion(STRING_REGION)) == null) {
      stringsRegion = rfString.create(GemFireRedisServer.STRING_REGION);
    }
    Region<ByteArrayWrapper, HyperLogLogPlus> hLLRegion;
    if ((hLLRegion = this.cache.getRegion(HLL_REGION)) == null) {
      hLLRegion = rfHLL.create(HLL_REGION);
    }
    Region<String, RedisDataType> redisMetaData;
    if ((redisMetaData = this.cache.getRegion(REDIS_META_DATA_REGION)) == null) {
      redisMetaData = rfMeta.create(REDIS_META_DATA_REGION);
    }
    this.regionCache = new RegionProvider(stringsRegion, hLLRegion, redisMetaData,
        expirationFutures, expirationExecutor, this.DEFAULT_REGION_TYPE);
    redisMetaData.put(REDIS_META_DATA_REGION, RedisDataType.REDIS_PROTECTED);
    redisMetaData.put(HLL_REGION, RedisDataType.REDIS_PROTECTED);
    redisMetaData.put(STRING_REGION, RedisDataType.REDIS_PROTECTED);
  }
  checkForRegions();
}
/**
 * Exec a mapReduce job to find all updates for a particular entry and write
 * those entries back out to a GemFire region (HDFS_RESULT_REGION) as
 * ("originalKey_EventNumber_Operation", value). Note that destroyed keys have a value
 * of "DESTORYED". Creates the HDFS result region if needed.
 */
public static synchronized void getAllHDFSEventsForKey(String keys) {
  Cache cache = CacheHelper.getCache();
  if (cache.getRegion(HDFS_RESULT_REGION) == null) {
    // make this a replicated region (so it doesn't affect rebalance, waitForRecovery, etc)
    Region hdfsResultRegion = cache.createRegionFactory(RegionShortcut.REPLICATE)
        .addCacheListener(new SummaryLogListener())
        .create(HDFS_RESULT_REGION);
    Log.getLogWriter().info("Created hdfsResultRegion " + hdfsResultRegion.getFullPath()
        + " for dumping HDFS updates for " + keys);
    CacheServer cs = cache.addCacheServer();
    int port = PortHelper.getRandomPort();
    cs.setPort(port);
    try {
      cs.start();
      Log.getLogWriter().info("Started server listening on port " + port);
    } catch (IOException e) {
      throw new TestException("getAllHDFSEventsForKey caught " + e
          + " while starting a CacheServer " + TestHelper.getStackTrace(e));
    }
  }
  execMapReduceJob("hdfs.mapreduce.GetAllHDFSEventsForKey", keys);
}
for (String cacheListener : cacheListeners) {
  Class<CacheListener<K, V>> cacheListenerKlass =
      forName(cacheListener, CliStrings.CREATE_REGION__CACHELISTENER);
  factory.addCacheListener(
      newInstance(cacheListenerKlass, CliStrings.CREATE_REGION__CACHELISTENER));
}
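// forName and newInstance are helpers from the surrounding gfsh command class and are
// not shown here. A plausible sketch, assuming they simply wrap reflection and turn
// failures into user-facing errors keyed by the offending option name; the method names
// are kept, but the bodies below are assumptions, not the actual implementation.
@SuppressWarnings("unchecked")
private static <K, V> Class<CacheListener<K, V>> forName(String className, String neededFor) {
  try {
    return (Class<CacheListener<K, V>>) Class.forName(className);
  } catch (ClassNotFoundException e) {
    throw new IllegalArgumentException("Could not load class for " + neededFor, e);
  }
}

private static <T> T newInstance(Class<T> klass, String neededFor) {
  try {
    return klass.newInstance();
  } catch (InstantiationException | IllegalAccessException e) {
    throw new IllegalArgumentException("Could not instantiate class for " + neededFor, e);
  }
}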