// Creates the replicated, distributed-ack region "region" with concurrency
// checks (version vectors) disabled. This is a member of an anonymous
// runnable whose header lies outside this chunk; the trailing "} });"
// closes that anonymous class and its enclosing invocation.
public void run() {
  Cache cache = getCache(); // ensures the cache exists; rf below is built standalone
  RegionFactory rf = new RegionFactory();
  rf.setScope(Scope.DISTRIBUTED_ACK);
  rf.setConcurrencyChecksEnabled(false); // test exercises behavior without concurrency checks
  rf.setDataPolicy(DataPolicy.REPLICATE);
  rf.create("region");
} });
/**
 * Creates a replicated region wired with the shared test cache listener and
 * cache writer, using the supplied compressor and off-heap setting.
 *
 * @param name a region name.
 * @param compressor a compressor to install on the region.
 * @param offHeap whether region values are stored off-heap.
 * @return the newly created region.
 */
private Region createRegion(String name, Compressor compressor, boolean offHeap) {
  RegionFactory<String, String> factory = getCache().<String, String> createRegionFactory();
  factory.addCacheListener(CACHE_LISTENER);
  factory.setCacheWriter(CACHE_WRITER);
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setCompressor(compressor);
  factory.setEnableOffHeapMemory(offHeap);
  return factory.create(name);
} }
/**
 * Creates a cloning-enabled region with the given data policy and compressor.
 *
 * @param name the region name.
 * @param dataPolicy the type of peer.
 * @param compressor a compressor.
 * @return the newly created region.
 */
private Region<String, String> createRegion(String name, DataPolicy dataPolicy, Compressor compressor) {
  RegionFactory<String, String> factory = getCache().<String, String> createRegionFactory();
  factory.setDataPolicy(dataPolicy);
  factory.setCloningEnabled(true);
  factory.setCompressor(compressor);
  return factory.create(name);
}
/**
 * Creates the persistent replicated region "region" backed by the disk store
 * named "diskStore". A disk store is created first for its side effect of
 * registering it with the cache.
 *
 * @return the newly created region.
 */
protected Region createRegion() {
  RegionFactory factory = new RegionFactory();
  factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  factory.setDiskStoreName("diskStore");
  return factory.create("region");
}
/**
 * Creates a partitioned region constrained to the given key and value classes.
 *
 * @param regionName name of the region to create.
 * @param cache cache in which to create the region.
 * @param keyConstraint class to which keys are constrained.
 * @param valueConstraint class to which values are constrained.
 * @return the newly created region.
 */
private Region<?,?> createParReg(String regionName, Cache cache, Class keyConstraint, Class valueConstraint ) {
  RegionFactory factory = cache.createRegionFactory();
  factory.setKeyConstraint(keyConstraint);
  factory.setValueConstraint(valueConstraint);
  factory.setDataPolicy(DataPolicy.PARTITION);
  return factory.create(regionName);
}
factory.setPartitionAttributes(partitionAttributes);
factory.setDataPolicy(originalDataPolicy);
// Apply the key constraint only when one was supplied.
// NOTE(review): the closing brace of this "if" falls outside this chunk —
// confirm which of the following calls are meant to be guarded by it.
if (keyConstraint != null && !keyConstraint.isEmpty()) {
  Class<K> keyConstraintClass = forName(keyConstraint, CliStrings.CREATE_REGION__KEYCONSTRAINT);
  factory.setKeyConstraint(keyConstraintClass);
  factory.setValueConstraint(valueConstraintClass);
  // Entry-level expiration.
  factory.setEntryIdleTimeout(entryExpirationIdleTime.convertToExpirationAttributes());
  factory.setEntryTimeToLive(entryExpirationTTL.convertToExpirationAttributes());
  // BUG FIX: region-level expiration was previously routed through the
  // entry-level setters (setEntryIdleTimeout/setEntryTimeToLive), silently
  // overwriting the entry expiration configured just above and leaving the
  // region-level expiration unset. Use the region-level setters instead.
  factory.setRegionIdleTimeout(regionExpirationIdleTime.convertToExpirationAttributes());
  factory.setRegionTimeToLive(regionExpirationTTL.convertToExpirationAttributes());
  factory.setDiskStoreName(diskStore);
  factory.setDiskSynchronous(regionCreateArgs.isDiskSynchronous());
  factory.setOffHeap(regionCreateArgs.isOffHeap());
  factory.setStatisticsEnabled(regionCreateArgs.isStatisticsEnabled());
  factory.setEnableAsyncConflation(regionCreateArgs.isEnableAsyncConflation());
  factory.setEnableSubscriptionConflation(regionCreateArgs.isEnableSubscriptionConflation());
// Creates a persistent replicated region whose backing disk store shares the
// region's name, with synchronous disk writes. Member of an anonymous
// runnable; the trailing "} });" closes the enclosing anonymous class and
// invocation, which begin outside this chunk.
public void run() {
  Cache cache = getCache();
  RegionFactory regionFactory = cache.createRegionFactory();
  // NOTE(review): the disk store name equals the region name — presumably a
  // disk store with this name is created elsewhere before this runs; verify.
  regionFactory.setDiskStoreName(regionName);
  regionFactory.setDiskSynchronous(true); // write through to disk synchronously
  regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  regionFactory.setScope(Scope.DISTRIBUTED_ACK);
  regionFactory.create(regionName);
} });
private void createPartitionedRegion(String regionName) { final Cache cache = getCache(); // Create the data region RegionFactory<String, Integer> dataRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION); dataRegionFactory.setConcurrencyLevel(4); EvictionAttributes ea = EvictionAttributes.createLIFOEntryAttributes(100, EvictionAction.LOCAL_DESTROY); dataRegionFactory.setEvictionAttributes(ea); dataRegionFactory.setEnableAsyncConflation(true); FixedPartitionAttributes fpa = FixedPartitionAttributes.createFixedPartition("Par1", true); PartitionAttributes pa = new PartitionAttributesFactory() .setLocalMaxMemory(100) .setRecoveryDelay(2) .setTotalMaxMemory(200) .setRedundantCopies(1) .addFixedPartitionAttributes(fpa) .create(); dataRegionFactory.setPartitionAttributes(pa); dataRegionFactory.addCacheListener(new CacheListener1()); dataRegionFactory.addCacheListener(new CacheListener2()); dataRegionFactory.create(regionName); }
// Creates a partitioned region with one redundant copy, five buckets, LRU
// eviction keeping at most one entry in memory, and concurrency checks
// disabled; off-heap storage follows isOffHeapEnabled(). The trailing
// "} });" closes an enclosing anonymous class that begins outside this chunk.
public void run() {
  Cache cache = getCache(); // ensures the cache exists; rf below is built standalone
  RegionFactory rf = new RegionFactory();
  rf.setEnableOffHeapMemory(isOffHeapEnabled());
  rf.setDataPolicy(DataPolicy.PARTITION);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(1);
  paf.setTotalNumBuckets(5);
  rf.setPartitionAttributes(paf.create());
  // Aggressive eviction: only a single entry kept in memory at a time.
  rf.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1));
  rf.setConcurrencyChecksEnabled(false);
  rf.create(name);
} });
/**
 * Verifies that a RegionFactory seeded from an existing region's attributes
 * produces a region with equal attributes, and that a later region created
 * from the same factory ends up with attributes differing from the second.
 *
 * @throws CacheException if region creation fails.
 */
public void testRegionFactoryRegionAttributes() throws CacheException {
  Region r1 = null, r2 = null, r3 = null;
  try {
    Properties p = new Properties();
    p.put("mcast-port", "0"); // standalone member: no multicast discovery
    r1 = new RegionFactory(p).setScope(Scope.LOCAL)
        .setConcurrencyLevel(1).setLoadFactor(0.8F).setKeyConstraint(
        String.class).setStatisticsEnabled(true).create(r1Name);
    assertBasicRegionFunctionality(r1, r1Name);
    // Seed a fresh factory from r1's attributes; r2 must match r1 exactly.
    final RegionFactory factory = new RegionFactory(p, r1.getAttributes());
    r2 = factory.create(r2Name);
    assertBasicRegionFunctionality(r2, r2Name);
    assertEquals(r1.getAttributes(), r2.getAttributes());
    // NOTE(review): r3 comes from the same, apparently unmodified factory yet
    // the test expects its attributes to differ from r2's — presumably
    // create() perturbs some per-region attribute state; confirm against the
    // RegionFactory contract.
    r3 = factory.create(r3Name);
    try {
      assertEquals(r2.getAttributes(), r3.getAttributes());
      fail("Expected r2 attributes to be different from r3");
    } catch (AssertionFailedError expected) {
      // expected: the equality assertion above should fail
    }
  } finally {
    // Always clean up whatever subset of the regions was created.
    cleanUpRegion(r1);
    cleanUpRegion(r2);
    cleanUpRegion(r3);
  }
}
// Creates an accessor-only (REPLICATE_PROXY) region with concurrency checks
// disabled. Member of an anonymous Callable; the trailing "};" closes the
// anonymous class, whose header lies outside this chunk.
public Object call() throws Exception {
  RegionFactory<Integer, TestDataCloseDuringRegionOp> rf =
      getCache().createRegionFactory(RegionShortcut.REPLICATE_PROXY);
  rf.setConcurrencyChecksEnabled(false);
  rf.create(replName);
  return null; // Callable contract requires a return value; none is meaningful here
} };
// Creates the persistent replicated region "region" on top of a freshly
// created disk store, capping each oplog at 1 so oplog rolling happens
// quickly during the test. The trailing "} });" closes an enclosing
// anonymous class that begins outside this chunk.
public Region createRegion() {
  DiskStoreImpl ds = createDiskStore(); // created for its side effect; ds itself is unused below
  RegionFactory rf = new RegionFactory();
  rf.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  rf.setDiskDirs(diskDirs);
  DiskWriteAttributesFactory daf = new DiskWriteAttributesFactory();
  daf.setMaxOplogSize(1); // NOTE(review): presumably megabytes — confirm against DiskWriteAttributesFactory docs
  rf.setDiskWriteAttributes(daf.create());
  Region region = rf.create("region");
  return region;
} });
// Test fixture setup: registers a WAN listener behind a batch-size-1 async
// event queue named "wanqueue", then creates the replicated region "test"
// attached to that queue so every region event is forwarded to the listener.
public void setUp() throws Exception {
  super.setUp();
  wan = new WanListener();
  // Batch size 1 so each event is delivered to the listener immediately.
  cache.createAsyncEventQueueFactory().setBatchSize(1).create("wanqueue", wan);
  region = cache.<Integer, MyObject>createRegionFactory(RegionShortcut.REPLICATE)
      .addAsyncEventQueueId("wanqueue").create("test");
}
/**
 * Creates a persistent region backed by the named disk store: either a
 * persistent partitioned region with one redundant copy, or a persistent
 * replicated region.
 *
 * @param cache cache in which to create the region.
 * @param regionName name of the region.
 * @param diskStoreName disk store backing the region.
 * @param isPR true for a persistent partitioned region, false for persistent replicate.
 * @return the newly created region.
 */
protected Region createPersistentRegion(Cache cache, String regionName, String diskStoreName, boolean isPR) {
  RegionFactory factory = cache.createRegionFactory();
  factory.setDiskStoreName(diskStoreName);
  if (isPR) {
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(1);
    factory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    factory.setPartitionAttributes(paf.create());
  } else {
    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  }
  return factory.create(regionName);
}
/**
 * Creates a partitioned region with default partition attributes.
 *
 * @param regionName name of the region to create.
 * @param cache cache in which to create the region.
 * @return the newly created region.
 */
private Region<?,?> createParReg(String regionName, Cache cache) {
  return cache.createRegionFactory()
      .setDataPolicy(DataPolicy.PARTITION)
      .create(regionName);
}
/**
 * Creates a replicated region with the supplied compressor, optionally
 * storing values off-heap.
 *
 * @param name a region name.
 * @param compressor a compressor.
 * @param offHeap whether region values are stored off-heap.
 * @return the newly created region.
 */
private Region createRegion(String name, Compressor compressor, boolean offHeap) {
  RegionFactory<String, String> factory = getCache().<String, String> createRegionFactory();
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setCompressor(compressor);
  factory.setEnableOffHeapMemory(offHeap);
  return factory.create(name);
} }
/**
 * Creates three top-level replicated regions (REGION1, REGION2, REGION3);
 * REGION1 additionally receives a subregion tree: SUBREGION1C, plus
 * SUBREGION1A which in turn contains SUBREGION1B.
 */
private void createRegionsWithSubRegions() {
  final Cache cache = getCache();
  RegionFactory<String, Integer> factory = cache.createRegionFactory(RegionShortcut.REPLICATE);
  factory.setConcurrencyLevel(3);
  Region<String, Integer> root = factory.create(REGION1);
  // Subregions inherit their parent's attributes.
  root.createSubregion(SUBREGION1C, root.getAttributes());
  Region<String, Integer> childA = root.createSubregion(SUBREGION1A, root.getAttributes());
  childA.createSubregion(SUBREGION1B, childA.getAttributes());
  factory.create(REGION2);
  factory.create(REGION3);
}
    // Fragment of region setup (the start of the first chain and the
    // receiver of the third fall outside this chunk): attaches a
    // TestCacheLoader to a replicated region, a partitioned region, and two
    // overflow variants that spill to the disk store "store" after one entry.
    .setCacheLoader(new TestCacheLoader())
    .create("replicate");
cache.createRegionFactory(RegionShortcut.PARTITION)
    .setCacheLoader(new TestCacheLoader())
    .create("pr");
    // NOTE(review): the receiver of this chain (presumably
    // cache.createRegionFactory(RegionShortcut.REPLICATE_OVERFLOW)) is not
    // visible in this chunk — confirm against the full file.
    .setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
    .setDiskStoreName("store")
    .setCacheLoader(new TestCacheLoader())
    .create("overflow_replicate");
cache.createRegionFactory(RegionShortcut.PARTITION_OVERFLOW)
    // Overflow to disk once more than one entry is in memory.
    .setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
    .setDiskStoreName("store")
    .setCacheLoader(new TestCacheLoader())
    .create("overflow_pr");
// Verifies that a cache loader installed via RegionFactory is propagated to
// the partitioned region's local data store, and that the LoaderHelper passed
// to it carries the expected key, region name, and callback argument. The
// trailing "} };" closes an enclosing anonymous class that begins outside
// this chunk.
public void run2() throws CacheException {
  getCache(); // ensure the cache exists before building the region
  CacheLoader cl = new TestCacheLoader() {
    public Object load2(LoaderHelper helper) throws CacheLoaderException {
      assertNotNull(helper);
      assertEquals(key1, helper.getKey());
      assertEquals(rName, helper.getRegion().getName());
      assertEquals(arg, helper.getArgument());
      // Echo the callback argument back as the loaded value.
      return helper.getArgument();
    }
  };
  PartitionedRegion pr = (PartitionedRegion) new RegionFactory()
      .setCacheLoader(cl)
      .setPartitionAttributes(
          new PartitionAttributesFactory()
              .setRedundantCopies(1)
              .setLocalMaxMemory(localMaxMemory)
              .create())
      .create(rName);
  // The exact loader instance must have been wired through to the data store.
  assertSame(cl, pr.getDataStore().getCacheLoader());
} };
// Creates a replicated region named after the current test. Member of an
// anonymous Callable; the trailing "};" closes the anonymous class, whose
// header lies outside this chunk.
public Object call() throws Exception {
  getCache().createRegionFactory(RegionShortcut.REPLICATE).create(testName);
  return null; // Callable contract requires a return value; none is meaningful here
} };